Dataset columns (dtype and observed value range; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 .. 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 .. 209 |
| max_stars_repo_name | string | length 5 .. 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count | int64 | 1 .. 191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24, ⌀ |
| max_issues_repo_path | string | length 4 .. 209 |
| max_issues_repo_name | string | length 5 .. 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count | int64 | 1 .. 67k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24, ⌀ |
| max_forks_repo_path | string | length 4 .. 209 |
| max_forks_repo_name | string | length 5 .. 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count | int64 | 1 .. 105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24, ⌀ |
| content | string | length 4 .. 1.02M |
| avg_line_length | float64 | 1.07 .. 66.1k |
| max_line_length | int64 | 4 .. 266k |
| alphanum_fraction | float64 | 0.01 .. 1 |

The records below follow this schema: each one lists its metadata fields, then the file content, then the three per-file statistics.
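For orientation only, here is a minimal sketch of how a dataset with this schema could be inspected with the Hugging Face `datasets` library. The dataset path below is a placeholder assumption, not something named anywhere in this dump:

```python
# Minimal sketch; "org/python-code-corpus" is a placeholder name, not from this dump.
from datasets import load_dataset

ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

for row in ds.take(2):
    # Each record pairs the raw file text ("content") with repo metadata
    # such as star/issue/fork counts and event timestamps.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:80])
```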
hexsha: 3d63a676f62f397501daa765ef6e26164f7bb466 | size: 228 | ext: py | lang: Python
max_stars: path=leonardo_otavio_telefone.py | repo=aldrinhronchi/python_regex_tarefa | head=fc008b3b65ac02b561ea113afd9e5074aa813360 | licenses=["MIT"] | count=1 | events 2021-07-15T07:05:01.000Z .. 2021-07-15T07:05:01.000Z
max_issues: path=leonardo_otavio_telefone.py | repo=aldrinhronchi/python_regex_tarefa | head=fc008b3b65ac02b561ea113afd9e5074aa813360 | licenses=["MIT"] | count=null | events null .. null
max_forks: path=leonardo_otavio_telefone.py | repo=aldrinhronchi/python_regex_tarefa | head=fc008b3b65ac02b561ea113afd9e5074aa813360 | licenses=["MIT"] | count=8 | events 2019-09-11T23:56:24.000Z .. 2019-09-12T00:21:40.000Z
content:
# coding: utf-8
import re
telefone = input('Digite seu telefone: ')
pattern = re.compile(r"(\([0-9]{2}\)?\s)?([0-9]{4,5}\-?[0-9]{4})")
if pattern.match(telefone):
print("telefone valido!")
else:
print("telefone invalido!")
avg_line_length: 19 | max_line_length: 66 | alphanum_fraction: 0.622807
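As a quick illustration of what the pattern in the snippet above accepts, a small check with made-up phone numbers (not part of the original file):

```python
import re

# Same pattern as above: an optional "(DD) " area code, then 4-5 digits,
# an optional hyphen, and 4 more digits.
pattern = re.compile(r"(\([0-9]{2}\)?\s)?([0-9]{4,5}\-?[0-9]{4})")

for sample in ["(11) 91234-5678", "91234-5678", "12345678", "telefone"]:
    print(sample, "->", bool(pattern.match(sample)))
# The first three match; the last one does not.
```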
hexsha: 97dcb559abdaa29d2a3d2b4366d889fb198f159d | size: 8,349 | ext: py | lang: Python
max_stars: path=plugin.video.yatp/site-packages/hachoir_metadata/register.py | repo=mesabib/kodi.yatp | head=d874df43047b5b58f84cb3760fc891d9a133a69f | licenses=["CNRI-Python"] | count=194 | events 2016-12-23T19:11:09.000Z .. 2020-12-07T04:04:10.000Z
max_issues: path=lib/hachoir_metadata/register.py | repo=nosmokingbandit/Autolycus | head=37b376ba2fc27aa7e98f0071b457ebfbc605d4ab | licenses=["ECL-2.0", "Apache-2.0"] | count=236 | events 2016-11-20T07:56:15.000Z .. 2017-04-12T12:10:00.000Z
max_forks: path=lib/hachoir_metadata/register.py | repo=nosmokingbandit/Autolycus | head=37b376ba2fc27aa7e98f0071b457ebfbc605d4ab | licenses=["ECL-2.0", "Apache-2.0"] | count=51 | events 2016-11-20T08:05:33.000Z .. 2021-01-26T13:22:40.000Z
content:
from hachoir_core.i18n import _
from hachoir_core.tools import (
humanDuration, humanBitRate,
humanFrequency, humanBitSize, humanFilesize,
humanDatetime)
from hachoir_core.language import Language
from hachoir_metadata.filter import Filter, NumberFilter, DATETIME_FILTER
from datetime import date, datetime, timedelta
from hachoir_metadata.formatter import (
humanAudioChannel, humanFrameRate, humanComprRate, humanAltitude,
humanPixelSize, humanDPI)
from hachoir_metadata.setter import (
setDatetime, setTrackNumber, setTrackTotal, setLanguage)
from hachoir_metadata.metadata_item import Data
MIN_SAMPLE_RATE = 1000 # 1 kHz
MAX_SAMPLE_RATE = 192000 # 192 kHz
MAX_NB_CHANNEL = 8 # 8 channels
MAX_WIDTH = 20000 # 20 000 pixels
MAX_BIT_RATE = 500 * 1024 * 1024 # 500 Mbit/s
MAX_HEIGHT = MAX_WIDTH
MAX_DPI_WIDTH = 10000
MAX_DPI_HEIGHT = MAX_DPI_WIDTH
MAX_NB_COLOR = 2 ** 24 # 16 million of color
MAX_BITS_PER_PIXEL = 256 # 256 bits/pixel
MAX_FRAME_RATE = 150 # 150 frame/sec
MAX_NB_PAGE = 20000
MAX_COMPR_RATE = 1000.0
MIN_COMPR_RATE = 0.001
MAX_TRACK = 999
DURATION_FILTER = Filter(timedelta,
timedelta(milliseconds=1),
timedelta(days=365))
def registerAllItems(meta):
meta.register(Data("title", 100, _("Title"), type=unicode))
meta.register(Data("artist", 101, _("Artist"), type=unicode))
meta.register(Data("author", 102, _("Author"), type=unicode))
meta.register(Data("music_composer", 103, _("Music composer"), type=unicode))
meta.register(Data("album", 200, _("Album"), type=unicode))
meta.register(Data("duration", 201, _("Duration"), # integer in milliseconde
type=timedelta, text_handler=humanDuration, filter=DURATION_FILTER))
meta.register(Data("nb_page", 202, _("Nb page"), filter=NumberFilter(1, MAX_NB_PAGE)))
meta.register(Data("music_genre", 203, _("Music genre"), type=unicode))
meta.register(Data("language", 204, _("Language"), conversion=setLanguage, type=Language))
meta.register(Data("track_number", 205, _("Track number"), conversion=setTrackNumber,
filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
meta.register(Data("track_total", 206, _("Track total"), conversion=setTrackTotal,
filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
meta.register(Data("organization", 210, _("Organization"), type=unicode))
meta.register(Data("version", 220, _("Version")))
meta.register(Data("width", 301, _("Image width"), filter=NumberFilter(1, MAX_WIDTH), type=(int, long), text_handler=humanPixelSize))
meta.register(Data("height", 302, _("Image height"), filter=NumberFilter(1, MAX_HEIGHT), type=(int, long), text_handler=humanPixelSize))
meta.register(Data("nb_channel", 303, _("Channel"), text_handler=humanAudioChannel, filter=NumberFilter(1, MAX_NB_CHANNEL), type=(int, long)))
meta.register(Data("sample_rate", 304, _("Sample rate"), text_handler=humanFrequency, filter=NumberFilter(MIN_SAMPLE_RATE, MAX_SAMPLE_RATE), type=(int, long, float)))
meta.register(Data("bits_per_sample", 305, _("Bits/sample"), text_handler=humanBitSize, filter=NumberFilter(1, 64), type=(int, long)))
meta.register(Data("image_orientation", 306, _("Image orientation")))
meta.register(Data("nb_colors", 307, _("Number of colors"), filter=NumberFilter(1, MAX_NB_COLOR), type=(int, long)))
meta.register(Data("bits_per_pixel", 308, _("Bits/pixel"), filter=NumberFilter(1, MAX_BITS_PER_PIXEL), type=(int, long)))
meta.register(Data("filename", 309, _("File name"), type=unicode))
meta.register(Data("file_size", 310, _("File size"), text_handler=humanFilesize, type=(int, long)))
meta.register(Data("pixel_format", 311, _("Pixel format")))
meta.register(Data("compr_size", 312, _("Compressed file size"), text_handler=humanFilesize, type=(int, long)))
meta.register(Data("compr_rate", 313, _("Compression rate"), text_handler=humanComprRate, filter=NumberFilter(MIN_COMPR_RATE, MAX_COMPR_RATE), type=(int, long, float)))
meta.register(Data("width_dpi", 320, _("Image DPI width"), filter=NumberFilter(1, MAX_DPI_WIDTH), type=(int, long), text_handler=humanDPI))
meta.register(Data("height_dpi", 321, _("Image DPI height"), filter=NumberFilter(1, MAX_DPI_HEIGHT), type=(int, long), text_handler=humanDPI))
meta.register(Data("file_attr", 400, _("File attributes")))
meta.register(Data("file_type", 401, _("File type")))
meta.register(Data("subtitle_author", 402, _("Subtitle author"), type=unicode))
meta.register(Data("creation_date", 500, _("Creation date"), text_handler=humanDatetime,
filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
meta.register(Data("last_modification", 501, _("Last modification"), text_handler=humanDatetime,
filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
meta.register(Data("latitude", 510, _("Latitude"), type=float))
meta.register(Data("longitude", 511, _("Longitude"), type=float))
meta.register(Data("altitude", 511, _("Altitude"), type=float, text_handler=humanAltitude))
meta.register(Data("location", 530, _("Location"), type=unicode))
meta.register(Data("city", 531, _("City"), type=unicode))
meta.register(Data("country", 532, _("Country"), type=unicode))
meta.register(Data("charset", 540, _("Charset"), type=unicode))
meta.register(Data("font_weight", 550, _("Font weight")))
meta.register(Data("camera_aperture", 520, _("Camera aperture")))
meta.register(Data("camera_focal", 521, _("Camera focal")))
meta.register(Data("camera_exposure", 522, _("Camera exposure")))
meta.register(Data("camera_brightness", 530, _("Camera brightness")))
meta.register(Data("camera_model", 531, _("Camera model"), type=unicode))
meta.register(Data("camera_manufacturer", 532, _("Camera manufacturer"), type=unicode))
meta.register(Data("compression", 600, _("Compression")))
meta.register(Data("copyright", 601, _("Copyright"), type=unicode))
meta.register(Data("url", 602, _("URL"), type=unicode))
meta.register(Data("frame_rate", 603, _("Frame rate"), text_handler=humanFrameRate,
filter=NumberFilter(1, MAX_FRAME_RATE), type=(int, long, float)))
meta.register(Data("bit_rate", 604, _("Bit rate"), text_handler=humanBitRate,
filter=NumberFilter(1, MAX_BIT_RATE), type=(int, long, float)))
meta.register(Data("aspect_ratio", 604, _("Aspect ratio"), type=(int, long, float)))
meta.register(Data("thumbnail_size", 604, _("Thumbnail size"), text_handler=humanFilesize, type=(int, long, float)))
meta.register(Data("iso_speed_ratings", 800, _("ISO speed rating")))
meta.register(Data("exif_version", 801, _("EXIF version")))
meta.register(Data("date_time_original", 802, _("Date-time original"), text_handler=humanDatetime,
filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
meta.register(Data("date_time_digitized", 803, _("Date-time digitized"), text_handler=humanDatetime,
filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
meta.register(Data("compressed_bits_per_pixel", 804, _("Compressed bits per pixel"), type=(int, long, float)))
meta.register(Data("shutter_speed_value", 805, _("Shutter speed"), type=(int, long, float)))
meta.register(Data("aperture_value", 806, _("Aperture")))
meta.register(Data("exposure_bias_value", 807, _("Exposure bias")))
meta.register(Data("focal_length", 808, _("Focal length")))
meta.register(Data("flashpix_version", 809, _("Flashpix version")))
meta.register(Data("focal_plane_x_resolution", 810, _("Focal plane width")))
meta.register(Data("focal_plane_y_resolution", 811, _("Focal plane height"), type=float))
meta.register(Data("focal_length_in_35mm_film", 812, _("Focal length in 35mm film")))
meta.register(Data("os", 900, _("OS"), type=unicode))
meta.register(Data("producer", 901, _("Producer"), type=unicode))
meta.register(Data("comment", 902, _("Comment"), type=unicode))
meta.register(Data("format_version", 950, _("Format version"), type=unicode))
meta.register(Data("mime_type", 951, _("MIME type"), type=unicode))
meta.register(Data("endian", 952, _("Endianness"), type=unicode))
avg_line_length: 64.223077 | max_line_length: 172 | alphanum_fraction: 0.709067
hexsha: 7365b70b5ccfcc8ab11470a5aa33d0cf98ef4f5f | size: 1,829 | ext: py | lang: Python
max_stars: path=_1327/polls/migrations/0001_initial.py | repo=Nef10/1327 | head=71fb83a3c12ba24a7638acebeeffed80825e0101 | licenses=["MIT"] | count=null | events null .. null
max_issues: path=_1327/polls/migrations/0001_initial.py | repo=Nef10/1327 | head=71fb83a3c12ba24a7638acebeeffed80825e0101 | licenses=["MIT"] | count=null | events null .. null
max_forks: path=_1327/polls/migrations/0001_initial.py | repo=Nef10/1327 | head=71fb83a3c12ba24a7638acebeeffed80825e0101 | licenses=["MIT"] | count=null | events null .. null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('text', models.CharField(max_length=255)),
('description', models.TextField(blank=True, default='')),
('votes', models.IntegerField(default=0)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Poll',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('title', models.CharField(max_length=255)),
('description', models.TextField(blank=True, default='')),
('start_date', models.DateField()),
('end_date', models.DateField()),
('is_multiple_choice_question', models.BooleanField(default=True)),
('participants', models.ManyToManyField(related_name='polls', blank=True, to=settings.AUTH_USER_MODEL)),
],
options={
'permissions': (('view_poll', 'User/Group is allowed to view that question'),),
},
bases=(models.Model,),
),
migrations.AddField(
model_name='choice',
name='poll',
field=models.ForeignKey(related_name='choices', to='polls.Poll'),
preserve_default=True,
),
]
avg_line_length: 36.58 | max_line_length: 120 | alphanum_fraction: 0.559322
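For readers less familiar with Django migrations, this is roughly the models.py that the migration above corresponds to, reconstructed from its field definitions. The on_delete argument is an addition required by modern Django and is not in the original migration:

```python
from django.conf import settings
from django.db import models


class Poll(models.Model):
    title = models.CharField(max_length=255)
    description = models.TextField(blank=True, default='')
    start_date = models.DateField()
    end_date = models.DateField()
    is_multiple_choice_question = models.BooleanField(default=True)
    participants = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='polls', blank=True)

    class Meta:
        permissions = (('view_poll', 'User/Group is allowed to view that question'),)


class Choice(models.Model):
    text = models.CharField(max_length=255)
    description = models.TextField(blank=True, default='')
    votes = models.IntegerField(default=0)
    poll = models.ForeignKey(Poll, related_name='choices', on_delete=models.CASCADE)
```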
hexsha: d13e245c30a533081d4ca09061b28be55ee28c4e | size: 59 | ext: py | lang: Python
max_stars: path=tests/i18n/other/locale/fr/formats.py | repo=ni-ning/django | head=2e7ba6057cfc82a15a22b6021cd60cf307152e2d | licenses=["CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | count=61,676 | events 2015-01-01T00:05:13.000Z .. 2022-03-31T20:37:54.000Z
max_issues: path=tests/i18n/other/locale/fr/formats.py | repo=ni-ning/django | head=2e7ba6057cfc82a15a22b6021cd60cf307152e2d | licenses=["CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | count=8,884 | events 2015-01-01T00:12:05.000Z .. 2022-03-31T19:53:11.000Z
max_forks: path=tests/i18n/other/locale/fr/formats.py | repo=mustafa0x/django | head=d7394cfa13a4d1a02356e3a83e10ec100fbb9948 | licenses=["BSD-3-Clause", "0BSD"] | count=33,143 | events 2015-01-01T02:04:52.000Z .. 2022-03-31T19:42:46.000Z
content:
# A user-defined format
CUSTOM_DAY_FORMAT = 'd/m/Y CUSTOM'
avg_line_length: 19.666667 | max_line_length: 34 | alphanum_fraction: 0.745763
hexsha: 0f3a2c02367282969f333efedd2f44fd4d4f3dae | size: 4,085 | ext: py | lang: Python
max_stars: path=accelbyte_py_sdk/api/sessionbrowser/models/models_matching_ally.py | repo=AccelByte/accelbyte-python-sdk | head=dcd311fad111c59da828278975340fb92e0f26f7 | licenses=["MIT"] | count=null | events null .. null
max_issues: path=accelbyte_py_sdk/api/sessionbrowser/models/models_matching_ally.py | repo=AccelByte/accelbyte-python-sdk | head=dcd311fad111c59da828278975340fb92e0f26f7 | licenses=["MIT"] | count=1 | events 2021-10-13T03:46:58.000Z .. 2021-10-13T03:46:58.000Z
max_forks: path=accelbyte_py_sdk/api/sessionbrowser/models/models_matching_ally.py | repo=AccelByte/accelbyte-python-sdk | head=dcd311fad111c59da828278975340fb92e0f26f7 | licenses=["MIT"] | count=null | events null .. null
content:
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-session-browser-service ()
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ..models.models_matching_party import ModelsMatchingParty
class ModelsMatchingAlly(Model):
"""Models matching ally (models.MatchingAlly)
Properties:
matching_parties: (matching_parties) REQUIRED List[ModelsMatchingParty]
"""
# region fields
matching_parties: List[ModelsMatchingParty] # REQUIRED
# endregion fields
# region with_x methods
def with_matching_parties(self, value: List[ModelsMatchingParty]) -> ModelsMatchingAlly:
self.matching_parties = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "matching_parties"):
result["matching_parties"] = [i0.to_dict(include_empty=include_empty) for i0 in self.matching_parties]
elif include_empty:
result["matching_parties"] = []
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
matching_parties: List[ModelsMatchingParty],
) -> ModelsMatchingAlly:
instance = cls()
instance.matching_parties = matching_parties
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelsMatchingAlly:
instance = cls()
if not dict_:
return instance
if "matching_parties" in dict_ and dict_["matching_parties"] is not None:
instance.matching_parties = [ModelsMatchingParty.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["matching_parties"]]
elif include_empty:
instance.matching_parties = []
return instance
@classmethod
def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, ModelsMatchingAlly]:
return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_} if dict_ else {}
@classmethod
def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[ModelsMatchingAlly]:
return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []
@classmethod
def create_from_any(cls, any_: any, include_empty: bool = False, many: bool = False) -> Union[ModelsMatchingAlly, List[ModelsMatchingAlly], Dict[Any, ModelsMatchingAlly]]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"matching_parties": "matching_parties",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"matching_parties": True,
}
# endregion static methods
avg_line_length: 33.760331 | max_line_length: 175 | alphanum_fraction: 0.682742
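A short usage sketch of the generated model above. The party payload is made up, and it assumes ModelsMatchingParty exposes the same create_from_dict/to_dict interface as this class (the generated SDK suggests this, but that file is not shown here):

```python
# Hypothetical round trip: build an ally from a plain dict, then serialize it back.
payload = {"matching_parties": [{"party_id": "example-party"}]}  # made-up data
ally = ModelsMatchingAlly.create_from_dict(payload, include_empty=True)
print(ally.to_dict(include_empty=True))

# The fluent builder style works as well:
ally2 = ModelsMatchingAlly().with_matching_parties(ally.matching_parties)
```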
hexsha: 2bdd6522855852247d41887b08197cc17f7dd13b | size: 1,561 | ext: py | lang: Python
max_stars: path=common_api/eval.py | repo=ntuliuteam/EDLAB | head=453451e3d6673f872e8f8cf44038fda2768a802b | licenses=["Apache-2.0"] | count=null | events null .. null
max_issues: path=common_api/eval.py | repo=ntuliuteam/EDLAB | head=453451e3d6673f872e8f8cf44038fda2768a802b | licenses=["Apache-2.0"] | count=null | events null .. null
max_forks: path=common_api/eval.py | repo=ntuliuteam/EDLAB | head=453451e3d6673f872e8f8cf44038fda2768a802b | licenses=["Apache-2.0"] | count=null | events null .. null
content:
# Copyright (c) HP-NTU Digital Manufacturing Corporate Lab, Nanyang Technological University, Singapore.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from argparse import ArgumentParser
# import numpy as np
# import skimage.io as io
# import pylab
# pylab.rcParams['figure.figsize'] = (10.0, 8.0)
def build_args():
parser = ArgumentParser()
parser.add_argument('--annFile', help='The ground truth file', required=True)
parser.add_argument('--resFile', help='The experimental results', required=True)
parser.add_argument('--imgIds', help='The id file of tested images', required=True)
return parser
args = build_args().parse_args()
annType = 'bbox'
# annFile = '/home/mlab/Documents/konghao/workspace/hp-ntu/dataset/coco2014/annotations/instances_val2014.json'
cocoGt = COCO(args.annFile)
# resFile = '/home/mlab/Documents/konghao/workspace/hp-ntu/HP-Tool/output/results.json'
cocoDt = cocoGt.loadRes(args.resFile)
# minival_ids = '/home/mlab/Documents/konghao/workspace/hp-ntu/dataset/coco2014/mscoco_minival_ids.txt'
with open(args.imgIds, 'r') as f:
imgIds = f.read()
imgIds = imgIds.split()
imgIds = list(map(int,imgIds))
imgIds = sorted(imgIds)
cocoEval = COCOeval(cocoGt, cocoDt, annType)
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
avg_line_length: 33.212766 | max_line_length: 112 | alphanum_fraction: 0.734785
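The evaluation script above is driven entirely by its three command-line arguments. A hypothetical invocation of the same parser it builds, with placeholder file paths that are not taken from the source:

```python
# Parse placeholder arguments with the parser defined by build_args().
parser = build_args()
args = parser.parse_args([
    "--annFile", "annotations/instances_val2014.json",
    "--resFile", "output/results.json",
    "--imgIds", "mscoco_minival_ids.txt",
])
print(args.annFile, args.resFile, args.imgIds)
```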
hexsha: dedfa4a421ab36a262c97c582324ad3d87717618 | size: 1,156 | ext: py | lang: Python
max_stars: path=get_fav_tracks.py | repo=bathulas/spotifyListeningTrends | head=8f42a9bbade430096a807a7aa8b90cb0ab8a3770 | licenses=["MIT"] | count=null | events null .. null
max_issues: path=get_fav_tracks.py | repo=bathulas/spotifyListeningTrends | head=8f42a9bbade430096a807a7aa8b90cb0ab8a3770 | licenses=["MIT"] | count=null | events null .. null
max_forks: path=get_fav_tracks.py | repo=bathulas/spotifyListeningTrends | head=8f42a9bbade430096a807a7aa8b90cb0ab8a3770 | licenses=["MIT"] | count=null | events null .. null
content:
#!/usr/bin/python3
import json
import time
import requests
offset = 0
limit = 50
total_tracks = []
def get_data(url, offset, limit):
token = "{token}"
#Your token goes here
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": "Bearer {}".format(token)
}
req = requests.get(url,params={"offset":str(offset), "limit":str(limit)}, headers=headers)
return req.json()
# Page through the saved-tracks endpoint 50 items at a time, keeping each page.
response = get_data("https://api.spotify.com/v1/me/tracks", offset, limit)
while len(response["items"]) > 0:
    time.sleep(1)
    offset += limit
    total_tracks.append(response)
    response = get_data("https://api.spotify.com/v1/me/tracks", offset, limit)
# Get audio features of favorite tracks
track_ids = [[(track['added_at'], track['track']['id']) for track in x['items']] for x in total_tracks]
id_strings = ["%2C".join([x[1] for x in id_list_]) for id_list_ in track_ids]
track_features_ = []
for id_string in id_strings:
    time.sleep(0.6)
    resp = get_data("https://api.spotify.com/v1/audio-features?ids=" + id_string, 0, 0)
    track_features_.append(resp)
avg_line_length: 31.243243 | max_line_length: 97 | alphanum_fraction: 0.658304
hexsha: bc76dad292f350bcdb06200bf7758909d5464b93 | size: 1,428 | ext: py | lang: Python
max_stars: path=test/test_api_result_my_account_balance.py | repo=parruc/bondora_api | head=f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d | licenses=["Apache-2.0"] | count=8 | events 2019-03-09T20:38:27.000Z .. 2021-02-10T20:44:22.000Z
max_issues: path=test/test_api_result_my_account_balance.py | repo=parruc/bondora_api | head=f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d | licenses=["Apache-2.0"] | count=1 | events 2018-03-06T09:44:21.000Z .. 2018-03-06T09:44:21.000Z
max_forks: path=test/test_api_result_my_account_balance.py | repo=parruc/bondora_api | head=f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d | licenses=["Apache-2.0"] | count=3 | events 2019-06-03T13:44:05.000Z .. 2020-11-16T13:17:38.000Z
content:
# coding: utf-8
"""
Bondora API V1
Bondora API version 1
OpenAPI spec version: v1
Contact: investor@bondora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import bondora_api
from bondora_api.rest import ApiException
from bondora_api.models.api_result_my_account_balance import ApiResultMyAccountBalance
class TestApiResultMyAccountBalance(unittest.TestCase):
""" ApiResultMyAccountBalance unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testApiResultMyAccountBalance(self):
"""
Test ApiResultMyAccountBalance
"""
model = bondora_api.models.api_result_my_account_balance.ApiResultMyAccountBalance()
if __name__ == '__main__':
unittest.main()
avg_line_length: 26.444444 | max_line_length: 92 | alphanum_fraction: 0.733894
hexsha: 442dcddd9f4f612f3ee6c379f139b3869ea14f68 | size: 15,358 | ext: py | lang: Python
max_stars: path=SCRM/audit/auditor_service.py | repo=prakashar11/secure-cloud-native-fabric | head=8a6c51d8744c33afb8edc1455caaccb011f7e1de | licenses=["Apache-2.0"] | count=2 | events 2019-07-30T05:53:36.000Z .. 2019-10-26T20:11:52.000Z
max_issues: path=SCRM/audit/auditor_service.py | repo=prakashar11/secure-cloud-native-fabric | head=8a6c51d8744c33afb8edc1455caaccb011f7e1de | licenses=["Apache-2.0"] | count=6 | events 2019-06-17T05:22:32.000Z .. 2021-06-10T21:36:22.000Z
max_forks: path=SCRM/audit/auditor_service.py | repo=prakashar11/secure-cloud-native-fabric | head=8a6c51d8744c33afb8edc1455caaccb011f7e1de | licenses=["Apache-2.0"] | count=3 | events 2019-10-26T21:20:02.000Z .. 2020-06-01T14:25:17.000Z
content:
#
# Copyright 2019 Altran. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import json
import os
import sys
import asyncio
import base64
import collections
from nameko.events import EventDispatcher
from nameko.rpc import rpc, RpcProxy
from nameko.standalone.rpc import ClusterRpcProxy
from datetime import datetime
from nats.aio.client import Client as NATS
#from auditor_data import Auditor
from audit.auditor_data import Auditor
from audit.run_audit import RunAuditClass
import configparser
import yaml
import eventlet.tpool
import urllib.parse
sys.path.append('./util/')
from elastic_search_client import get_request, post_request, post_request_ver2, get_document_by_url, get_document_by_id_ver2, update_document, get_all_documents, create_auditor_index
from gphmodels import GphDB, AuditorInstance, Cluster
from gphmodels import Auditor as GphAuditor
CONFIG = {'AMQP_URI' : "amqp://guest:guest@localhost:5672"}
class AuditorService:
name = 'AuditorService'
cloud_configurations = []
def __init__(self):
self.__NATSserversList = ["nats://SCF_IP:4222"]
self.__natsClient = NATS()
self.__subject = 'audit_result_queue'
self.__msgReader = self.__reportHandler
self.__loop = asyncio.new_event_loop()
print("Created a NatsEventSubscriberService")
''' Method loads the default configurations for existing Auditor types in Auditor table '''
@staticmethod
def loadAuditorTypeRegistry(auditor_dict):
'loads the Auditor dict'
print('Loading Auditor service ....')
try:
#url = auditor_dict[0]
config = configparser.ConfigParser()
# config.read("/home/ubuntu/scrm_git/config.ini")
config.read("PROJ_PATH/config.ini")
'''Load Autior type default conficguration in Elastic DB '''
if config['Auditor_data'] and config['Auditor_data']['Auditors']:
auditor_name_list = config['Auditor_data']["Auditors"].split(',')
auditor_list = []
for auditor_name in auditor_name_list:
availableAuditors = Auditor.getAvailableAuditors()
if(config[auditor_name]['Type'] in availableAuditors.keys()):
resp = AuditorService.__createDefaultEntryForAuditor(config[auditor_name]['Default_yaml_path'], config[auditor_name]['Type'])
else:
print("Auditor Type not found in Registry : " + config[auditor_name]['Type'])
'''Load the Auditor-Cluster Registry '''
auditorClusterRegistries = get_all_documents('auditor_cluster_registry')
for registry in auditorClusterRegistries:
Auditor.createAuditorInClusterRegistry(registry.get('_source').get('jsondoc').get('cluster'), registry.get('_source').get('jsondoc').get('auditor_type'), registry.get('_source').get('jsondoc').get('config'), registry.get('_source').get('jsondoc').get('runState'))
'''Load the cloud configurations for refernce in cloud_configurations '''
cloudList = config['Organization']['Clouds'].split(',')
for cloudName in cloudList:
cloud = {'Id' : config[cloudName]['Id'], 'cloudName' : config[cloudName]['CloudName'], 'nats_queue' : config[cloudName]['nats_queue']}
AuditorService.cloud_configurations.append(cloud)
#print("----Clouds----")
#print(self.cloud_configurations)
print('Done loading!!')
#AuditorService.createNatsSubscriber()
except Exception as e:
print("Exception : %s" %(e))
sys.stdout.flush()
print("End")
return [{"result":"SUCCESS"}]
@rpc
def getAuditorTypes(self):
print("---getAuditorTypes--")
print(Auditor.getAvailableAuditors())
auditor_dict = Auditor.getAvailableAuditors()
auditor_types = []
if auditor_dict != None:
auditor_types = list(auditor_dict.keys())
sys.stdout.flush()
return auditor_types
def __createDefaultEntryForAuditor(filePath, auditorType):
try:
with open(filePath, "r") as f:
auditorBodystr = f.read()
auditorName = auditorType+'_default'
auditor = dict()
auditor.update({'auditor_type' : auditorType })
auditor.update({'auditor_name' : auditorName})
#auditor.update({'auditor_body': auditorBody})
blobdata = base64.b64encode(bytes(auditorBodystr, 'utf-8')).decode('ascii')
metadata = dict()
metadata.update({'auditor_type' : auditorType })
metadata.update({'cluster' : None})
#resp = post_request_ver2('auditors', json.dumps(auditor), auditorName, metadata)
post_request_ver2('auditors', data = json.dumps(auditor), identity = auditorName, metadata = metadata, blob = blobdata)
print('successful for {}'.format(auditorType))
return None
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print("!!!! Exception while reading default yaml !!!!")
print(exc_type, fname, exc_tb.tb_lineno)
print(e)
'''Creates an auditor with given config, for each cluster in the list and adds a mapping for the same '''
@rpc
def createAuditorAndMapCluster(self,requestData):
print("***** Enter Method - " + str(datetime.now()))
sys.stdout.flush()
auditorType = requestData['auditorType']
postureName = requestData['postureName']
auditorBody = requestData['auditorBody']
clusters = requestData['clusters']
auditorRelations = []
relationDict = dict()
for cluster in clusters:
''' Create the Auditors in Databse'''
auditorName = auditorType + '_' + cluster
auditor = dict()
auditor.update({'auditor_type' : auditorType })
auditor.update({'auditor_name' : auditorName })
#auditor.update({'auditor_body' : auditorBody})
metadata = dict()
metadata.update({'auditor_type' : auditorType })
metadata.update({'cluster' : cluster })
blob = base64.b64encode(bytes(auditorBody, 'utf-8')).decode('ascii')
auditorUrl = post_request_ver2('auditors', json.dumps(auditor), urllib.parse.quote_plus(auditorName), metadata, blob )
if auditorType == 'SDWANBranchSysCalls':
try:
cluster_obj = GphDB.getGphObject(cluster, Cluster)
agent_obj = get_document_by_id_ver2('agents', str(cluster_obj.cloudId))
finaldata = {'purpose' : 'SetConfig', 'data' : [] }
aud_obj = dict()
aud_obj.update({'auditor_type' : auditorType })
aud_obj.update({'auditor_name' : auditorName})
aud_obj.update({'auditor_body' : list(yaml.load_all(auditorBody))})
finaldata['data'].append(aud_obj)
loop = asyncio.new_event_loop()
loop.run_until_complete(RunAuditClass.publishAudit(loop, agent_obj['agent_queue'], json.dumps(finaldata)))
loop.close()
except Exception as e:
print("Exception while setting config on agent : \n %s" %(e))
''' Add the AuditorType-Auditor-Cluster relationships to the node graph'''
relationData = dict()
relationData.update({'cluster': cluster})
relationData.update({'auditorName' : auditorName})
relationData.update({'auditorUrl' : auditorUrl})
auditorRelations.append(relationData)
''' Update the Cluster Auditor Registry'''
registryResp = Auditor.createAuditorInClusterRegistry(cluster, auditorType, auditorUrl)
print("Registry entry created, Status - " + registryResp)
''' Add AuditorInstance to Graph DB '''
audInst = AuditorInstance(name=auditorName, documentLink=auditorUrl, state=registryResp).save()
print("auditor instance saved to graph DB !!!")
try:
audiInGph = GphDB.getGphObject(auditorType, GphAuditor)
audInst.instanceOf.connect(audiInGph)
except Exception as e:
print("Exception while linkinf AuditorInstance to Auditor : \n %s" %(e))
''' Update index to add Registry data'''
registryEntry = {'cluster': cluster, 'auditor_type' : auditorType, 'config' : auditorUrl, 'runState' : registryResp, 'auditor_name' : auditorName}
post_request_ver2('auditor_cluster_registry', json.dumps(registryEntry), urllib.parse.quote_plus(auditorName), None )
print("data saved to auditor_cluster_registry !!!")
relationDict.update({'posture' : postureName})
relationDict.update({'relations' : auditorRelations})
#call the node graph method
try:
GphDB.addAudInstRelationsBulk(relationDict)
except Exception as e:
print("Exception while linkinf AuditorInstance to Auditor : \n %s" %(e))
return "Success"
@rpc
def runAuditorForCluster(self, requestData):
print("----runAuditorForCluster-------")
auditorType = requestData.get('auditorType')
cluster = requestData.get('cluster')
#set the cloudid for the cloud of the cluster ----
#There will be a difference NATs queue for each cloud.
clusterInDB = GphDB.getGphObject(cluster, Cluster)
if clusterInDB:
cloudId = clusterInDB.cloudId
else:
cloudId = '1'
cloud = self.__getCloudConfigurationForId(cloudId)
print("auditor type " + auditorType)
print("cluster" + cluster)
runstate = Auditor.runAuditor(auditorType, cluster, cloud)
'''Update the runstate in DB '''
cluster_auditor_id = auditorType + '_' + cluster
cluster_auditor_entry = get_document_by_id_ver2('auditor_cluster_registry', urllib.parse.quote_plus(cluster_auditor_id))
print('**************cluster_auditor_entry***************' + str(cluster_auditor_entry))
print("rubstate" + runstate)
update_object ={'jsondoc' : {'runState' : runstate}}
cluster_auditor_entry['runState'] = runstate
update_document('auditor_cluster_registry', cluster_auditor_id , update_object)
try:
''' Update status in Gph DB '''
audiInGph = GphDB.getGphObject(cluster_auditor_id, AuditorInstance)
if audiInGph:
audiInGph.state = runstate
audiInGph.save()
except Exception as e:
print("---------EXCEPTION IN updating auditor_cluster_registry in elasticDB---------------- %s\n" % e)
return cluster_auditor_entry
#@rpc
def createNatsSubscriber(self):
'Create a NATS Subscriber and run its event loop'
self.__loop.run_until_complete(self.__natsReaderLoop(self.__loop))
print("Completed run until complete loop")
try:
print("Starting event Loop")
self.__loop.run_forever()
finally:
print("Closing event Loop")
self.__loop.close()
def __natsReaderLoop(self, loop):
try:
yield from self.__natsClient.connect(servers=self.__NATSserversList,
io_loop=self.__loop)
yield from self.__natsClient.subscribe(self.__subject,
"nats-subscriber",
self.__msgReader)
print("Subscribed")
sys.stdout.flush()
except Exception as e:
print(e)
sys.stdout.flush()
sys.exit(1)
async def __reportHandler(self, msg):
print("Received message on Topic: {subject} : Data {data}"
.format(subject = msg.subject, data = msg.data.decode()))
report = json.loads(msg.data.decode())
metadata = dict()
metadata.update({'auditor_type' : report['auditor_type'] })
metadata.update({'cluster' : report['cluster']})
post_request_ver2('audit_reports', json.dumps(report), None, metadata)
print('******** successfuly saved report in elastic DB ********')
#Update the runstate in DB
runstate = Auditor.finishedAuditorRun(report['auditor_type'], report['cluster'])
print('******** successfuly updated status in registry *********')
cluster_auditor_id = report['auditor_type'] + '_' + report['cluster']
cluster_auditor_entry = get_document_by_id_ver2('auditor_cluster_registry', urllib.parse.quote_plus(cluster_auditor_id))
update_object ={'jsondoc' : {'runState' : runstate}}
cluster_auditor_entry['runState'] = runstate
update_document('auditor_cluster_registry', cluster_auditor_id , update_object)
print('******* successfuly updated auditor_cluster_registry in elastic DB *******')
audiInGph = GphDB.getGphObject(cluster_auditor_id, AuditorInstance)
if audiInGph:
audiInGph.state = runstate
audiInGph.save()
print('******** successfuly updated status in GphDB ********')
print("************* data saved ************** ")
def __getCloudConfigurationForId(self, cloudId):
for cloud in self.cloud_configurations:
if cloud['Id'] == cloudId:
return cloud
return None
from nameko.containers import ServiceContainer
from nameko.runners import ServiceRunner
from nameko.testing.utils import get_container
runner = ServiceRunner(config=CONFIG)
runner.add_service(AuditorService)
runner.start()
create_auditor_index()
AuditorService.loadAuditorTypeRegistry(None)
aud = AuditorService()
aud.createNatsSubscriber()
if __name__ == '__main__':
print("main")
"""request = [{'type' : 'K8S', 'class' : 'kubeaud'}, {'type' : 'secmonk', 'class' : 'awsaud'}]
ob = AuditorService()
ob.loadAuditorTypeRegistry(request)
print(Auditor.AuditorTypeRegistry)"""
f = open("/home/ubuntu/SCF/SCRM/audit/K8S.Falco/default.yaml", "r")
#print(f.read())
file_object = yaml.load(f, Loader=yaml.FullLoader)
print(file_object)
avg_line_length: 39.278772 | max_line_length: 281 | alphanum_fraction: 0.617138
hexsha: 09e59add6929b12ed1898742c534770bea5dffb1 | size: 2,181 | ext: py | lang: Python
max_stars: path=selection/sampling/randomized/norms/base.py | repo=Xiaoying-Tian/selective-inference | head=a20c5ad3f527beb709d5b8d7301016640738b092 | licenses=["BSD-3-Clause"] | count=null | events null .. null
max_issues: path=selection/sampling/randomized/norms/base.py | repo=Xiaoying-Tian/selective-inference | head=a20c5ad3f527beb709d5b8d7301016640738b092 | licenses=["BSD-3-Clause"] | count=null | events null .. null
max_forks: path=selection/sampling/randomized/norms/base.py | repo=Xiaoying-Tian/selective-inference | head=a20c5ad3f527beb709d5b8d7301016640738b092 | licenses=["BSD-3-Clause"] | count=null | events null .. null
content:
import numpy as np
from regreg.atoms.seminorms import seminorm
class selective_penalty(seminorm):
def setup_sampling(self,
gradient,
soln,
linear_randomization,
quadratic_coef):
"""
Should store quadratic_coef.
Its return value is the chosen parametrization
of the selection event.
In other API methods, this return value is
referred to as `opt_vars`
"""
raise NotImplementedError("abstract method")
def form_subgradient(self, opt_vars):
"""
Given the chosen parametrization
of the selection event, this should form
`z`, an element the subgradient of the penalty
at `beta`.
"""
raise NotImplementedError("abstract method")
def form_parameters(self, opt_vars):
"""
Given the chosen parametrization
of the selection event, this should form
`beta`.
"""
raise NotImplementedError("abstract method")
def form_optimization_vector(self, opt_vars):
"""
Given the chosen parametrization
of the selection event, this should form
`(beta, z, epsilon * beta + z)`.
"""
raise NotImplementedError("abstract method")
def log_jacobian(self, hessian):
"""
Given the Hessian of the loss at `beta`,
compute the appropriate Jacobian which is the
determinant of this matrix plus the Jacobian
of the map $\epsilon \beta + z$
"""
raise NotImplementedError("abstract method")
def step_variables(self, state, randomization, logpdf, gradient):
"""
State is a tuple (data, opt_vars).
This method should take a Metropolis-Hastings
step for `opt_vars`.
The logpdf, is the callable that computes
the density of the randomization,
as well as the jacobian of the parameterization.
randomization should be a callable that samples
from the original randomization density.
"""
raise NotImplementedError("abstract method")
avg_line_length: 30.291667 | max_line_length: 69 | alphanum_fraction: 0.608895
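To make the abstract contract above concrete, a minimal, hypothetical subclass skeleton; only a few of the hooks are shown, and the stub logic is illustrative only, not taken from the selective-inference codebase:

```python
import numpy as np


class identity_penalty(selective_penalty):
    """Illustrative stub: treats the solution itself as the optimization variables."""

    def setup_sampling(self, gradient, soln, linear_randomization, quadratic_coef):
        self.quadratic_coef = quadratic_coef
        return np.asarray(soln)                  # opt_vars

    def form_subgradient(self, opt_vars):
        return np.zeros_like(opt_vars)           # z

    def form_parameters(self, opt_vars):
        return opt_vars                          # beta

    def form_optimization_vector(self, opt_vars):
        beta = self.form_parameters(opt_vars)
        z = self.form_subgradient(opt_vars)
        return beta, z, self.quadratic_coef * beta + z
```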
hexsha: 6c909d916cbbb13f84607aa3c345938882d9f430 | size: 21,555 | ext: py | lang: Python
max_stars: path=gvsoc/runner/python/runner/stim_utils.py | repo=00-01/gap_sdk | head=25444d752b26ccf0b848301c381692d77172852c | licenses=["Apache-2.0"] | count=118 | events 2018-05-22T08:45:59.000Z .. 2022-03-30T07:00:45.000Z
max_issues: path=gvsoc/runner/python/runner/stim_utils.py | repo=00-01/gap_sdk | head=25444d752b26ccf0b848301c381692d77172852c | licenses=["Apache-2.0"] | count=213 | events 2018-07-25T02:37:32.000Z .. 2022-03-30T18:04:01.000Z
max_forks: path=gvsoc/runner/python/runner/stim_utils.py | repo=00-01/gap_sdk | head=25444d752b26ccf0b848301c381692d77172852c | licenses=["Apache-2.0"] | count=76 | events 2018-07-04T08:19:27.000Z .. 2022-03-24T09:58:05.000Z
content:
#!/usr/bin/env python3
#
# Copyright (C) 2018 ETH Zurich, University of Bologna and GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors: Germain Haugou, ETH (germain.haugou@iis.ee.ethz.ch)
#
from elftools.elf.elffile import ELFFile
import os
import os.path
import struct
import argparse
class stim(object):
def __init__(self, verbose=False):
self.binaries = []
self.mem = {}
self.verbose = verbose
self.areas = []
self.dump('Created stimuli generator')
def get_entry(self):
with open(self.binaries[0], 'rb') as file:
elffile = ELFFile(file)
return elffile.header['e_entry']
def dump(self, str):
if self.verbose:
print (str)
def add_binary(self, binary):
self.dump(' Added binary: %s' % binary)
self.binaries.append(binary)
def add_area(self, start, size):
self.dump(' Added target area: [0x%x -> 0x%x]' % (start, start + size))
self.areas.append([start, start+size])
def __add_mem_word(self, base, size, data, width):
aligned_base = base & ~(width - 1)
shift = base - aligned_base
iter_size = width - shift
if iter_size > size:
iter_size = size
value = self.mem.get(str(aligned_base))
if value is None:
value = 0
value &= ~(((1<<width) - 1) << (shift*8))
value |= int.from_bytes(data[0:iter_size], byteorder='little') << (shift*8)
self.mem[str(aligned_base)] = value
return iter_size
def __add_mem(self, base, size, data, width):
while size > 0:
iter_size = self.__add_mem_word(base, size, data, width)
size -= iter_size
base += iter_size
data = data[iter_size:]
def __gen_stim_slm(self, filename, width):
self.dump(' Generating to file: ' + filename)
try:
os.makedirs(os.path.dirname(filename))
except:
pass
with open(filename, 'w') as file:
for key in sorted(self.mem.keys()):
file.write('%X_%0*X\n' % (int(key), width*2, self.mem.get(key)))
def __parse_binaries(self, width):
self.mem = {}
for binary in self.binaries:
with open(binary, 'rb') as file:
elffile = ELFFile(file)
for segment in elffile.iter_segments():
if segment['p_type'] == 'PT_LOAD':
data = segment.data()
addr = segment['p_paddr']
size = len(data)
load = True
if len(self.areas) != 0:
load = False
for area in self.areas:
if addr >= area[0] and addr + size <= area[1]:
load = True
break
if load:
self.dump(' Handling section (base: 0x%x, size: 0x%x)' % (addr, size))
self.__add_mem(addr, size, data, width)
if segment['p_filesz'] < segment['p_memsz']:
addr = segment['p_paddr'] + segment['p_filesz']
size = segment['p_memsz'] - segment['p_filesz']
self.dump(' Init section to 0 (base: 0x%x, size: 0x%x)' % (addr, size))
self.__add_mem(addr, size, [0] * size, width)
else:
self.dump(' Bypassing section (base: 0x%x, size: 0x%x)' % (addr, size))
def gen_stim_slm_64(self, stim_file):
self.__parse_binaries(8)
self.__gen_stim_slm(stim_file, 8)
def gen_stim_bin(self, stim_file):
self.__parse_binaries(1)
try:
os.makedirs(os.path.dirname(stim_file))
except:
pass
with open(stim_file, 'wb') as file:
prev_addr = None
for key in sorted(self.mem.keys()):
addr = int(key)
if prev_addr is not None:
while prev_addr != addr - 1:
file.write(struct.pack('B', 0))
prev_addr += 1
prev_addr = addr
file.write(struct.pack('B', int(self.mem.get(key))))
class Efuse(object):
def __init__(self, config, verbose=False):
self.config = config
self.verbose = verbose
self.dump('Created efuse stimuli generator')
def dump(self, str):
if self.verbose:
print (str)
def gen_stim_txt(self, filename):
user_efuses = {}
efuses = self.config.get('**/efuse/values')
if efuses is None:
efuses = []
else:
efuses = efuses.get_dict()
for efuse in efuses:
efuse_id, val = efuse.split(':')
efuse_id = int(efuse_id, 0)
val = int(val, 0)
user_efuses[efuse_id] = val
nb_regs = self.config.get_child_int('**/efuse/nb_regs')
pulp_chip = self.config.get_child_str('**/chip/name')
pulp_chip_family = self.config.get_child_str('**/chip/pulp_chip_family')
if pulp_chip_family == 'gap' or pulp_chip == 'vega' or pulp_chip == 'gap9' or pulp_chip == 'gap9_v2':
load_mode = self.config.get_child_str('**/runner/boot-mode')
encrypted = self.config.get_child_str('**/efuse/encrypted')
aes_key = self.config.get_child_str('**/efuse/aes_key')
aes_iv = self.config.get_child_str('**/efuse/aes_iv')
xtal_check = self.config.get_child_bool('**/efuse/xtal_check')
xtal_check_delta = self.config.get_child_bool('**/efuse/xtal_check_delta')
xtal_check_min = self.config.get_child_bool('**/efuse/xtal_check_min')
xtal_check_max = self.config.get_child_bool('**/efuse/xtal_check_max')
no_preload = self.config.get_child_str('**/efuse/no-preload')
# In case we boot with the classic rom mode, don't init any efuse, the boot loader will boot with the default mode
load_mode_hex = None
if pulp_chip == 'gap':
if load_mode == 'rom':
load_mode_hex = 0x3A
elif load_mode == 'spi':
load_mode_hex = 0x0A
elif load_mode == 'jtag':
load_mode_hex = 0x12
elif load_mode == 'rom_hyper':
load_mode_hex = 0x2A
elif load_mode == 'rom_spim_single':
load_mode_hex = 0x32
elif load_mode == 'rom_spim':
load_mode_hex = 0x3A
elif load_mode == 'jtag_dev' or load_mode == 'spi_dev':
load_mode_hex = None
if xtal_check:
if load_mode_hex == None: load_mode_hex = 0
load_mode_hex |= 1<<7
delta = int(xtal_check_delta*((1 << 15)-1))
efuses.append('26:0x%x' % (delta & 0xff))
efuses.append('27:0x%x' % ((delta >> 8) & 0xff))
efuses.append('28:0x%x' % (xtal_check_min))
efuses.append('29:0x%x' % (xtal_check_max))
if load_mode_hex != None:
if encrypted:
load_mode_hex |= 0x40
for i in range(0, 16):
efuses.append('%d:0x%s' % (2+i, aes_key[30-i*2:32-i*2]))
for i in range(0, 8):
efuses.append('%d:0x%s' % (18+i, aes_iv[14-i*2:16-i*2]))
efuses.append('0:%s' % load_mode_hex)
elif pulp_chip == 'vega' or pulp_chip == 'gap9':
efuses = [0] * 128
info2 = 0
info3 = 0
info4 = 0
info5 = 0
info6 = 0
clk_div = self.config.get_child_int('**/efuse/clkdiv')
fll_freq = self.config.get_child_int('**/efuse/fll/freq')
fll_assert_cycles = self.config.get_child_int('**/efuse/fll/assert_cycles')
fll_lock_tolerance = self.config.get_child_int('**/efuse/fll/lock_tolerance')
hyper_delay = self.config.get_child_int('**/efuse/hyper/delay')
hyper_latency = self.config.get_child_int('**/efuse/hyper/latency')
if load_mode == 'rom':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
elif load_mode == 'rom_hyper':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# Hyperflash type
info3 = (1 << 0)
elif load_mode == 'rom_spim':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# SPI flash type
info3 = (0 << 0)
elif load_mode == 'rom_mram':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# MRAM type
info3 = (2 << 0)
# Activate MRAM TRIM CFG and fill it with dummy numbers until we get the real one. Also activate clock divider
info6 |= (1 << 6) | (1<<7)
info2 |= (2 << 3)
efuses[56] = 32*4
for i in range(0, 32):
efuses [57+i] = i | ((i*4+1)<<8) | ((i*4+2)<<16) | ((i*4+3)<<24)
if clk_div is not None:
info6 |= 1 << 7
info2 = (info2 & ~(3<<3)) | (clk_div << 3)
if fll_freq is not None:
info2 |= (1 << 0) | (1 << 2)
efuses[31] = fll_freq
if fll_lock_tolerance is not None or fll_assert_cycles is not None:
info2 |= (1<< 1)
efuses[32] = fll_lock_tolerance
efuses[33] = fll_assert_cycles
if hyper_delay is not None:
info5 |= (1<<6)
efuses[30] = hyper_delay
if hyper_latency is not None:
info5 |= (1<<7)
efuses[51] = hyper_latency
if load_mode_hex != None:
if encrypted:
load_mode_hex |= 0x40
info6 |= 1<<4
for i in range(0, 16):
efuses[2+i] = aes_key[30-i*2:32-i*2]
for i in range(0, 8):
efuses[18+i] = aes_iv[14-i*2:16-i*2]
efuses[0] = load_mode_hex
efuses[1] = info2
efuses[37] = info3
efuses[38] = info4
efuses[39] = info5
efuses[40] = info6
elif pulp_chip == 'gap9_v2':
efuses = [0] * 128
info2 = 0
info3 = 0
info4 = 0
info5 = 0
info6 = 0
clk_div = self.config.get_child_int('**/efuse/clkdiv')
fll_freq = self.config.get_child_int('**/efuse/fll/freq')
fll_assert_cycles = self.config.get_child_int('**/efuse/fll/assert_cycles')
fll_lock_tolerance = self.config.get_child_int('**/efuse/fll/lock_tolerance')
hyper_delay = self.config.get_child_int('**/efuse/hyper/delay')
hyper_latency = self.config.get_child_int('**/efuse/hyper/latency')
if load_mode == 'rom':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
info5 |= 1 << 1 # Boot on UDMA SPIM1 interface (first single spi)
elif load_mode == 'rom_hyper':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# Hyperflash type
info3 = (1 << 0)
elif load_mode == 'rom_spim':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# SPI flash type
info3 = (0 << 0)
info5 |= 1 << 1 # Boot on UDMA SPIM1 interface (first single spi)
elif load_mode == 'rom_mram':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# MRAM type
info3 = (2 << 0)
# Activate MRAM TRIM CFG and fill it with dummy numbers until we get the real one. Also activate clock divider
info6 |= (1 << 6) | (1<<7)
info2 |= (2 << 3)
efuses[56] = 32*4
for i in range(0, 32):
efuses [57+i] = i | ((i*4+1)<<8) | ((i*4+2)<<16) | ((i*4+3)<<24)
if clk_div is not None:
info6 |= 1 << 7
info2 = (info2 & ~(3<<3)) | (clk_div << 3)
if fll_freq is not None:
info2 |= (1 << 0) | (1 << 2)
efuses[31] = fll_freq
if fll_lock_tolerance is not None or fll_assert_cycles is not None:
info2 |= (1<< 1)
efuses[32] = fll_lock_tolerance
efuses[33] = fll_assert_cycles
if hyper_delay is not None:
info5 |= (1<<6)
efuses[30] = hyper_delay
if hyper_latency is not None:
info5 |= (1<<7)
efuses[51] = hyper_latency
if load_mode_hex != None:
if encrypted:
load_mode_hex |= 0x40
info6 |= 1<<4
for i in range(0, 16):
efuses[2+i] = aes_key[30-i*2:32-i*2]
for i in range(0, 8):
efuses[18+i] = aes_iv[14-i*2:16-i*2]
efuses[0] = load_mode_hex
efuses[1] = info2
efuses[37] = info3
efuses[38] = info4
efuses[39] = info5
efuses[40] = info6
elif pulp_chip == 'gap_rev1':
info3 = 0
info6 = 0
if load_mode == 'rom':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
elif load_mode == 'rom_hyper':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# Hyperflash type
info3 = (1 << 0)
elif load_mode == 'rom_spim':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# SPI flash type
info3 = (0 << 0)
if xtal_check:
if load_mode_hex == None: load_mode_hex = 0
load_mode_hex |= 1<<7
delta = int(xtal_check_delta*((1 << 15)-1))
efuses.append('26:0x%x' % (delta & 0xff))
efuses.append('27:0x%x' % ((delta >> 8) & 0xff))
efuses.append('28:0x%x' % (xtal_check_min))
efuses.append('29:0x%x' % (xtal_check_max))
if load_mode_hex != None:
if encrypted:
load_mode_hex |= 0x40
info6 |= 1<<4
for i in range(0, 16):
efuses.append('%d:0x%s' % (2+i, aes_key[30-i*2:32-i*2]))
for i in range(0, 8):
efuses.append('%d:0x%s' % (18+i, aes_iv[14-i*2:16-i*2]))
efuses.append('0:%s' % load_mode_hex)
efuses.append('1:%s' % 0)
efuses.append('37:%s' % (info3))
efuses.append('38:%s' % 0)
efuses.append('39:%s' % 0)
efuses.append('40:%s' % (info6))
elif pulp_chip == 'gap8_revc':
fll_freq = self.config.get_child_int('**/efuse/fll/freq')
ref_clk_wait = self.config.get_child_int('**/efuse/ref_clk_wait')
burst_size = self.config.get_child_int('**/efuse/burst_size')
flash_id = self.config.get_child_bool('**/efuse/flash_id')
fll_assert_cycles = self.config.get_child_int('**/efuse/fll/assert_cycles')
fll_lock_tolerance = self.config.get_child_int('**/efuse/fll/lock_tolerance')
hyper_delay = self.config.get_child_int('**/efuse/hyper/delay')
hyper_latency = self.config.get_child_int('**/efuse/hyper/latency')
if hyper_delay is None:
hyper_delay = 3
efuses = [0] * 128
info3 = 0
info2 = 0
info6 = 0
info5 = 0
if self.config.get_child_str('**/vsim/model') == 'rtl':
info7 = 1 # Don't use UDMA MEMCPY as it makes RTL platform crash
else:
info7 = 0
if load_mode == 'rom':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
elif load_mode == 'rom_hyper':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# Hyperflash type
info3 = (1 << 0)
info7 |= (1 << 2) # Partially reconfigure pads to overcome HW issue with rwds cg latch
elif load_mode == 'rom_spim':
# RTL platform | flash boot | no encryption | no wait xtal
load_mode_hex = 2 | (2 << 3) | (0 << 4) | (0 << 5) | (0 << 6) | (0 << 7)
# SPI flash type
info3 = (0 << 0)
if burst_size is not None:
info6 |= (1 << 7)
efuses[61] = burst_size & 0xff
efuses[62] = (burst_size >> 8) & 0xff
if flash_id:
info6 |= (1 << 5)
if fll_freq is not None:
info2 |= (1 << 0)
efuses[57] = fll_freq
if ref_clk_wait is not None:
info2 |= (1 << 6)
efuses[35] = ref_clk_wait & 0xff
efuses[36] = (ref_clk_wait >> 8) & 0xff
else:
info2 |= (1 << 6)
efuses[35] = 0
efuses[36] = 0
if hyper_delay is not None:
info5 |= (1<<6)
efuses[32] |= hyper_delay
if hyper_latency is not None:
info5 |= (1<<7)
efuses[51] |= hyper_latency
if fll_lock_tolerance is not None or fll_assert_cycles is not None:
info2 |= (1<< 1)
efuses[58] = fll_lock_tolerance
efuses[59] = fll_assert_cycles
if xtal_check:
if load_mode_hex == None: load_mode_hex = 0
load_mode_hex |= 1<<7
delta = int(xtal_check_delta*((1 << 15)-1))
efuses[26] = delta & 0xff
efuses[27] = (delta >> 8) & 0xff
efuses[28] = xtal_check_min & 0xff
efuses[29] = (xtal_check_min >> 8) & 0xff
efuses[30] |= xtal_check_max & 0xff
efuses[31] = (xtal_check_max >> 8) & 0xff
if load_mode_hex != None:
if encrypted:
load_mode_hex |= 0x40
info6 |= 1<<4
for i in range(0, 16):
efuses[2+i] = int('0x%s' % aes_key[30-i*2:32-i*2], 0)
for i in range(0, 8):
efuses[18+i] = int('0x%s' % aes_iv[14-i*2:16-i*2], 0)
efuses[0] = load_mode_hex
efuses[1] = info2
efuses[37] = info3
efuses[38] = 0
efuses[39] = info5
efuses[40] = info6
efuses[60] = info7
# Efuse preloading file generation
if pulp_chip == 'vega' or pulp_chip == 'gap9' or pulp_chip == 'gap9_v2':
self.dump(' Generating to file: ' + filename)
with open(filename, 'w') as file:
if no_preload is None or no_preload == False:
for efuseId in range (0, 128):
value = efuses[efuseId]
self.dump(' Writing register (index: %d, value: 0x%x)' % (efuseId, value))
file.write('{0:032b}\n'.format(value))
elif pulp_chip == 'gap8_revc':
values = [0] * nb_regs * 8
for efuseId in range (0, nb_regs):
value = user_efuses.get(efuseId)
if value is None:
value = efuses[efuseId]
self.dump(' Writing register (index: %d, value: 0x%x)' % (efuseId, value))
for index in range(0, 8):
if (value >> index) & 1 == 1: values[efuseId + index*128] = 1
self.dump(' Generating to file: ' + filename)
with open(filename, 'w') as file:
for value in values:
file.write('%d ' % (value))
else:
values = [0] * nb_regs * 8
for efuse in efuses:
efuseId, value = efuse.split(':')
self.dump(' Writing register (index: %d, value: 0x%x)' % (int(efuseId, 0), int(value, 0)))
efuseId = int(efuseId, 0)
value = int(value, 0)
for index in range(0, 8):
if (value >> index) & 1 == 1: values[efuseId + index*128] = 1
self.dump(' Generating to file: ' + filename)
with open(filename, 'w') as file:
for value in values:
file.write('%d ' % (value))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate stimuli')
parser.add_argument("--binary", dest="binary", default=None, help="Specify input binary")
parser.add_argument("--vectors", dest="vectors", default=None, help="Specify output vectors file")
args = parser.parse_args()
if args.binary is None:
raise Exception('Specify the input binary with --binary=<path>')
if args.vectors is not None:
stim_gen = stim(verbose=True)
stim_gen.add_binary(args.binary)
stim_gen.gen_stim_slm_64(args.vectors)
avg_line_length: 33.470497 | max_line_length: 120 | alphanum_fraction: 0.524379
hexsha: 8b42d6cb3ff1dcf9e7aac289acee3db5b98db927 | size: 7,343 | ext: py | lang: Python
max_stars: path=matrix-methods/frame2d/Frame2D/Tables.py | repo=nholtz/structural-analysis | head=246d6358355bd9768e30075d1f6af282ceb995be | licenses=["CC0-1.0"] | count=3 | events 2016-05-26T07:01:51.000Z .. 2019-05-31T23:48:11.000Z
max_issues: path=matrix-methods/frame2d/Frame2D/Tables.py | repo=nholtz/structural-analysis | head=246d6358355bd9768e30075d1f6af282ceb995be | licenses=["CC0-1.0"] | count=null | events null .. null
max_forks: path=matrix-methods/frame2d/Frame2D/Tables.py | repo=nholtz/structural-analysis | head=246d6358355bd9768e30075d1f6af282ceb995be | licenses=["CC0-1.0"] | count=1 | events 2016-08-30T06:08:03.000Z .. 2016-08-30T06:08:03.000Z
content:
## Compiled from Tables.ipynb on Sun Dec 10 12:51:08 2017
## DO NOT EDIT THIS FILE. YOUR CHANGES WILL BE LOST!!
## In [1]:
from salib import extend
import pandas as pd
import os, os.path
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import hashlib
from IPython.core.magic import register_cell_magic
import re
## In [2]:
class Table(pd.DataFrame):
"""A Table is just like a pandas DataFrame except that it has
a table name, a data set name, and a file name - the latter two describing
the source of the data."""
_internal_names = pd.DataFrame._internal_names + ['filename','tablename']
_internal_names_set = set(_internal_names)
_metadata = ['dsname']
def __init__(self,*args,**kwargs):
dsname = kwargs.pop('dsname',None)
tablename = kwargs.pop('tablename',None)
filename = kwargs.pop('filename',None)
super(self.__class__,self).__init__(*args,**kwargs)
if dsname is not None:
self.dsname = dsname
if tablename is not None:
self.tablename = tablename
if filename is not None:
self.filename = filename
@property
def _constructor(self):
return self.__class__
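# Illustrative sketch: a Table is constructed like a plain DataFrame but carries
# the extra dsname/tablename metadata. Column names and values here are hypothetical.
def _example_table():
    t = Table({'NODEID': ['A', 'B'], 'X': [0., 4.]},
              dsname='demo', tablename='nodes')
    return t.dsname, t.tablename, list(t.columns)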
## In [13]:
class DataSource(object):
ROOT = 'data'
DSNAME = None # default data set name
DSTYPE = 'dir' # someday we will allow 'zip' for zip archives
#DSTYPE = 'cell' # for CSV data provided via %%Table cell magic
#DSTYPE = 'data' # for dataframe data provided directly
CELLDATA = {} # csv text from %%Table magic cells, indexed by table name
TABLES = {} # dataframes directly provided by client, indexed by table name
DATASOURCE = None # the one and only data source
def __init__(self):
cls = self.__class__
if cls.DATASOURCE is not None:
raise ValueError("Can only create one instance of class '{}'".format(cls.__name__))
self.root = cls.ROOT
self.dsname = cls.DSNAME
self.prefix = None
self.dstype = cls.DSTYPE
self.celldata = cls.CELLDATA
self.tables = cls.TABLES
cls.DATASOURCE = self
## In [16]:
@extend
class DataSource:
@classmethod
def set_root(cls,newroot):
self = cls.DATASOURCE
if not os.path.exists(newroot):
raise ValueError("Root '{}' does not exist.".format(newroot))
self.root = newroot
@classmethod
def set_source(cls,dsname,dstype=None):
self = cls.DATASOURCE
if dsname is not None:
if dstype is None:
dirname = self.root + '/' + dsname + '.d'
if os.path.exists(dirname):
dstype = 'dir'
else:
dstype = 'unknown'
if dstype not in ['dir','cell','data']:
raise ValueError("dstype '{}' is invalid.".format(dstype))
self.dsname = dsname
self.dstype = dstype
self.celldata = {}
self.tables = {}
@classmethod
def set_table(cls,tablename,table):
self = cls.DATASOURCE
self.tables[tablename] = table
if tablename in self.celldata:
del self.celldata[tablename]
@classmethod
def set_celldata(cls,tablename,celltext):
self = cls.DATASOURCE
self.celldata[tablename] = celltext
if tablename in self.tables:
del self.tables[tablename]
def _file_name(self,tablename,prefix=None):
n = tablename
if prefix:
n = prefix + '/' + tablename
return self.root + '/' + self.dsname + '.d/' + n + '.csv'
## In [26]:
@extend
class DataSource:
@classmethod
def read_table(cls,tablename,optional=False,prefix=None,columns=None,extrasok=True):
self = cls.DATASOURCE
stream = None
filename = None
t = None
def _chk(t,columns=columns):
if columns is None:
return t
prov = set(t.columns)
reqd = set(columns)
if reqd-prov:
raise ValueError("Columns missing for table '{}': {}. Required columns are: {}"
.format(tablename,list(reqd-prov),columns))
if prov-reqd:
if not extrasok:
raise ValueError("Extra columns for table '{}': {}. Required columns are: '{}'"
.format(tablename,list(prov-reqd),columns))
t = t[columns]
return t
if tablename in self.tables:
return _chk(self.tables[tablename])
if tablename in self.celldata:
stream = StringIO(self.celldata[tablename])
else:
if self.dsname is not None:
filename = self._file_name(tablename,prefix=prefix)
if os.path.exists(filename):
stream = open(filename,'r')
if stream is None:
if optional:
d = pd.DataFrame(columns=columns)
else:
raise ValueError("Table '{}' does not exist.".format(tablename))
else:
d = pd.read_csv(stream,index_col=None,skipinitialspace=True)
t = Table(d,dsname=self.dsname,tablename=tablename,filename=filename)
return _chk(t)
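# Illustrative sketch of read_table usage; the table name and column list are
# hypothetical, and optional=True returns an empty frame when the table is
# absent instead of raising ValueError.
def _example_read_nodes():
    return DataSource.read_table('nodes', columns=['NODEID', 'X', 'Y'], optional=True)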
## In [37]:
@register_cell_magic('Table')
def cell_table(line,celltext):
mo = re.match(r'\s*(\S+)\s*$',line)
if not mo:
raise ValueError('Usage: %%Table tablename')
tablename = mo.group(1)
global DataSource
DataSource.set_celldata(tablename,celltext)
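# Illustrative sketch: in a notebook, the magic above would be used as
# (table name and CSV content are hypothetical):
#
#   %%Table nodes
#   NODEID,X,Y
#   A,0.,0.
#   B,4.,3.
#
# after which DataSource.read_table('nodes') picks the data up from celldata.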
## In [42]:
@extend
class DataSource:
@classmethod
def write_table(cls,table,root=None,dsname=None,tablename=None,prefix=None,precision=None,index=False,makedir=False):
self = cls.DATASOURCE
if root is None:
root = self.root
if dsname is None:
dsname = self.dsname
if tablename is None:
tablename = table.tablename
dirname = root + '/' + dsname + '.d'
if makedir and not os.path.exists(dirname):
os.mkdir(dirname)
if prefix is not None:
dirname = dirname + '/' + prefix
if makedir and not os.path.exists(dirname):
os.mkdir(dirname)
table.tablename = tablename
table.dsname = dsname
table.filename = filename = dirname + '/' + tablename + '.csv'
float_format = None
if precision is not None:
float_format = '%.{:d}g'.format(precision)
table.to_csv(filename,index=index,float_format=float_format)
return filename
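# Illustrative sketch of write_table usage; assumes DataSource.set_source()
# has been called so a data set name is available. The precision and makedir
# arguments are hypothetical choices.
def _example_write(table):
    return DataSource.write_table(table, tablename='nodes', precision=6, makedir=True)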
## In [43]:
@extend
class Table:
def signature(self):
filename = self.filename
if os.path.exists(filename):
return (self.tablename,self.filename,signature(filename))
raise ValueError("Table {}: filename: {} - does not exist.".format(self.tablename,self.filename))
def signature(filename):
f = open(filename,mode='rb')
m = hashlib.sha256(f.read())
f.close()
return m.hexdigest()
## In [44]:
DataSource.DATASOURCE = None
__ds__ = DataSource()
## In [55]:
DataSource.DATASOURCE = None
__ds__ = DataSource()
## In [ ]:
| 31.926087
| 121
| 0.58137
|
2811350cc9b273bd0453e50d83f3384ffca8e0ba
| 5,345
|
py
|
Python
|
ParsersClasses/LuleshParser.py
|
UFRGS-CAROL/radiation-benchmarks-parsers
|
a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b
|
[
"Apache-2.0"
] | null | null | null |
ParsersClasses/LuleshParser.py
|
UFRGS-CAROL/radiation-benchmarks-parsers
|
a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b
|
[
"Apache-2.0"
] | null | null | null |
ParsersClasses/LuleshParser.py
|
UFRGS-CAROL/radiation-benchmarks-parsers
|
a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b
|
[
"Apache-2.0"
] | null | null | null |
import re
import sys
from Parser import Parser
class LuleshParser(Parser):
_box = 50
_hasThirdDimention = True
_iterations = None
def _jaccardCoefficient(self, errListJaccard):
return 0
def parseErrMethod(self, errString):
if self._box is None:
print ("box is None!!!\nerrString: ", errString)
print("header: ", self._header)
sys.exit(1)
try:
##ERR p: [58978] x_gold:4.950000000000000e-01 x_output:4.949996815262007e-01 y_gold:7.650000000000000e-01 y_output:7.649996815262007e-01 z_gold:4.950000000000000e-01 z_output:4.949996815262007e-01
m = re.match(
'.*ERR.*\[(\d+)\].*x_gold\:([0-9e\+\-\.]+).*x_output\:([0-9e\+\-\.]+).*y_gold\:([0-9e\+\-\.]+).*y_output\:([0-9e\+\-\.]+).*z_gold\:([0-9e\+\-\.]+).*z_output\:([0-9e\+\-\.]+).*',
errString)
if m:
pos = int(m.group(1))
boxSquare = self._box * self._box
posZ = int(pos / boxSquare)
posY = pos if int((pos - (posZ * boxSquare)) / self._box) == 0 else int((pos - (posZ * boxSquare)) / self._box)
posX = pos # if (pos-(posZ*boxSquare)-(posY*box)) == 0 else ((pos-(posZ*boxSquare)) / box)
xe = float(m.group(2))
xr = float(m.group(3))
ye = float(m.group(4))
yr = float(m.group(5))
ze = float(m.group(6))
zr = float(m.group(7))
# [posX, posY, posZ, vr, ve, xr, xe, yr, ye, zr, ze]
# print [posX, posY, posZ, xr, xe, yr, ye, zr, ze]
return [posX, posY, posZ, None, None, xr, xe, yr, ye, zr, ze]
else:
return None
except ValueError:
return None
def setSize(self, header):
#for lulesh
#structured:YES size:50 iterations:50
# print "\n", header
m = re.match(".*structured:YES.*size\:(\d+).*iterations:(\d+).*box:(\d+).*",header)
if m:
self._size = None
self._iterations = None
try:
self._size = int (m.group(1))
self._iterations = int(m.group(2))
self._box = int(m.group(3))
except:
self._size = None
self._iterations = None
else:
m = re.match(".*structured:YES.*size\:(\d+).*iterations:(\d+).*",header)
self._size = None
self._iterations = None
if m:
try:
self._size = int (m.group(1))
self._iterations = int(m.group(2))
except:
self._size = None
self._iterations = None
self._size = str(self._size) + str(self._iterations)
# return self._size
"""
LEGACY METHODS SECTION
"""
"""
legacy method
"""
# def _relativeErrorParser(self, errList):
# relErr = []
# zeroGold = 0
# zeroOut = 0
# self._cleanRelativeErrorAttributes()
# for err in errList:
# # [posX, posY, posZ, None, None, xr, xe, yr, ye, zr, ze]
#
# xr = err[5]
# xe = err[6]
# yr = err[7]
# ye = err[8]
# zr = err[9]
# ze = err[10]
# # print xr,xe,yr,ye,zr,ze
# # print err
# # absoluteErrV = abs(ve - vr)
# absoluteErrX = abs(xe - xr)
# absoluteErrY = abs(ye - yr)
# absoluteErrZ = abs(ze - zr)
# relErrorV = 0
# relErrorX = 0
# relErrorY = 0
# relErrorZ = 0
# # if abs(vr) < 1e-6:
# # zeroOut += 1
# if abs(xr) < 1e-6:
# zeroOut += 1
# if abs(yr) < 1e-6:
# zeroOut += 1
# if abs(zr) < 1e-6:
# zeroOut += 1
# # if abs(ve) < 1e-6:
# # zeroGold += 1
# # else:
# # relErrorV = abs( absoluteErrV / ve ) * 100
# if abs(xe) < 1e-6:
# zeroGold += 1
# else:
# relErrorX = abs(absoluteErrX / xe) * 100
# if abs(ye) < 1e-6:
# zeroGold += 1
# else:
# relErrorY = abs(absoluteErrY / ye) * 100
# if abs(ze) < 1e-6:
# zeroGold += 1
# else:
# relErrorZ = abs(absoluteErrZ / ze) * 100
#
# relError = relErrorX + relErrorY + relErrorZ # relErrorV +
# if relError > 0:
# relErr.append(relError)
# self._placeRelativeError(relError, err)
# if len(relErr) > 0:
# self._maxRelErr = max(relErr)
# self._minRelErr = min(relErr)
# self._avgRelErr = sum(relErr) / float(len(relErr))
#
# self._zeroOut = zeroOut
# self._zeroGold = zeroGold
"""
LEGACY METHODS SECTION
"""
"""
legacy method
"""
# def __init__(self, **kwargs):
# Parser.__init__(self, **kwargs)
"""
legacy method
"""
# def buildImageMethod(self):
# # type: (integer) -> boolean
# return False
| 33.198758
| 208
| 0.449579
|
c87e62bd97d388c6386bdfd8c0ca8aa158b4a1d5
| 1,105
|
py
|
Python
|
tests/at_destructor.py
|
microsoft/svirl
|
8d0da6a03ad20315a690a3c65bb8b60c196c3f3d
|
[
"MIT"
] | 6
|
2020-12-21T20:11:13.000Z
|
2022-03-21T07:55:33.000Z
|
tests/at_destructor.py
|
ivan-sadovsky/svirl
|
523abe9fcf2a5e9d192782d7aeb7093c86ef4036
|
[
"MIT"
] | 4
|
2021-07-15T20:12:55.000Z
|
2021-08-07T22:11:18.000Z
|
tests/at_destructor.py
|
ivan-sadovsky/svirl
|
523abe9fcf2a5e9d192782d7aeb7093c86ef4036
|
[
"MIT"
] | 9
|
2020-12-22T06:06:16.000Z
|
2022-03-25T17:26:55.000Z
|
import numpy as np
import sys, os
sys.path.append(os.path.abspath("../"))
from svirl import GLSolver
from common import *
cd_test_number, cd_test_passed = 0, 0
for i in range(10):
try:
gl = GLSolver(
Nx = np.random.randint(4, 1024),
Ny = np.random.randint(4, 1024),
dx = 0.2 + 0.2*np.random.rand(),
dy = 0.2 + 0.2*np.random.rand(),
gl_parameter = 1.0 if np.random.rand() > 0.5 else np.inf,
)
gl.vars.order_parameter = 1.0
gl.vars.randomize_order_parameter(level=0.5)
if not np.isposinf(gl.params.gl_parameter):
gl.params.gl_parameter = 1.0 + 3.0*np.random.rand()
gl.params.external_field = 0.01 + 0.1*np.random.rand()
gl.params.homogeneous_external_field = 0.01 + 0.1*np.random.rand()
apply_material_tiling(gl, verbose=False)
del gl
except:
pass
else:
cd_test_passed += 1
cd_test_number += 1
print_test_result('Constructor-destructor test', cd_test_number, cd_test_passed)
| 29.078947
| 80
| 0.580995
|
89e9f1b999404241218ce5d90d90b4adfa184b3f
| 1,295
|
py
|
Python
|
Defect_Prediction/gradient_descent.py
|
pujahabibi/SDP-Bayes
|
ae0fdba8b457a589be048af7c0c116923e56b19d
|
[
"MIT"
] | null | null | null |
Defect_Prediction/gradient_descent.py
|
pujahabibi/SDP-Bayes
|
ae0fdba8b457a589be048af7c0c116923e56b19d
|
[
"MIT"
] | null | null | null |
Defect_Prediction/gradient_descent.py
|
pujahabibi/SDP-Bayes
|
ae0fdba8b457a589be048af7c0c116923e56b19d
|
[
"MIT"
] | null | null | null |
__author__ = 'Kiki Rizki Arpiandi'
import numpy as np
import random
import matplotlib.pyplot as plt
class model:
def __init__(self, x,y):
self.y = y
self.x = x
self.theta0 = random.randrange(-5, 5, 1)
self.theta1 = random.randrange(-5, 5, 1)
def prediksi(self, x):
return (self.theta1 * x) + self.theta0
def Error(self):
return np.average((self.prediksi(self.x) - self.y) ** 2) / 2
def delta_J_delta_theta0(self):
return np.average((self.prediksi(self.x) - self.y))
def delta_J_delta_theta1(self):
return np.average((self.prediksi(self.x) - self.y) * self.x)
def plot(self):
        plt.plot(self.x, self.prediksi(self.x))
plt.plot(self.x, self.y, 'ro')
plt.show()
def do_gradient_descent(self):
error = 0
        while abs(self.Error() - error) > 0.0000001:
            error = self.Error()
            temp0 = self.theta0 - 0.01 * self.delta_J_delta_theta0()
            temp1 = self.theta1 - 0.01 * self.delta_J_delta_theta1()
self.theta0 = temp0
self.theta1 = temp1
data_x = np.array([0., 3., 5., 6., 9.])
data_y = np.array([72., 95., 112., 77., 54.])
reg = model(data_x,data_y)
reg.do_gradient_descent()
reg.plot()
print(reg.theta1,reg.theta0)
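# The loop above is plain batch gradient descent on
#   J(theta0, theta1) = mean((theta1*x + theta0 - y)**2) / 2
# using the gradients dJ/dtheta0 = mean(h - y) and dJ/dtheta1 = mean((h - y)*x)
# with h = theta1*x + theta0, a fixed learning rate of 0.01, and a stopping
# rule of |J_new - J_old| <= 1e-7.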
| 28.152174
| 68
| 0.59305
|
d359e5670f8d135c72c5a7bf15964619d926d86b
| 209
|
py
|
Python
|
radiomics/__init__.py
|
dchansen/radiomics
|
ea34882df0127c0921f6f412fbb708d1c49b4f29
|
[
"Apache-2.0"
] | 11
|
2017-06-23T14:23:43.000Z
|
2020-09-04T01:36:58.000Z
|
radiomics/__init__.py
|
dchansen/radiomics
|
ea34882df0127c0921f6f412fbb708d1c49b4f29
|
[
"Apache-2.0"
] | null | null | null |
radiomics/__init__.py
|
dchansen/radiomics
|
ea34882df0127c0921f6f412fbb708d1c49b4f29
|
[
"Apache-2.0"
] | 11
|
2017-11-30T06:40:38.000Z
|
2020-12-31T12:44:52.000Z
|
from radiomics.features import *
__all__ = ['group1_features', 'tumour_features', 'wavelet_features', 'calculate_all_features',
'gray_level_cooccurrence_features', 'gray_level_runlength_features']
| 41.8
| 94
| 0.779904
|
d738c678fecd05f2450ff9c9d059e834960f0041
| 2,498
|
py
|
Python
|
doc/conf.py
|
carstenblank/qiskit-aws-braket-provider
|
539f0c75c2ccf1f6e5e981b92ea74f497fcba237
|
[
"Apache-2.0"
] | 7
|
2020-09-25T17:16:54.000Z
|
2021-05-20T10:42:52.000Z
|
doc/conf.py
|
carstenblank/qiskit-aws-braket-provider
|
539f0c75c2ccf1f6e5e981b92ea74f497fcba237
|
[
"Apache-2.0"
] | 4
|
2020-09-21T19:33:39.000Z
|
2020-09-22T12:21:11.000Z
|
doc/conf.py
|
carstenblank/qiskit-aws-braket-provider
|
539f0c75c2ccf1f6e5e981b92ea74f497fcba237
|
[
"Apache-2.0"
] | 1
|
2020-09-21T19:32:16.000Z
|
2020-09-21T19:32:16.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../src'))
# -- Project information -----------------------------------------------------
project = 'qiskit-aws-braket-provider'
copyright = '2020, Carsten Blank'
author = 'Carsten Blank'
# The full version, including alpha/beta/rc tags
release = '0.0.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinxcontrib.apidoc",
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.coverage"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
htmlhelp_basename = "{}doc".format(project)
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
source_suffix = ".rst"
master_doc = "index"
napoleon_use_rtype = False
autoclass_content = "both"
autodoc_member_order = "bysource"
default_role = "py:obj"
apidoc_module_dir = "../src/qiskit_aws_braket_provider"
apidoc_output_dir = "_apidoc"
apidoc_separate_modules = True
apidoc_module_first = True
apidoc_extra_args = ["-f", "--implicit-namespaces", "-H", "API Reference"]
| 32.025641
| 79
| 0.681345
|
c6c2a118cec0d4fd745d1c6fa0bd6c57aba8fd8b
| 241
|
py
|
Python
|
examples/models/openvino_imagenet_ensemble/resources/combiner/ImageNetCombiner.py
|
masroorhasan/seldon-core
|
674e00cd4b740ee21ac3de00ab145ebd6ebf8b9e
|
[
"Apache-2.0"
] | null | null | null |
examples/models/openvino_imagenet_ensemble/resources/combiner/ImageNetCombiner.py
|
masroorhasan/seldon-core
|
674e00cd4b740ee21ac3de00ab145ebd6ebf8b9e
|
[
"Apache-2.0"
] | null | null | null |
examples/models/openvino_imagenet_ensemble/resources/combiner/ImageNetCombiner.py
|
masroorhasan/seldon-core
|
674e00cd4b740ee21ac3de00ab145ebd6ebf8b9e
|
[
"Apache-2.0"
] | null | null | null |
import logging
logger = logging.getLogger(__name__)
class ImageNetCombiner(object):
def aggregate(self, Xs, features_names):
print("ImageNet Combiner aggregate called")
logger.info(Xs)
return (Xs[0]+Xs[1])/2.0
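# Illustrative sketch: the combiner averages the outputs of its two children.
# The arrays below are hypothetical stand-ins for two model predictions.
def _example_aggregate():
    import numpy as np
    combiner = ImageNetCombiner()
    return combiner.aggregate([np.array([0.2, 0.8]), np.array([0.4, 0.6])], None)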
| 21.909091
| 51
| 0.680498
|
7c08996f378827459d8a8041c178c1fb5e0846cc
| 896
|
bzl
|
Python
|
xls/build/pybind11.bzl
|
ted-xie/xls
|
ef48ade3403fffc6481ffd779e49aa7082ee268f
|
[
"Apache-2.0"
] | null | null | null |
xls/build/pybind11.bzl
|
ted-xie/xls
|
ef48ade3403fffc6481ffd779e49aa7082ee268f
|
[
"Apache-2.0"
] | null | null | null |
xls/build/pybind11.bzl
|
ted-xie/xls
|
ef48ade3403fffc6481ffd779e49aa7082ee268f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapter for Google-internal pybind_extension rule vs OSS rule."""
load("//third_party/pybind11_bazel:build_defs.bzl", "pybind_extension")
def xls_pybind_extension(**kwargs):
py_deps = kwargs.pop("py_deps", [])
deps = kwargs.pop("deps", [])
kwargs["deps"] = py_deps + deps
pybind_extension(**kwargs)
| 37.333333
| 74
| 0.737723
|
b6431949684815d73defeb67511e3a9e91fbe497
| 3,850
|
py
|
Python
|
majestic-monolith-django/auth/docs.py
|
kokospapa8/majestic-monolith-django
|
a0879989a651ecef6761ee7fce619ab17738bb35
|
[
"Apache-2.0"
] | 1
|
2022-03-12T09:55:36.000Z
|
2022-03-12T09:55:36.000Z
|
majestic-monolith-django/auth/docs.py
|
kokospapa8/majestic-monolith-django
|
a0879989a651ecef6761ee7fce619ab17738bb35
|
[
"Apache-2.0"
] | 6
|
2022-03-09T10:42:44.000Z
|
2022-03-31T08:27:25.000Z
|
majestic-monolith-django/auth/docs.py
|
kokospapa8/majestic-monolith-django
|
a0879989a651ecef6761ee7fce619ab17738bb35
|
[
"Apache-2.0"
] | null | null | null |
from drf_yasg.utils import swagger_auto_schema
from core.docs import dict_response_schema
from .serializers import (
PhonenumberCheckSerializer,
SignupTokenRequestSerializer
)
doc_auth_check_phonenumber = swagger_auto_schema(
operation_id='auth_check_phonenumber',
operation_description='Called on first screen if phonenumber exists. '
'When you send phonenumber prepend "+82" to full number \n'
'If exists(200) -> go to signin, \n'
'if (404) -> go to signup page',
operation_summary="check phonenumber for signup or signin",
responses={
200: PhonenumberCheckSerializer,
400: 'invalid phonenumber type or This number is permanently banned',
404: 'phonenumber is valid for signup'
}
)
doc_auth_singup = swagger_auto_schema(
operation_id='auth_signup',
operation_description='Call this on signup process, \n'
                          'Phonenumber identity verification is finished \n'
'you must add +82 for each phonenumber ',
operation_summary="signup",
tags=["auth signup"],
responses={
201: dict_response_schema(
{
'user_profile': {'username': 'username'},
'uuid': '<uuid>',
'refresh': '',
'token': ''
}
),
400: 'phonenumber already exists \n'
'phonenumber not valid for signup. verify again \n'
'This number is permanently banned',
}
)
doc_auth_singin = swagger_auto_schema(
operation_id='auth_signin',
operation_description='Description: \n'
' - this api will only be used staff members\n\n'
'Params: \n\n'
'Permission: '
' - AllowAny\n\n'
'Link: ',
operation_summary="signin with password",
tags=["auth signin"],
responses={
200: "signin successful",
400: 'invalid credential \n'
}
)
doc_auth_singin_token_request = swagger_auto_schema(
operation_id='auth_signin_token_request',
    operation_description='Call this on signin process, \n'
'if requested 5 times in 24 hours, request is blocked \n'
                          '01012341XXX bypass token creation in non-prod environment ',
operation_summary="signin token request",
tags=["auth signin"],
responses={
200: dict_response_schema({"timestamp_expires": "unix timestamp"}),
400: "invalid phonenumber\n"
"banned phonenumber",
429: 'This number is blocked for 24 hours \n'
'phonenumber does not exists \n'
'This number is permanently banned',
}
)
doc_auth_singin_token_confirm = swagger_auto_schema(
    operation_id='auth_signin_token_confirm',
    operation_description='Call this on signin process, \n'
                          'extra descriptions.',
operation_summary="signin token confirm",
tags=["auth signin"],
responses={
200: dict_response_schema(
{
'refresh': '',
'token': ''
}
),
400: 'Invalid Token or token is not sent',
401: 'Token verification successful, but fail authentication'
}
)
doc_auth_unregister = swagger_auto_schema(
operation_id='auth_unregister',
operation_description='delete user data, \n'
'blacklist token, remove JWT from local storage',
operation_summary="unregister",
responses={
204: "unregistered"
}
)
doc_auth_signout = swagger_auto_schema(
operation_id='auth_signout',
operation_description='signout, \n'
'blacklist token, \n'
'remove JWT from local storage',
operation_summary="signout",
responses={
204: "sign out completed"
}
)
| 32.905983
| 87
| 0.605455
|
cf06ababc8ebc620cc2ee570a4f9fc54b26c892b
| 1,023
|
py
|
Python
|
terminfo/v/vt52.py
|
darkelf44/PyUtils
|
4072ff31d2b89dd604ef0e09eb90b7f3b5cf23df
|
[
"MIT"
] | null | null | null |
terminfo/v/vt52.py
|
darkelf44/PyUtils
|
4072ff31d2b89dd604ef0e09eb90b7f3b5cf23df
|
[
"MIT"
] | null | null | null |
terminfo/v/vt52.py
|
darkelf44/PyUtils
|
4072ff31d2b89dd604ef0e09eb90b7f3b5cf23df
|
[
"MIT"
] | null | null | null |
# Imports
from terminfo import TerminalModule
# Terminal: dec vt52
TerminalModule(['vt52'], use=[], backspaces_with_bs=True, init_tabs=8, lines=24, acs_chars=b'+h.k0affggolpnqprrss', cursor_home=b'\x1bH', key_backspace=b'\x08', newline=b'\r\n', scroll_reverse=b'\x1bI', exit_alt_charset_mode=b'\x1bG', keypad_local=b'\x1b>', enter_alt_charset_mode=b'\x1bF', keypad_xmit=b'\x1b=', user8=b'\x1b/[KL]', cursor_left=b'\x1bD', cursor_down=b'\x1bB', cursor_address=b'\x1bY%p1%{32}%+%c%p2%{32}%+%c', key_left=b'\x1bD', key_down=b'\x1bB', key_right=b'\x1bC', key_up=b'\x1bA', columns=80, bell=b'\x07', clear_screen=b'\x1bH\x1bJ', carriage_return=b'\r', cursor_right=b'\x1bC', cursor_up=b'\x1bA', clr_eos=b'\x1bJ', clr_eol=b'\x1bK', tab=b'\t', scroll_forward=b'\n', user9=b'\x1bZ', key_a1=b'\x1b?q', key_a3=b'\x1b?s', key_b2=b'\x1b?r', key_c1=b'\x1b?p', key_c3=b'\x1b?n', key_f0=b'\x1b?y', key_f1=b'\x1bP', key_f2=b'\x1bQ', key_f3=b'\x1bR', key_f5=b'\x1b?t', key_f6=b'\x1b?u', key_f7=b'\x1b?v', key_f8=b'\x1b?w', key_f9=b'\x1b?x')
| 170.5
| 954
| 0.690127
|
5ed61e55314a04e20ea0d7cc33bcdcb98684b368
| 4,100
|
py
|
Python
|
lib/report_base/assets_base/chamber.py
|
JinIgarashi/postgis2inventoryreport
|
5f1d595b889779137bfbef72c5d8814031a2d33c
|
[
"MIT"
] | null | null | null |
lib/report_base/assets_base/chamber.py
|
JinIgarashi/postgis2inventoryreport
|
5f1d595b889779137bfbef72c5d8814031a2d33c
|
[
"MIT"
] | 3
|
2020-04-15T13:01:33.000Z
|
2021-12-13T20:31:08.000Z
|
lib/report_base/assets_base/chamber.py
|
WASAC/postgis2inventoryreport
|
5f1d595b889779137bfbef72c5d8814031a2d33c
|
[
"MIT"
] | null | null | null |
from lib.report_base.assets_base.assets_base import AssetsBase
class Chambers(AssetsBase):
class Chamber(object):
def __init__(self, params):
self.id = params[0]
self.x = params[1]
self.y = params[2]
self.z = params[3]
self.chamber_size = params[4]
self.material = params[5]
self.construction_year = params[6]
self.status = params[7]
self.observation = params[8]
self.for_breakpressure = params[9]
self.has_clorination = params[10]
self.sector = params[11]
self.cell = params[12]
self.village = params[13]
def __init__(self, wss_id):
super().__init__(wss_id, "Chambers")
self.chamber_type = ''
def get_assets_info(self, db):
query = " SELECT "
query += " a.chamber_id, "
query += " round(cast(st_x(a.geom) as numeric),6) as x, "
query += " round(cast(st_y(a.geom) as numeric),6) as y, "
query += " cast(ST_Value(e.rast, 1, a.geom) as integer) as z, "
query += " a.chamber_size, "
query += " a.material, "
query += " COALESCE(a.rehabilitation_year, a.construction_year) as construction_year, "
query += " b.status, "
query += " a.observation, "
query += " CASE WHEN a.is_breakpressure = true THEN 'YES' ELSE 'NO' END as for_breakpressure, "
query += " CASE WHEN a.chlorination_unit = true THEN 'YES' ELSE 'NO' END as has_clorination, "
query += " h.sector, "
query += " g.cell, "
query += " f.village "
query += " FROM chamber a "
query += " INNER JOIN status b "
query += " ON a.status = b.code "
query += " INNER JOIN rwanda_dem_10m e "
query += " ON ST_Intersects(e.rast, a.geom) "
query += " INNER JOIN village f ON ST_Intersects(f.geom, a.geom) "
query += " INNER JOIN cell g ON f.cell_id = g.cell_id "
query += " INNER JOIN sector h ON f.sect_id = h.sect_id "
query += " WHERE a.chamber_type = '{0}' ".format(self.chamber_type)
query += " AND a.wss_id = {0}".format(self.wss_id)
result = db.execute(query)
self.assetsList = []
for data in result:
self.assetsList.append(Chambers.Chamber(data))
return self.assetsList
def add_title(self, doc):
doc.add_heading('List of {0}'.format(self.chamber_type), level=4)
def create_column_list(self):
return [#AssetsBase.Column('ID', 'id', ''),
AssetsBase.Column('X', 'x', ''),
AssetsBase.Column('Y', 'y', ''),
AssetsBase.Column('Z', 'z', ''),
AssetsBase.Column('Sector', 'sector', ''),
AssetsBase.Column('Cell', 'cell', ''),
AssetsBase.Column('Village', 'village', ''),
AssetsBase.Column('Construction', 'construction_year', ''),
AssetsBase.Column('Status', 'status', ''),
AssetsBase.Column('Size', 'chamber_size', ''),
AssetsBase.Column('Material', 'material', ''),
AssetsBase.Column('Break pressure', 'for_breakpressure', 'NO'),
AssetsBase.Column('Chlorination Unit', 'has_clorination', 'NO'),
AssetsBase.Column('Observation', 'observation', '')]
def create(self, db, doc):
chamber_type_list = ["Valve chamber", "Air release chamber", "Washout chamber",
"Break Pressure chamber", "PRV chamber",
"Starting chamber", "Collection chamber"]
for chamber_type in chamber_type_list:
self.chamber_type = chamber_type
if chamber_type_list.index(chamber_type) == 0:
self.add_main_title(doc)
self.get_assets_info(db)
if len(self.assetsList) > 0:
self.add_title(doc)
self.add_table(doc)
self.add_break(doc)
| 45.555556
| 108
| 0.540244
|
ae676549583491345a2bcbe4f67e98b71cc55663
| 1,741
|
py
|
Python
|
cctbx/omz/bfgs.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
cctbx/omz/bfgs.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
cctbx/omz/bfgs.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
def h0_scaling(sk, yk):
"Nocedal & Wright (1999) Eq. 8.20; same as Eq. 9.6"
yty = yk.dot(yk)
assert yty > 0
return yk.dot(sk) / yty
def h_update(hk, sk, yk):
"""Nocedal & Wright (1999) Eq. 9.2
Pre-condition: hk must be positive definite"""
yts = yk.dot(sk)
assert yts > 0
rho = 1 / yts
v = -rho * yk.matrix_outer_product(sk)
v.matrix_diagonal_add_in_place(1)
hl = v.matrix_transpose_multiply(hk).matrix_multiply(v)
hl += rho * sk.matrix_outer_product(sk)
return hl
def b_update(bk, sk, yk):
"""Nocedal & Wright (1999) Eq. 8.19
Pre-condition: bk must be positive definite"""
yts = yk.dot(sk)
assert yts > 0
bsstb = bk.matrix_multiply(sk.matrix_outer_product(sk)).matrix_multiply(bk)
stbs = sk.dot(bk.matrix_multiply(sk))
assert stbs > 0
yyt = yk.matrix_outer_product(yk)
bl = bk - bsstb / stbs + yyt / yts
return bl
class memory_element(object):
__slots__ = ["s", "y", "rho"]
def __init__(O, s, y):
O.s = s
O.y = y
yts = y.dot(s)
if (yts > 0):
O.rho = 1 / yts
else:
O.rho = None
def get(O):
return O.s, O.y, O.rho
def hg_two_loop_recursion(memory, hk0, gk):
"""Nocedal & Wright (1999) Algorithm 9.1
Pre-condition: hk0 must be positive definite"""
q = gk.deep_copy()
m = len(memory)
alpha = [None] * m
for i in xrange(m-1,-1,-1):
s, y, rho = memory[i].get()
alpha[i] = a = rho * s.dot(q)
q = q - a * y
if (hk0.is_square_matrix()):
r = hk0.matrix_multiply(q)
elif (hk0.is_trivial_1d()):
r = hk0 * q
else:
raise ValueError("Improper hk0")
for i in xrange(m):
s, y, rho = memory[i].get()
beta = rho * y.dot(r)
r = r + s * (alpha[i] - beta)
return r
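# Reference formulas implemented above (Nocedal & Wright, 1999):
#   h0_scaling: gamma = (y.s) / (y.y), the scaling for the initial inverse
#       Hessian H0 = gamma * I.
#   h_update:   H_new = (I - rho*s*y^T) H (I - rho*y*s^T) + rho*s*s^T,
#       with rho = 1 / (y^T s)  (BFGS inverse-Hessian update).
#   b_update:   B_new = B - (B*s*s^T*B) / (s^T*B*s) + (y*y^T) / (y^T s)
#       (BFGS Hessian update).
#   hg_two_loop_recursion: the L-BFGS two-loop recursion that evaluates H*g
#       from the stored (s, y, rho) pairs without forming H explicitly.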
| 24.871429
| 77
| 0.607697
|
da167431956a9f0b2d474f736b78875af8f2dfdd
| 27,102
|
py
|
Python
|
src/torch_core/vibo.py
|
gpoesia/variational-item-response-theory-public
|
6a0db81068695422dddec8832ce353879c5acb82
|
[
"MIT"
] | null | null | null |
src/torch_core/vibo.py
|
gpoesia/variational-item-response-theory-public
|
6a0db81068695422dddec8832ce353879c5acb82
|
[
"MIT"
] | null | null | null |
src/torch_core/vibo.py
|
gpoesia/variational-item-response-theory-public
|
6a0db81068695422dddec8832ce353879c5acb82
|
[
"MIT"
] | 1
|
2022-01-06T01:16:10.000Z
|
2022-01-06T01:16:10.000Z
|
import os
import time
import math
import numpy as np
from tqdm import tqdm
import sys
import csv
import json
import torch
from torch import optim
import torch.distributions as dist
import torch.nn.functional as F
from src.torch_core.models import (
VIBO_1PL,
VIBO_2PL,
VIBO_3PL,
)
from src.datasets import load_dataset, artificially_mask_dataset
from src.utils import AverageMeter, save_checkpoint
from src.config import OUT_DIR, IS_REAL_WORLD
from roar.pretraining import CharBERT, CharBERTClassifier
import environment
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--irt-model', type=str, default='1pl',
choices=['1pl', '2pl', '3pl'],
help='1pl|2pl|3pl (default: 1pl)')
parser.add_argument('--dataset', type=str, default='1pl_simulation',
choices=[
'1pl_simulation',
'2pl_simulation',
'3pl_simulation',
'critlangacq',
'duolingo',
'wordbank',
'pisa2015_science',
'json',
'roar',
],
help='which dataset to run on (default: 1pl_simulation)')
parser.add_argument('--ability-dim', type=int, default=1,
help='number of ability dimensions (default: 1)')
parser.add_argument('--ability-merge', type=str, default='product',
choices=['mean', 'product', 'transformer'],
help='mean|product|transformer (default: product)')
parser.add_argument('--conditional-posterior', action='store_true', default=False,
help='q(ability|item,response) vs q(ability|response)')
parser.add_argument('--generative-model', type=str, default='irt',
choices=['irt', 'link', 'deep', 'residual'],
help='irt|link|deep|residual (default: irt)')
parser.add_argument('--response-dist', type=str, default='bernoulli',
choices=['gaussian', 'bernoulli'],
help='gaussian|bernoulli (default: bernoulli)')
parser.add_argument('--drop-missing', action='store_true', default=False)
parser.add_argument('--artificial-missing-perc', type=float, default=0.,
help='how much to blank out so we can measure acc (default: 0)')
parser.add_argument('--mask-items', action='store_true', default=False,
help='mask items in the train/test split (default: masks students)')
parser.add_argument('--n-norm-flows', type=int, default=0,
help='Number of normalizing flows (default: 0)')
parser.add_argument('--no-infer-dict', action='store_true', default=False,
help='if true, skip infer dict collection (default: False)')
parser.add_argument('--no-marginal', action='store_true', default=False,
help='if true, skip marginal loglike computation (default: False)')
parser.add_argument('--no-test', action='store_true', default=False,
help='if true, skip test (default: False)')
parser.add_argument('--no-predictive', action='store_true', default=False,
help='if true, skip posterior predictive computation (default: False)')
parser.add_argument('--num-person', type=int, default=1000,
help='number of people in data (default: 1000)')
parser.add_argument('--num-item', type=int, default=100,
                        help='number of items in data (default: 100)')
parser.add_argument('--num-posterior-samples', type=int, default=400,
help='number of samples to use for analysis (default: 400)')
parser.add_argument('--hidden-dim', type=int, default=64,
help='number of hidden dims (default: 64)')
parser.add_argument('--max-num-person', help='limit the number of persons in dataset')
parser.add_argument('--max-num-item', help='limit the number of items in dataset')
parser.add_argument('--out-dir', type=str, default=OUT_DIR,
help='where to save chkpts (default: OUT_DIR)')
parser.add_argument('--out-results', type=str, default=None,
help='where to save results as JSON (default: print to stdout)')
parser.add_argument('--lr', type=float, default=5e-3,
help='default learning rate: 5e-3')
parser.add_argument('--batch-size', type=int, default=16, metavar='N',
help='input batch size for training (default: 16)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 100)')
parser.add_argument('--max-iters', type=int, default=-1, metavar='N',
help='number of maximum iterations (default: -1)')
parser.add_argument('--num-workers', type=int, default=0,
help='number of workers for data loading (default: 0)')
parser.add_argument('--anneal-kl', action='store_true', default=False,
help='anneal KL divergence (default: False)')
parser.add_argument('--beta-kl', type=float, default=1.0,
help='constant multiplier on KL (default: 1.0)')
parser.add_argument('--predict', type=str,
help='File with words/nonwords to predict parameters.')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--gpu-device', type=int, default=0,
help='which CUDA device to use (default: 0)')
parser.add_argument('--embed-conpole', type=str, default=False,
help='Use the given pre-trained ConPoLe model to embed problems.')
parser.add_argument('--embed-bert', type=str, default=False,
help='Use the given pre-trained BERT model to embed problems.')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training (default: False)')
args = parser.parse_args()
if args.n_norm_flows > 0:
args.no_infer_dict = True
args.no_predictive = True
if args.artificial_missing_perc > 0:
args.no_predictive = False
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if IS_REAL_WORLD[args.dataset]:
# these params are only for IRT simulation datasets
args.num_person = None
args.num_item = None
if args.max_num_person is not None:
args.max_num_person = int(args.max_num_person)
if args.max_num_item is not None:
args.max_num_item = int(args.max_num_item)
else:
args.max_num_person = None
args.max_num_item = None
out_file = 'VIBO_{}_{}_{}_{}_{}person_{}item_{}maxperson_{}maxitem_{}maskperc_{}ability_{}_{}_{}seed{}'.format(
args.irt_model,
args.dataset,
args.response_dist,
args.generative_model,
args.num_person,
args.num_item,
args.max_num_person,
args.max_num_item,
args.artificial_missing_perc,
args.ability_dim,
args.ability_merge,
('conpole_'
if args.embed_conpole
else ('bert'
if args.embed_bert
else '')),
'conditional_q' if args.conditional_posterior else 'unconditional_q',
args.seed,
)
args.out_dir = os.path.join(args.out_dir, out_file)
if not os.path.isdir(args.out_dir):
os.makedirs(args.out_dir)
device = torch.device("cuda" if args.cuda else "cpu")
if args.cuda: torch.cuda.set_device(args.gpu_device)
if args.response_dist == 'bernoulli':
dataset_name = args.dataset
else:
dataset_name = f'{args.dataset}_continuous'
train_dataset = load_dataset(
dataset_name,
train = True,
num_person = args.num_person,
num_item = args.num_item,
ability_dim = args.ability_dim,
max_num_person = args.max_num_person,
max_num_item = args.max_num_item,
)
test_dataset = load_dataset(
dataset_name,
train = False,
num_person = args.num_person,
num_item = args.num_item,
ability_dim = args.ability_dim,
max_num_person = args.max_num_person,
max_num_item = args.max_num_item,
)
if args.artificial_missing_perc > 0:
train_dataset = artificially_mask_dataset(
train_dataset,
args.artificial_missing_perc,
args.mask_items,
)
num_person = train_dataset.num_person
num_item = train_dataset.num_item
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size = args.batch_size,
shuffle = True,
num_workers = args.num_workers,
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size = args.batch_size,
shuffle = False,
num_workers = args.num_workers,
)
N_mini_batches = len(train_loader)
if args.max_iters != -1:
args.epochs = int(math.ceil(args.max_iters / float(len(train_loader))))
print(f'Found MAX_ITERS={args.max_iters}, setting EPOCHS={args.epochs}')
if args.irt_model == '1pl':
model_class = VIBO_1PL
elif args.irt_model == '2pl':
model_class = VIBO_2PL
elif args.irt_model == '3pl':
model_class = VIBO_3PL
else:
raise Exception(f'model {args.irt_model} not recognized')
if args.embed_conpole:
embedding_model = torch.load(args.embed_conpole, map_location=device)
embedding_model.to(device)
elif args.embed_bert:
embedding_model = torch.load(args.embed_bert, map_location=device)
embedding_model.to(device)
else:
embedding_model = None
model = model_class(
args.ability_dim,
num_item,
hidden_dim = args.hidden_dim,
ability_merge = args.ability_merge,
conditional_posterior = args.conditional_posterior,
generative_model = args.generative_model,
response_dist = args.response_dist,
replace_missing_with_prior = not args.drop_missing,
n_norm_flows = args.n_norm_flows,
embedding_model = embedding_model,
embed_conpole=args.embed_conpole,
embed_bert=args.embed_bert,
problems=train_dataset.problems,
).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
def get_annealing_factor(epoch, which_mini_batch):
if args.anneal_kl:
annealing_factor = \
(float(which_mini_batch + epoch * N_mini_batches + 1) /
float(args.epochs // 2 * N_mini_batches))
else:
annealing_factor = args.beta_kl
return annealing_factor
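    # With --anneal-kl, the KL weight ramps linearly towards 1 over the first
    # half of training (epochs/2 * N_mini_batches steps, growing past 1 after
    # that); otherwise the constant --beta-kl multiplier is used on every batch.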
def train(epoch):
model.train()
train_loss = AverageMeter()
pbar = tqdm(total=len(train_loader))
for batch_idx, (index, response, problem_ids, mask) in enumerate(train_loader):
mb = response.size(0)
response = response.to(device)
mask = mask.long().to(device)
annealing_factor = get_annealing_factor(epoch, batch_idx)
optimizer.zero_grad()
if args.n_norm_flows > 0:
(
response, mask, response_mu,
ability_k, ability,
ability_mu, ability_logvar, ability_logabsdetjac,
item_feat_k, item_feat,
item_feat_mu, item_feat_logvar, item_feat_logabsdetjac,
) = model(response, mask)
loss = model.elbo(
response, mask, response_mu,
ability, ability_mu, ability_logvar,
item_feat, item_feat_mu, item_feat_logvar,
annealing_factor = annealing_factor,
use_kl_divergence = False,
ability_k = ability_k,
item_feat_k = item_feat_k,
ability_logabsdetjac = ability_logabsdetjac,
item_logabsdetjac = item_feat_logabsdetjac,
)
else:
outputs = model(response, mask)
loss = model.elbo(*outputs, annealing_factor=annealing_factor,
use_kl_divergence=True)
loss.backward()
optimizer.step()
train_loss.update(loss.item(), mb)
pbar.update()
pbar.set_postfix({'Loss': train_loss.avg})
pbar.close()
print('====> Train Epoch: {} Loss: {:.4f}'.format(epoch, train_loss.avg))
return train_loss.avg
def test(epoch):
model.eval()
test_loss = AverageMeter()
pbar = tqdm(total=len(test_loader))
with torch.no_grad():
for _, response, _, mask in test_loader:
mb = response.size(0)
response = response.to(device)
mask = mask.long().to(device)
if args.n_norm_flows > 0:
(
response, mask, response_mu,
ability_k, ability,
ability_mu, ability_logvar, ability_logabsdetjac,
item_feat_k, item_feat,
item_feat_mu, item_feat_logvar, item_feat_logabsdetjac,
) = model(response, mask)
loss = model.elbo(
response, mask, response_mu,
ability, ability_mu, ability_logvar,
item_feat, item_feat_mu, item_feat_logvar,
use_kl_divergence = False,
ability_k = ability_k,
item_feat_k = item_feat_k,
ability_logabsdetjac = ability_logabsdetjac,
item_logabsdetjac = item_feat_logabsdetjac,
)
else:
outputs = model(response, mask)
loss = model.elbo(*outputs)
test_loss.update(loss.item(), mb)
pbar.update()
pbar.set_postfix({'Loss': test_loss.avg})
pbar.close()
print('====> Test Epoch: {} Loss: {:.4f}'.format(epoch, test_loss.avg))
return test_loss.avg
def get_log_marginal_density(loader):
model.eval()
meter = AverageMeter()
pbar = tqdm(total=len(loader))
with torch.no_grad():
for _, response, _, mask in loader:
mb = response.size(0)
response = response.to(device)
mask = mask.long().to(device)
marginal = model.log_marginal(
response,
mask,
num_samples = args.num_posterior_samples,
)
marginal = torch.mean(marginal)
meter.update(marginal.item(), mb)
pbar.update()
pbar.set_postfix({'Marginal': meter.avg})
pbar.close()
print('====> Marginal: {:.4f}'.format(meter.avg))
return meter.avg
def sample_posterior_predictive(loader):
model.eval()
meter = AverageMeter()
pbar = tqdm(total=len(loader))
with torch.no_grad():
response_sample_set = []
for _, response, _, mask in loader:
mb = response.size(0)
response = response.to(device)
mask = mask.long().to(device)
_, ability_mu, ability_logvar, _, item_feat_mu, item_feat_logvar = \
model.encode(response, mask)
ability_scale = torch.exp(0.5 * ability_logvar)
item_feat_scale = torch.exp(0.5 * item_feat_logvar)
ability_posterior = dist.Normal(ability_mu, ability_scale)
item_feat_posterior = dist.Normal(item_feat_mu, item_feat_scale)
ability_samples = ability_posterior.sample([args.num_posterior_samples])
item_feat_samples = item_feat_posterior.sample([args.num_posterior_samples])
response_samples = []
for i in range(args.num_posterior_samples):
ability_i = ability_samples[i]
item_feat_i = item_feat_samples[i]
response_i = model.decode(ability_i, item_feat_i).cpu()
response_samples.append(response_i)
response_samples = torch.stack(response_samples)
response_sample_set.append(response_samples)
pbar.update()
response_sample_set = torch.cat(response_sample_set, dim=1)
pbar.close()
return {'response': response_sample_set}
def sample_posterior_mean(loader):
model.eval()
meter = AverageMeter()
pbar = tqdm(total=len(loader))
with torch.no_grad():
response_sample_set = []
for _, response, _, mask in loader:
mb = response.size(0)
response = response.to(device)
mask = mask.long().to(device)
_, ability_mu, _, _, item_feat_mu, _ = \
model.encode(response, mask)
response_sample = model.decode(ability_mu, item_feat_mu).cpu()
response_sample_set.append(response_sample.unsqueeze(0))
pbar.update()
response_sample_set = torch.cat(response_sample_set, dim=1)
pbar.close()
return {'response': response_sample_set}
def get_infer_dict(loader):
model.eval()
infer_dict = {}
with torch.no_grad():
ability_mus, item_feat_mus = [], []
ability_logvars, item_feat_logvars = [], []
pbar = tqdm(total=len(loader))
for _, response, _, mask in loader:
mb = response.size(0)
response = response.to(device)
mask = mask.long().to(device)
_, ability_mu, ability_logvar, _, item_feat_mu, item_feat_logvar = \
model.encode(response, mask)
ability_mus.append(ability_mu.cpu())
ability_logvars.append(ability_logvar.cpu())
item_feat_mus.append(item_feat_mu.cpu())
item_feat_logvars.append(item_feat_logvar.cpu())
pbar.update()
ability_mus = torch.cat(ability_mus, dim=0)
ability_logvars = torch.cat(ability_logvars, dim=0)
pbar.close()
infer_dict['ability_mu'] = ability_mus
infer_dict['ability_logvar'] = ability_logvars
infer_dict['item_feat_mu'] = item_feat_mu
infer_dict['item_feat_logvar'] = item_feat_logvar
return infer_dict
is_best, best_loss = False, np.inf
train_losses = np.zeros(args.epochs)
if not args.no_test:
test_losses = np.zeros(args.epochs)
train_times = np.zeros(args.epochs)
for epoch in range(args.epochs):
start_time = time.time()
train_loss = train(epoch)
end_time = time.time()
train_losses[epoch] = train_loss
        train_times[epoch] = end_time - start_time
if not args.no_test:
test_loss = test(epoch)
test_losses[epoch] = test_loss
is_best = test_loss < best_loss
best_loss = min(test_loss, best_loss)
else:
is_best = train_loss < best_loss
best_loss = min(train_loss, best_loss)
save_checkpoint({
'model_state_dict': model.state_dict(),
'epoch': epoch,
'args': args,
}, is_best, folder=args.out_dir)
np.save(os.path.join(args.out_dir, 'train_losses.npy'), train_losses)
np.save(os.path.join(args.out_dir, 'train_times.npy'), train_times)
if not args.no_test:
np.save(os.path.join(args.out_dir, 'test_losses.npy'), test_losses)
for checkpoint_name in ['checkpoint.pth.tar', 'model_best.pth.tar']:
checkpoint = torch.load(os.path.join(args.out_dir, checkpoint_name))
model.load_state_dict(checkpoint['model_state_dict'])
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size = args.batch_size,
shuffle = False,
)
if not args.no_infer_dict:
infer_dict = get_infer_dict(train_loader)
checkpoint['infer_dict'] = infer_dict
if not args.no_predictive:
posterior_predict_samples = sample_posterior_predictive(train_loader)
checkpoint['posterior_predict_samples'] = posterior_predict_samples
if args.artificial_missing_perc > 0:
missing_indices = train_dataset.missing_indices
missing_labels = train_dataset.missing_labels
if np.ndim(missing_labels) == 1:
missing_labels = missing_labels[:, np.newaxis]
inferred_response = posterior_predict_samples['response'].mean(0)
inferred_response = torch.round(inferred_response)
correct, count = 0, 0
for missing_index, missing_label in zip(missing_indices, missing_labels):
inferred_label = inferred_response[missing_index[0], missing_index[1]]
if inferred_label.item() == missing_label[0]:
correct += 1
count += 1
missing_imputation_accuracy = correct / float(count)
checkpoint['missing_imputation_accuracy'] = missing_imputation_accuracy
model_name = "Amortized VIBO" if args.embed_bert or args.embed_conpole else "VIBO"
results = {
"seed": args.seed,
"model": model_name,
"missing_perc": args.artificial_missing_perc,
"accuracy": missing_imputation_accuracy,
}
# Compute empirical difficulty of each item.
empirical_difficulty = {}
for i, item in enumerate(train_dataset.problems):
responses = train_dataset.unmasked_responses[:, i]
# Filter missing responses.
responses = responses[responses != -1]
empirical_difficulty[item] = np.mean(responses)
results["empirical_difficulty"] = empirical_difficulty
# Compute IRT parameters for all items in the dataset.
with torch.no_grad():
mu, logvar = model.item_encoder.predict_new_items(train_dataset.problems)
mu = mu.cpu().numpy()
var = np.exp(logvar.cpu().numpy())
irt_params = {}
for i, item in enumerate(train_dataset.problems):
                    irt_params[item] = {'mu': mu[i].tolist(), 'var': var[i].tolist()}
                results['irt_params'] = irt_params
# Compute frequencies of items in train/test set.
observations = {}
for i, item in enumerate(train_dataset.problems):
responses = train_dataset.unmasked_responses[:, i]
item_mask = train_dataset.mask[:, i]
train_obs = (responses[item_mask] != -1).sum()
                    test_obs = (responses[~item_mask] != -1).sum()
observations[item] = {'train': train_obs, 'test': test_obs}
                results['observations'] = observations
if args.out_results is None:
print(json.dumps(results))
else:
                    with open(args.out_results, 'w') as f:
json.dump(results, f)
if args.predict:
with open(args.predict) as input_items:
rows = list(input_items.readlines())
rows = [r.strip() for r in rows]
with torch.no_grad():
mu, logvar = model.item_encoder.predict_new_items(rows)
mu = mu.cpu().numpy()
var = np.exp(logvar.cpu().numpy())
with open(args.predict + '.out', 'w') as out:
writer = csv.DictWriter(
out,
['item']
+ [f'mu_{i}' for i in range(mu.shape[1])]
+ [f'var_{i}' for i in range(var.shape[1])])
writer.writeheader()
for j, item in enumerate(rows):
d = {'item': item}
for i in range(mu.shape[1]):
d[f'mu_{i}'] = mu[j, i]
d[f'var_{i}'] = var[j, i]
writer.writerow(d)
sys.exit(0)
posterior_mean_samples = sample_posterior_mean(train_loader)
if args.artificial_missing_perc > 0:
missing_indices = train_dataset.missing_indices
missing_labels = train_dataset.missing_labels
if np.ndim(missing_labels) == 1:
missing_labels = missing_labels[:, np.newaxis]
inferred_response = posterior_mean_samples['response'].squeeze(0)
inferred_response = torch.round(inferred_response)
correct, count = 0, 0
for missing_index, missing_label in zip(missing_indices, missing_labels):
inferred_label = inferred_response[missing_index[0], missing_index[1]]
if inferred_label.item() == missing_label[0]:
correct += 1
count += 1
missing_imputation_accuracy = correct / float(count)
checkpoint['missing_imputation_accuracy_mean'] = missing_imputation_accuracy
print(f'Missing Imputation Accuracy from mean: {missing_imputation_accuracy}')
if not args.no_marginal:
train_logp = get_log_marginal_density(train_loader)
checkpoint['train_logp'] = train_logp
if not args.no_test:
test_logp = get_log_marginal_density(test_loader)
checkpoint['test_logp'] = test_logp
torch.save(checkpoint, os.path.join(args.out_dir, checkpoint_name))
print(f'Train time: {np.abs(train_times[:100]).sum()}')
| 40.511211
| 115
| 0.564017
|
ab0990d1278d2dae7dbf0cd9583de5a29291acba
| 2,335
|
py
|
Python
|
users/utils.py
|
ashbc/tgrsite
|
180e5eb9c72a7a331276fe3de150ea2eea2db51e
|
[
"ISC"
] | 1
|
2019-06-29T15:25:05.000Z
|
2019-06-29T15:25:05.000Z
|
users/utils.py
|
ashbc/tgrsite
|
180e5eb9c72a7a331276fe3de150ea2eea2db51e
|
[
"ISC"
] | 70
|
2017-06-21T13:13:57.000Z
|
2019-03-20T22:14:56.000Z
|
users/utils.py
|
ashbc/tgrsite
|
180e5eb9c72a7a331276fe3de150ea2eea2db51e
|
[
"ISC"
] | null | null | null |
import requests
from django.http import HttpRequest
from django.template import loader
from .models import Membership, Member
from .achievements import give_achievement, give_achievement_once
from premailer import Premailer
try:
from xml.etree.cElementTree import ElementTree, fromstring
except ImportError:
from xml.etree.ElementTree import ElementTree, fromstring
from notifications.tasks import send_mass_html_mail
from tgrsite import settings
if settings.DEBUG:
url = "http://"
else:
url = "https://"
url += settings.PRIMARY_HOST
transformer = Premailer(base_url=url, base_path=url,
disable_leftover_css=True, disable_validation=True, remove_unset_properties=True,
include_star_selectors=True, keep_style_tags=False, align_floating_images=False)
def sendRequestMailings(user, token, email):
request = HttpRequest()
request.META['HTTP_HOST'] = settings.PRIMARY_HOST
subject = "Membership Verification | Warwick Tabletop Games and Role-Playing Society"
text = loader.render_to_string("users/membership/plain-email.txt", {"user": user, "host": url, "token": token},
request)
html = loader.render_to_string("users/membership/email.html", {"user": user, "token": token},
request)
html = transformer.transform(html)
mails = [(subject, text, html, None, [email])]
send_mass_html_mail(mails, fail_silently=False)
def getApiMembers():
members_xml = requests.get(
"https://www.warwicksu.com/membershipapi/listMembers/" + settings.MEMBERSHIP_API_KEY + "/")
members_root = fromstring(members_xml.text.encode('utf-8'))
members = {}
for member in members_root:
id = member.find('UniqueID').text
email = member.find('EmailAddress').text
members[id] = email
return members
def updateMemberships():
members = getApiMembers()
for m in Membership.objects.all():
if m.verified:
if m.uni_id.lstrip('u') in members:
if not m.active:
m.active = True
m.save()
give_achievement(m.member, "verify_membership")
else:
if m.active:
m.active = False
m.save()
| 34.850746
| 115
| 0.652248
|
10f8e186cbafa1579125b7b983e983fa06639308
| 5,834
|
py
|
Python
|
asyncdagpi/http.py
|
mysistersbrother/asyncdagpi
|
17859e67c51fbcc6b1e3eb719d4269db39a7f612
|
[
"MIT"
] | null | null | null |
asyncdagpi/http.py
|
mysistersbrother/asyncdagpi
|
17859e67c51fbcc6b1e3eb719d4269db39a7f612
|
[
"MIT"
] | null | null | null |
asyncdagpi/http.py
|
mysistersbrother/asyncdagpi
|
17859e67c51fbcc6b1e3eb719d4269db39a7f612
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import sys
from typing import Dict
import aiohttp
from . import errors
from .image import Image
log = logging.getLogger(__name__)
error_dict = {
400: errors.ParameterError("Parameters passed were incorrect"),
413: errors.FileTooLarge("The Image Passed is too large"),
500: errors.ApiError("Internal Server Error"),
429: errors.RateLimited("You are being Rate_limited"),
403: errors.Unauthorised("403 Returned")
}
class HTTP:
"""
HTTP Client
-----------
    Represents an HTTP client sending HTTP requests to the dagpi API.
.. _aiohttp session:
https://aiohttp.readthedocs.io/en/stable/client_reference.html#client-session
Parameters
----------
:param token: :class:`str`
A dagpi Token from https://dagpi.xyz
    :param logging_enabled: :class:`bool`
        Whether or not to log dagpi
**kwargs:
**session : Optional[aiohttp session]
The session used to request to the API
**loop: Optional[asyncio loop]
The asyncio loop to use
"""
__slots__ = ("client", "base_url", "token", "loop",
"user_agent", "logging")
def __init__(self, token: str, logging_enabled: bool, **kwargs):
self.base_url = "https://api.dagpi.xyz"
self.token = token
self.logging = logging_enabled
self.loop = loop = kwargs.get('loop', None) or asyncio.get_event_loop()
self.client = kwargs.get('session') or aiohttp.ClientSession(loop=loop)
self.user_agent = "AsyncDagpi v{__version__} Python/Python/ \
{sys.version_info[0]}.{sys.version_info[1]} aiohttp/{2}"
async def data_request(self, url: str, **kwargs) -> Dict:
"""
url: :class:`str`
url to request
:return: :class:`Dict`
Python Dictionary
"""
if not self.token:
raise errors.Unauthorised("Please Provide a dagpi token")
headers = {
"Authorization": self.token,
'User-Agent': self.user_agent
}
request_url = self.base_url + "/data/" + url
if kwargs.get("image"):
request_url = self.base_url + "/image/"
async with self.client.get(request_url, headers=headers) as resp:
if 300 >= resp.status >= 200:
if resp.headers["Content-Type"] == "application/json":
js = await resp.json()
return js
else:
raise errors.ApiError(f"{resp.status}. \
Request was great but Dagpi did not send a JSON")
else:
try:
error = error_dict[resp.status]
raise error
except KeyError:
raise errors.ApiError("Unknown API Error Occurred")
async def image_request(self, url: str, params: dict) -> Image:
"""
url: :class:`str`
A string containing the URL
params: :class:`Dict`
A dictionary of the URL parameters
:return: :class:`asyncdagpi.Image`
Asyncdagpi Image Object
"""
if not self.token:
raise errors.Unauthorised("Please Provide a dagpi token")
headers = {
"Authorization": self.token,
'User-Agent': self.user_agent
}
request_url = self.base_url + "/image" + url
async with self.client.get(request_url, headers=headers,
params=params) as resp:
if 300 >= resp.status >= 200:
if resp.headers["Content-Type"].lower() in \
["image/png", "image/gif"]:
form = resp.headers["Content-Type"].replace("image/",
"")
resp_time = resp.headers["X-Process-Time"][:5]
raw_byte = await resp.read()
if self.logging:
log.info(
'[Dagpi Image] GET {} has returned {}'.format(
resp.url,
resp.status))
return Image(raw_byte, form, resp_time,
params.get("url"))
else:
raise errors.ApiError(f"{resp.status}. \
Request was great but Dagpi did not send an Image back")
else:
try:
error = error_dict[resp.status]
raise error
except KeyError:
js = await resp.json()
if resp.status == 415:
raise errors.ImageUnaccesible(415, js["message"])
elif resp.status == 400:
raise errors.ParameterError(400, js["message"])
elif resp.status == 422:
try:
mstr = ""
for val in js["detail"]:
base = "{} is {}".format(val["loc"][1],
val["type"])
mstr += (base + "\t")
raise errors.ParameterError(mstr)
except KeyError:
raise errors.ApiError(
"API was unable to manipulate the Image")
else:
raise errors.ApiError("Unknown API Error Occurred")
async def close(self):
await self.client.close()
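# Illustrative sketch of typical client usage; the endpoint path, query
# parameter and token are hypothetical placeholders.
async def _example_pixel(token: str) -> Image:
    http = HTTP(token, logging_enabled=False)
    try:
        return await http.image_request("/pixel/", {"url": "https://example.com/cat.png"})
    finally:
        await http.close()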
| 37.63871
| 80
| 0.480288
|
46b8fed961089fbad47cbb8adc035ce78c1ad4b8
| 146
|
py
|
Python
|
summary/apps.py
|
ossteam8/oss8_proj
|
341ba45ed47d633665f9a8337cd8df7227cb16c2
|
[
"MIT"
] | 3
|
2021-06-08T08:38:13.000Z
|
2021-06-08T08:38:58.000Z
|
summary/apps.py
|
ossteam8/K-news_keyword
|
341ba45ed47d633665f9a8337cd8df7227cb16c2
|
[
"MIT"
] | 15
|
2021-06-04T16:33:34.000Z
|
2021-06-06T10:05:17.000Z
|
summary/apps.py
|
ossteam8/oss8_proj
|
341ba45ed47d633665f9a8337cd8df7227cb16c2
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class SummaryConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'summary'
| 20.857143
| 56
| 0.760274
|
d5e1234eb57dbf45453528152123f7479b515139
| 718
|
py
|
Python
|
dynamics_learning/training/configs.py
|
wuphilipp/replay-overshooting
|
184b1537c22ebc2f614677be8fe171de785bda42
|
[
"MIT"
] | null | null | null |
dynamics_learning/training/configs.py
|
wuphilipp/replay-overshooting
|
184b1537c22ebc2f614677be8fe171de785bda42
|
[
"MIT"
] | null | null | null |
dynamics_learning/training/configs.py
|
wuphilipp/replay-overshooting
|
184b1537c22ebc2f614677be8fe171de785bda42
|
[
"MIT"
] | 1
|
2022-03-01T07:51:20.000Z
|
2022-03-01T07:51:20.000Z
|
from dataclasses import dataclass
from typing import Callable, Optional
from dynamics_learning.networks.estimator import EstimatorConfig
LearningRateScheduler = Callable[[int, float], float]
@dataclass(frozen=True)
class ExpConfig:
"""All parameters for training.
This must be serializable.
"""
model: EstimatorConfig
ramp_iters: int
batch_size: int
epochs: int
base_learning_rate: float
gradient_clip_max_norm: Optional[float] = None
const_var: bool = False
log_iterations_simple: int = 50
log_iterations_images: int = 100
git_commit_hash: Optional[str] = None
learning_rate_function: Optional[LearningRateScheduler] = None
name: Optional[str] = None
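# Illustrative addition (not part of the original file): a schedule matching the
# LearningRateScheduler = Callable[[int, float], float] alias above, plus a sketch of
# constructing an ExpConfig. The EstimatorConfig arguments are placeholders, since that
# class is defined in networks/estimator.py and not shown here.
def _example_decay_schedule(epoch: int, base_lr: float) -> float:
    """Exponential decay: shrink the base rate by 5% every epoch."""
    return base_lr * (0.95 ** epoch)
# _example_config = ExpConfig(
#     model=EstimatorConfig(...),   # placeholder fields, see networks/estimator.py
#     ramp_iters=1000, batch_size=64, epochs=100, base_learning_rate=1e-3,
#     gradient_clip_max_norm=10.0, learning_rate_function=_example_decay_schedule,
#     name="example-run")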
| 25.642857
| 66
| 0.743733
|
2d3a15e126c060fc06d2b68ca1a73685769cde7b
| 510
|
py
|
Python
|
get_board_dict.py
|
sen-lee/detection_license_plate
|
940772656d014da20006b077a4264b3f7ace80f9
|
[
"Apache-2.0"
] | null | null | null |
get_board_dict.py
|
sen-lee/detection_license_plate
|
940772656d014da20006b077a4264b3f7ace80f9
|
[
"Apache-2.0"
] | null | null | null |
get_board_dict.py
|
sen-lee/detection_license_plate
|
940772656d014da20006b077a4264b3f7ace80f9
|
[
"Apache-2.0"
] | null | null | null |
import json
from detectron2.structures import BoxMode
def get_board_dicts(imgdir):
json_file = imgdir+"/dataset.json" #Fetch the json file
with open(json_file) as f:
dataset_dicts = json.load(f)
for i in dataset_dicts:
filename = i["file_name"]
i["file_name"] = imgdir+"/"+filename
for j in i["annotations"]:
j["bbox_mode"] = BoxMode.XYWH_ABS #Setting the required Box Mode
j["category_id"] = int(j["category_id"])
return dataset_dicts
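# Illustrative usage sketch (not part of the original file): registering this loader
# with Detectron2's catalogs so a trainer can refer to the dataset by name. The
# directory layout ("data/train", "data/val") and the class name "plate" are
# assumptions for the example.
# from detectron2.data import DatasetCatalog, MetadataCatalog
# for split in ["train", "val"]:
#     DatasetCatalog.register(
#         "boards_" + split,
#         lambda split=split: get_board_dicts("data/" + split))
#     MetadataCatalog.get("boards_" + split).set(thing_classes=["plate"])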
| 36.428571
| 76
| 0.647059
|
91330e9d19630f924c0ffddbeaf6997c352f38d5
| 27,185
|
py
|
Python
|
docformatter.py
|
JimArnow/docformatter
|
3d7e92b3de82600f30d422c4f57a7b8566e24325
|
[
"MIT"
] | null | null | null |
docformatter.py
|
JimArnow/docformatter
|
3d7e92b3de82600f30d422c4f57a7b8566e24325
|
[
"MIT"
] | null | null | null |
docformatter.py
|
JimArnow/docformatter
|
3d7e92b3de82600f30d422c4f57a7b8566e24325
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2012-2019 Steven Myint
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Formats docstrings to follow PEP 257."""
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import collections
import io
import locale
import os
import re
import signal
import sys
import textwrap
import tokenize
import sysconfig
import untokenize
__version__ = '1.3.1'
try:
unicode
except NameError:
unicode = str
HEURISTIC_MIN_LIST_ASPECT_RATIO = .4
CR = '\r'
LF = '\n'
CRLF = '\r\n'
_PYTHON_LIBS = set(sysconfig.get_paths().values())
class FormatResult(object):
"""Possible exit codes."""
ok = 0
error = 1
interrupted = 2
check_failed = 3
def format_code(source, **kwargs):
"""Return source code with docstrings formatted.
Wrap summary lines if summary_wrap_length is greater than 0.
See "_format_code()" for parameters.
"""
try:
original_newline = find_newline(source.splitlines(True))
code = _format_code(source, **kwargs)
return normalize_line_endings(code.splitlines(True), original_newline)
except (tokenize.TokenError, IndentationError):
return source
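# Illustrative example (added; not part of the original file) of what format_code()
# does to a small module: whitespace inside the docstring is normalized, a trailing
# period is added by normalize_summary(), and the triple quotes are tightened.
# >>> print(format_code('def f():\n    """  hello world """\n'))
# def f():
#     """hello world."""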
def _format_code(source,
summary_wrap_length=79,
description_wrap_length=72,
pre_summary_newline=False,
make_summary_multi_line=False,
post_description_blank=False,
force_wrap=False,
line_range=None,
given_when_then=False,):
"""Return source code with docstrings formatted."""
if not source:
return source
if line_range is not None:
assert line_range[0] > 0 and line_range[1] > 0
def in_range(start, end):
"""Return True if start/end is in line_range."""
if line_range is None:
return True
return any(line_range[0] <= line_no <= line_range[1]
for line_no in range(start, end + 1))
modified_tokens = []
sio = io.StringIO(source)
previous_token_string = ''
previous_token_type = None
only_comments_so_far = True
for (token_type,
token_string,
start,
end,
line) in tokenize.generate_tokens(sio.readline):
if (
token_type == tokenize.STRING and
token_string.startswith(('"', "'")) and
(previous_token_type == tokenize.INDENT or
only_comments_so_far) and
in_range(start[0], end[0])
):
if only_comments_so_far:
indentation = ''
else:
indentation = previous_token_string
token_string = format_docstring(
indentation,
token_string,
summary_wrap_length=summary_wrap_length,
description_wrap_length=description_wrap_length,
pre_summary_newline=pre_summary_newline,
make_summary_multi_line=make_summary_multi_line,
post_description_blank=post_description_blank,
force_wrap=force_wrap,
fix_given_when_then=given_when_then,
)
if token_type not in [tokenize.COMMENT, tokenize.NEWLINE, tokenize.NL]:
only_comments_so_far = False
previous_token_string = token_string
previous_token_type = token_type
modified_tokens.append(
(token_type, token_string, start, end, line))
return untokenize.untokenize(modified_tokens)
def format_docstring(indentation, docstring,
summary_wrap_length=0,
description_wrap_length=0,
pre_summary_newline=False,
make_summary_multi_line=False,
post_description_blank=False,
force_wrap=False,
fix_given_when_then=True,
):
"""Return formatted version of docstring.
Wrap summary lines if summary_wrap_length is greater than 0.
Relevant parts of PEP 257:
- For consistency, always use triple double quotes around docstrings.
- Triple quotes are used even though the string fits on one line.
- Multi-line docstrings consist of a summary line just like a one-line
docstring, followed by a blank line, followed by a more elaborate
description.
- Unless the entire docstring fits on a line, place the closing quotes
on a line by themselves.
Replace Given:, When:, Then: with :Given:, :When:, :Then: to format as a reStructuredText
Field List if fix_given_when_then is True.
"""
contents = strip_docstring(docstring)
if fix_given_when_then:
contents = format_given_when_then(contents)
# Skip if there are nested triple double quotes
if contents.count('"""'):
return docstring
# Do not modify things that start with doctests.
if contents.lstrip().startswith('>>>'):
return docstring
summary, description = split_summary_and_description(contents)
# Leave docstrings with underlined summaries alone.
if remove_section_header(description).strip() != description.strip():
return docstring
if not force_wrap and is_some_sort_of_list(summary):
# Something is probably not right with the splitting.
return docstring
if description:
# Compensate for triple quotes by temporarily prepending 3 spaces.
# This temporary prepending is undone below.
if pre_summary_newline:
initial_indent = indentation
else:
initial_indent = 3 * ' ' + indentation
return '''\
"""{pre_summary}{summary}
{description}{post_description}
{indentation}"""\
'''.format(
pre_summary=('\n' + indentation if pre_summary_newline
else ''),
summary=wrap_summary(normalize_summary(summary),
wrap_length=summary_wrap_length,
initial_indent=initial_indent,
subsequent_indent=indentation).lstrip(),
description=wrap_description(description,
indentation=indentation,
wrap_length=description_wrap_length,
force_wrap=force_wrap),
post_description=('\n' if post_description_blank else ''),
indentation=indentation)
else:
if make_summary_multi_line:
beginning = '"""\n' + indentation
ending = '\n' + indentation + '"""'
summary_wrapped = wrap_summary(
normalize_summary(contents),
wrap_length=summary_wrap_length,
initial_indent=indentation,
subsequent_indent=indentation).strip()
return '{beginning}{summary}{ending}'.format(
beginning=beginning,
summary=summary_wrapped,
ending=ending
)
else:
return wrap_summary('"""' + normalize_summary(contents) + '"""',
wrap_length=summary_wrap_length,
initial_indent=indentation,
subsequent_indent=indentation).strip()
def reindent(text, indentation):
"""Return reindented text that matches indentation."""
if '\t' not in indentation:
text = text.expandtabs()
text = textwrap.dedent(text)
return '\n'.join(
[(indentation + line).rstrip()
for line in text.splitlines()]).rstrip() + '\n'
def format_given_when_then(text: str) -> str:
"""If Given:, When: or Then: are in string, replace with :Given:, :When:, :Then:"""
for string in ['Given', 'When', 'Then']:
# Find all occurrences of incorrect version
if re.search(string + ':', text) and not re.search(':' + string + ':', text):
            text = text.replace(string + ':', ':' + string + ':')
return text
def is_probably_beginning_of_sentence(line):
"""Return True if this line begins a new sentence."""
# Check heuristically for a parameter list.
for token in ['@', '-', r'\*']:
if re.search(r'\s' + token + r'\s', line):
return True
stripped_line = line.strip()
is_beginning_of_sentence = re.match(r'[^\w"\'`\(\)]', stripped_line)
is_pydoc_ref = re.match(r'^:\w+:', stripped_line)
return is_beginning_of_sentence and not is_pydoc_ref
def split_summary_and_description(contents):
"""Split docstring into summary and description.
Return tuple (summary, description).
"""
split_lines = contents.rstrip().splitlines()
for index in range(1, len(split_lines)):
found = False
if not split_lines[index].strip():
# Empty line separation would indicate the rest is the description.
found = True
elif is_probably_beginning_of_sentence(split_lines[index]):
# Symbol on second line probably is a description with a list.
found = True
if found:
return ('\n'.join(split_lines[:index]).strip(),
'\n'.join(split_lines[index:]).rstrip())
# Break on first sentence.
split = split_first_sentence(contents)
if split[0].strip() and split[1].strip():
return (
split[0].strip(),
_find_shortest_indentation(
split[1].splitlines()[1:]) + split[1].strip()
)
return (contents, '')
def split_first_sentence(text):
"""Split text into first sentence and the rest.
Return a tuple (sentence, rest).
"""
sentence = ''
rest = text
delimiter = ''
previous_delimiter = ''
while rest:
split = re.split(r'(\s)', rest, maxsplit=1)
if len(split) == 3:
word = split[0]
delimiter = split[1]
rest = split[2]
else:
assert len(split) == 1
word = split[0]
delimiter = ''
rest = ''
sentence += previous_delimiter + word
if sentence.endswith(('e.g.', 'i.e.',
'Dr.',
'Mr.', 'Mrs.', 'Ms.')):
# Ignore false end of sentence.
pass
elif sentence.endswith(('.', '?', '!')):
break
elif sentence.endswith(':') and delimiter == '\n':
# Break on colon if it ends the line. This is a heuristic to detect
# the beginning of some parameter list afterwards.
break
previous_delimiter = delimiter
delimiter = ''
return (sentence, delimiter + rest)
def is_some_sort_of_list(text):
"""Return True if text looks like a list."""
split_lines = text.rstrip().splitlines()
# TODO: Find a better way of doing this.
# Very large number of lines but short columns probably means a list of
# items.
if len(split_lines) / max([len(line.strip()) for line in split_lines] +
[1]) > HEURISTIC_MIN_LIST_ASPECT_RATIO:
return True
for line in split_lines:
if (
re.match(r'\s*$', line) or
# "1. item"
re.match(r'\s*[0-9]\.', line) or
# "@parameter"
re.match(r'\s*[\-*:=@]', line) or
# "parameter - description"
re.match(r'.*\s+[\-*:=@]\s+', line) or
# "parameter: description"
re.match(r'\s*\S+[\-*:=@]\s+', line) or
# "parameter:\n description"
re.match(r'\s*\S+:\s*$', line) or
# "parameter -- description"
re.match(r'\s*\S+\s+--\s+', line)
):
return True
return False
def is_some_sort_of_code(text):
"""Return True if text looks like code."""
return any(len(word) > 50 for word in text.split())
def _find_shortest_indentation(lines):
"""Return most shortest indentation."""
assert not isinstance(lines, str)
indentation = None
for line in lines:
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
_indent = line[:non_whitespace_index]
if indentation is None or len(_indent) < len(indentation):
indentation = _indent
return indentation or ''
def find_newline(source):
"""Return type of newline used in source.
Input is a list of lines.
"""
assert not isinstance(source, unicode)
counter = collections.defaultdict(int)
for line in source:
if line.endswith(CRLF):
counter[CRLF] += 1
elif line.endswith(CR):
counter[CR] += 1
elif line.endswith(LF):
counter[LF] += 1
return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
def normalize_line(line, newline):
"""Return line with fixed ending, if ending was present in line.
Otherwise, does nothing.
"""
stripped = line.rstrip('\n\r')
if stripped != line:
return stripped + newline
return line
def normalize_line_endings(lines, newline):
"""Return fixed line endings.
All lines will be modified to use the most common line ending.
"""
return ''.join([normalize_line(line, newline) for line in lines])
def strip_docstring(docstring):
"""Return contents of docstring."""
docstring = docstring.strip()
quote_types = ["'''", '"""', "'", '"']
for quote in quote_types:
if docstring.startswith(quote) and docstring.endswith(quote):
return docstring.split(quote, 1)[1].rsplit(quote, 1)[0].strip()
raise ValueError('We only handle strings that start with quotes')
def normalize_summary(summary):
"""Return normalized docstring summary."""
# Remove newlines
summary = re.sub(r'\s*\n\s*', ' ', summary.rstrip())
# Add period at end of sentence
if summary and (summary[-1].isalnum() or summary[-1] in ['"', "'"]) and (not summary.startswith("#")):
summary += '.'
return summary
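# Illustrative examples (added; not part of the original file) of normalize_summary():
# newlines are collapsed and a period is appended only after an alphanumeric or quote.
# >>> normalize_summary('Return the\n    answer')
# 'Return the answer.'
# >>> normalize_summary('Already terminated!')
# 'Already terminated!'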
def wrap_summary(summary, initial_indent, subsequent_indent, wrap_length):
"""Return line-wrapped summary text."""
if wrap_length > 0:
return '\n'.join(
textwrap.wrap(summary,
width=wrap_length,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent)).strip()
else:
return summary
def wrap_description(text, indentation, wrap_length, force_wrap):
"""Return line-wrapped description text.
We only wrap simple descriptions. We leave doctests, multi-paragraph
text, and bulleted lists alone.
"""
text = strip_leading_blank_lines(text)
# Do not modify doctests at all.
if '>>>' in text:
return text
text = reindent(text, indentation).rstrip()
# Ignore possibly complicated cases.
if wrap_length <= 0 or (not force_wrap and
(is_some_sort_of_list(text) or
is_some_sort_of_code(text))):
return text
return indentation + '\n'.join(
textwrap.wrap(textwrap.dedent(text),
width=wrap_length,
initial_indent=indentation,
subsequent_indent=indentation)).strip()
def remove_section_header(text):
r"""Return text with section header removed.
>>> remove_section_header('----\nfoo\nbar\n')
'foo\nbar\n'
>>> remove_section_header('===\nfoo\nbar\n')
'foo\nbar\n'
"""
stripped = text.lstrip()
if not stripped:
return text
first = stripped[0]
if not (
first.isalnum() or
first.isspace() or
stripped.splitlines()[0].strip(first).strip()
):
return stripped.lstrip(first).lstrip()
return text
def strip_leading_blank_lines(text):
"""Return text with leading blank lines removed."""
split = text.splitlines()
found = 0
for index, line in enumerate(split):
if line.strip():
found = index
break
return '\n'.join(split[found:])
def open_with_encoding(filename, encoding, mode='r'):
"""Return opened file with a specific encoding."""
return io.open(filename, mode=mode, encoding=encoding,
newline='') # Preserve line endings
def detect_encoding(filename):
"""Return file encoding."""
try:
with open(filename, 'rb') as input_file:
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
# Check for correctness of encoding.
with open_with_encoding(filename, encoding) as input_file:
input_file.read()
return encoding
except (SyntaxError, LookupError, UnicodeDecodeError):
return 'latin-1'
def format_file(filename, args, standard_out):
"""Run format_code() on a file.
Return: one of the FormatResult codes.
"""
encoding = detect_encoding(filename)
with open_with_encoding(filename, encoding=encoding) as input_file:
source = input_file.read()
formatted_source = _format_code_with_args(source, args)
if source != formatted_source:
if args.check:
return FormatResult.check_failed
elif args.in_place:
with open_with_encoding(filename, mode='w',
encoding=encoding) as output_file:
output_file.write(formatted_source)
else:
import difflib
diff = difflib.unified_diff(
source.splitlines(),
formatted_source.splitlines(),
'before/' + filename,
'after/' + filename,
lineterm='')
standard_out.write('\n'.join(list(diff) + ['']))
return FormatResult.ok
def _format_code_with_args(source, args):
"""Run format_code with parsed command-line arguments."""
return format_code(
source,
summary_wrap_length=args.wrap_summaries,
description_wrap_length=args.wrap_descriptions,
pre_summary_newline=args.pre_summary_newline,
make_summary_multi_line=args.make_summary_multi_line,
post_description_blank=args.post_description_blank,
force_wrap=args.force_wrap,
line_range=args.line_range,
given_when_then=args.given_when_then,
)
def _main(argv, standard_out, standard_error, standard_in):
"""Run internal main entry point."""
import argparse
parser = argparse.ArgumentParser(description=__doc__, prog='docformatter')
changes = parser.add_mutually_exclusive_group()
changes.add_argument('-i', '--in-place', action='store_true',
help='make changes to files instead of printing '
'diffs')
changes.add_argument('-c', '--check', action='store_true',
help='only check and report incorrectly formatted '
'files')
parser.add_argument('-r', '--recursive', action='store_true',
help='drill down directories recursively')
parser.add_argument('-e', '--exclude', nargs="*",
help='exclude directories and files by names')
parser.add_argument('--wrap-summaries', default=79, type=int,
metavar='length',
help='wrap long summary lines at this length; '
'set to 0 to disable wrapping '
'(default: %(default)s)')
parser.add_argument('--wrap-descriptions', default=72, type=int,
metavar='length',
help='wrap descriptions at this length; '
'set to 0 to disable wrapping '
'(default: %(default)s)')
parser.add_argument('--blank', dest='post_description_blank',
action='store_true',
help='add blank line after description')
parser.add_argument('--pre-summary-newline',
action='store_true',
help='add a newline before the summary of a '
'multi-line docstring')
parser.add_argument('--make-summary-multi-line',
action='store_true',
help='add a newline before and after the summary of a '
'one-line docstring')
parser.add_argument('--force-wrap', action='store_true',
help='force descriptions to be wrapped even if it may '
'result in a mess')
parser.add_argument('--range', metavar='line', dest='line_range',
default=None, type=int, nargs=2,
help='apply docformatter to docstrings between these '
'lines; line numbers are indexed at 1')
parser.add_argument('--given-when-then',
action='store_true',
help='Reformat Given:, When:, Then: into :Given:, :When:, :Then: '
'to support parsing as reStructuredText Field List in the docs.'
)
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('files', nargs='+',
help="files to format or '-' for standard in")
args = parser.parse_args(argv[1:])
if args.line_range:
if args.line_range[0] <= 0:
parser.error('--range must be positive numbers')
if args.line_range[0] > args.line_range[1]:
parser.error('First value of --range should be less than or equal '
'to the second')
if '-' in args.files:
_format_standard_in(args,
parser=parser,
standard_out=standard_out,
standard_in=standard_in)
else:
return _format_files(args,
standard_out=standard_out,
standard_error=standard_error)
def _format_standard_in(args, parser, standard_out, standard_in):
"""Print formatted text to standard out."""
if len(args.files) > 1:
parser.error('cannot mix standard in and regular files')
if args.in_place:
parser.error('--in-place cannot be used with standard input')
if args.recursive:
parser.error('--recursive cannot be used with standard input')
encoding = None
source = standard_in.read()
if not isinstance(source, unicode):
encoding = standard_in.encoding or _get_encoding()
source = source.decode(encoding)
formatted_source = _format_code_with_args(source, args=args)
if encoding:
formatted_source = formatted_source.encode(encoding)
standard_out.write(formatted_source)
def _get_encoding():
"""Return preferred encoding."""
return locale.getpreferredencoding() or sys.getdefaultencoding()
def find_py_files(sources, recursive, exclude=None):
"""Find Python source files.
Parameters
- sources: iterable with paths as strings.
- recursive: drill down directories if True.
- exclude: string based on which directories and files are excluded.
Return: yields paths to found files.
"""
def not_hidden(name):
"""Return True if file 'name' isn't .hidden."""
return not name.startswith('.')
def is_excluded(name, exclude):
"""Return True if file 'name' is excluded."""
if not exclude:
return False
for e in exclude:
if re.search(re.escape(str(e)), name, re.IGNORECASE):
return True
return False
for name in sorted(sources):
if recursive and os.path.isdir(name):
for root, dirs, children in os.walk(unicode(name)):
dirs[:] = [d for d in dirs if not_hidden(d) and not is_excluded(d, _PYTHON_LIBS)]
dirs[:] = sorted([d for d in dirs if not is_excluded(d, exclude)])
files = sorted([f for f in children if not_hidden(f) and not is_excluded(f, exclude)])
for filename in files:
if filename.endswith('.py') and not is_excluded(root, exclude):
yield os.path.join(root, filename)
else:
yield name
def _format_files(args, standard_out, standard_error):
"""Format multiple files.
Return: one of the FormatResult codes.
"""
outcomes = collections.Counter()
for filename in find_py_files(set(args.files), args.recursive, args.exclude):
try:
result = format_file(filename, args=args,
standard_out=standard_out)
outcomes[result] += 1
if result == FormatResult.check_failed:
print(unicode(filename), file=standard_error)
except IOError as exception:
outcomes[FormatResult.error] += 1
print(unicode(exception), file=standard_error)
return_codes = [ # in order of preference
FormatResult.error,
FormatResult.check_failed,
FormatResult.ok,
]
for code in return_codes:
if outcomes[code]:
return code
def main():
"""Run main entry point."""
try:
# Exit on broken pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
return _main(sys.argv,
standard_out=sys.stdout,
standard_error=sys.stderr,
standard_in=sys.stdin)
except KeyboardInterrupt:
return FormatResult.interrupted # pragma: no cover
if __name__ == '__main__':
sys.exit(main())
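# Usage sketch (added for illustration), based on the argparse options defined above:
#   python docformatter.py example.py                          # print a unified diff
#   python docformatter.py --in-place --wrap-summaries 88 example.py
#   python docformatter.py --check --recursive src/            # exit code 3 on failure
# The exit codes map to the FormatResult class near the top of this file.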
| 33.520345
| 106
| 0.594703
|
d35571ec1dd03c551813734d086259ed2f4a9ae0
| 1,736
|
py
|
Python
|
teleavon_examples/assistant.py
|
GramaBell/Teleavon
|
8377399f726dc8cee76accb303b26be669fe3a0a
|
[
"MIT"
] | null | null | null |
teleavon_examples/assistant.py
|
GramaBell/Teleavon
|
8377399f726dc8cee76accb303b26be669fe3a0a
|
[
"MIT"
] | null | null | null |
teleavon_examples/assistant.py
|
GramaBell/Teleavon
|
8377399f726dc8cee76accb303b26be669fe3a0a
|
[
"MIT"
] | null | null | null |
"""
This file is only the "core" of the bot. It is responsible for loading the
plugins module and initializing it. You may obtain the plugins by running:
git clone https://github.com/Lonami/TeleavonianBotExt plugins
In the same folder where this file lives. As a result, the directory should
look like the following:
assistant.py
plugins/
...
"""
import asyncio
import os
import sys
import time
from telethon import TelegramClient
try:
# Standalone script assistant.py with folder plugins/
import plugins
except ImportError:
try:
# Running as a module with `python -m assistant` and structure:
#
# assistant/
# __main__.py (this file)
# plugins/ (cloned)
from . import plugins
except ImportError:
print('could not load the plugins module, does the directory exist '
'in the correct location?', file=sys.stderr)
exit(1)
def get_env(name, message, cast=str):
if name in os.environ:
return os.environ[name]
while True:
value = input(message)
try:
return cast(value)
except ValueError as e:
print(e, file=sys.stderr)
time.sleep(1)
API_ID = get_env('TG_API_ID', 'Enter your API ID: ', int)
API_HASH = get_env('TG_API_HASH', 'Enter your API hash: ')
TOKEN = get_env('TG_TOKEN', 'Enter the bot token: ')
NAME = TOKEN.split(':')[0]
async def main():
bot = TelegramClient(NAME, API_ID, API_HASH)
await bot.start(bot_token=TOKEN)
try:
await plugins.init(bot)
await bot.run_until_disconnected()
finally:
await bot.disconnect()
if __name__ == '__main__':
asyncio.run(main())
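# Usage note (added for illustration): get_env() above reads TG_API_ID, TG_API_HASH and
# TG_TOKEN from the environment before falling back to interactive prompts, so the bot
# can be launched non-interactively, e.g. (placeholder values):
#   TG_API_ID=12345 TG_API_HASH=0123456789abcdef TG_TOKEN=123456:bot-token python assistant.py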
| 24.450704
| 76
| 0.636521
|
b4f65cba3fd3f434a90d78b838f9f414ec9d8035
| 3,152
|
py
|
Python
|
BasicModule.py
|
birm/HistoQC
|
1384a69b7f37bae59191302c0a8283e79d9f502b
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
BasicModule.py
|
birm/HistoQC
|
1384a69b7f37bae59191302c0a8283e79d9f502b
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
BasicModule.py
|
birm/HistoQC
|
1384a69b7f37bae59191302c0a8283e79d9f502b
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import logging
import os
from BaseImage import printMaskHelper
from skimage.morphology import remove_small_objects, binary_opening, disk
from skimage import io, color
import matplotlib.pyplot as plt
def getBasicStats(s, params):
logging.info(f"{s['filename']} - \tgetBasicStats")
osh = s["os_handle"]
s.addToPrintList("type", osh.properties.get("openslide.vendor", "NA"))
s.addToPrintList("levels", osh.properties.get("openslide.level-count", "NA"))
s.addToPrintList("height", osh.properties.get("openslide.level[0].height", "NA"))
s.addToPrintList("width", osh.properties.get("openslide.level[0].width", "NA"))
s.addToPrintList("mpp_x", osh.properties.get("openslide.mpp-x", "NA"))
s.addToPrintList("mpp_y", osh.properties.get("openslide.mpp-y", "NA"))
s.addToPrintList("comment", osh.properties.get("openslide.comment", "NA").replace("\n", " ").replace("\r", " "))
return
def finalComputations(s, params):
mask = s["img_mask_use"]
s.addToPrintList("pixels_to_use", str(len(mask.nonzero()[0])))
def finalProcessingSpur(s, params):
logging.info(f"{s['filename']} - \tfinalProcessingSpur")
disk_radius = int(params.get("disk_radius", "25"))
selem = disk(disk_radius)
mask = s["img_mask_use"]
mask_opened = binary_opening(mask, selem)
mask_spur = ~mask_opened & mask
io.imsave(s["outdir"] + os.sep + s["filename"] + "_spur.png", mask_spur * 255)
prev_mask = s["img_mask_use"]
s["img_mask_use"] = mask_opened
s.addToPrintList("spur_pixels",
printMaskHelper(params.get("mask_statistics", s["mask_statistics"]), prev_mask, s["img_mask_use"]))
if len(s["img_mask_use"].nonzero()[0]) == 0: # add warning in case the final tissue is empty
logging.warning(
f"{s['filename']} - After BasicModule.finalProcessingSpur NO tissue remains detectable! Downstream modules likely to be incorrect/fail")
s["warnings"].append(
f"After BasicModule.finalProcessingSpur NO tissue remains detectable! Downstream modules likely to be incorrect/fail")
def finalProcessingArea(s, params):
logging.info(f"{s['filename']} - \tfinalProcessingArea")
area_thresh = int(params.get("area_thresh", "1000"))
mask = s["img_mask_use"]
mask_opened = remove_small_objects(mask, min_size=area_thresh)
mask_removed_area = ~mask_opened & mask
io.imsave(s["outdir"] + os.sep + s["filename"] + "_areathresh.png", mask_removed_area * 255)
prev_mask = s["img_mask_use"]
s["img_mask_use"] = mask_opened > 0
s.addToPrintList("areaThresh",
printMaskHelper(params.get("mask_statistics", s["mask_statistics"]), prev_mask, s["img_mask_use"]))
if len(s["img_mask_use"].nonzero()[0]) == 0: # add warning in case the final tissue is empty
logging.warning(
f"{s['filename']} - After BasicModule.finalProcessingArea NO tissue remains detectable! Downstream modules likely to be incorrect/fail")
s["warnings"].append(
f"After BasicModule.finalProcessingArea NO tissue remains detectable! Downstream modules likely to be incorrect/fail")
| 43.777778
| 148
| 0.688452
|
318fd7db85f7c971cea2b0f26fd29d04e0649e20
| 872
|
py
|
Python
|
request-management-api/migrations/versions/fc26a7b65a2a_.py
|
bcgov/foi-flow
|
7f9897b3aad4ba91fbc8edcb8f526906efb490df
|
[
"Apache-2.0"
] | null | null | null |
request-management-api/migrations/versions/fc26a7b65a2a_.py
|
bcgov/foi-flow
|
7f9897b3aad4ba91fbc8edcb8f526906efb490df
|
[
"Apache-2.0"
] | 1,579
|
2021-04-14T18:27:45.000Z
|
2022-03-31T23:49:42.000Z
|
request-management-api/migrations/versions/fc26a7b65a2a_.py
|
bcgov/foi-flow
|
7f9897b3aad4ba91fbc8edcb8f526906efb490df
|
[
"Apache-2.0"
] | 1
|
2022-03-01T20:17:47.000Z
|
2022-03-01T20:17:47.000Z
|
"""empty message
Revision ID: fc26a7b65a2a
Revises: 1347453b75e6
Create Date: 2021-11-10 19:07:40.807103
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fc26a7b65a2a'
down_revision = '1347453b75e6'
branch_labels = None
depends_on = None
def upgrade():
op.drop_constraint('FOIMinistryRequestDocuments_pkey', 'FOIMinistryRequestDocuments', type_='primary')
op.create_primary_key(
"FOIMinistryRequestDocuments_pkey", "FOIMinistryRequestDocuments",
["foiministrydocumentid", "version"]
)
def downgrade():
op.drop_constraint('FOIMinistryRequestDocuments_pkey', 'FOIMinistryRequestDocuments', type_='primary')
op.create_primary_key(
"FOIMinistryRequestDocuments_pkey", "FOIMinistryRequestDocuments",
["foiministrydocumentid"]
)
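# Usage sketch (added for illustration): with the project's usual Alembic setup this
# revision is applied or rolled back from the command line, e.g.
#   alembic upgrade fc26a7b65a2a    (or: alembic upgrade head)
#   alembic downgrade 1347453b75e6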
| 26.424242
| 110
| 0.728211
|
559d0f020eff118f42190da00de8eccd81003ea9
| 4,212
|
py
|
Python
|
apps/win/xplorer2/xplorer2.py
|
PetrKryslUCSD/knausj_talon_pk
|
6612adb1794e0b02ce8b1c2b478b74cd6858954b
|
[
"MIT"
] | 1
|
2020-11-13T18:02:12.000Z
|
2020-11-13T18:02:12.000Z
|
apps/win/xplorer2/xplorer2.py
|
PetrKryslUCSD/knausj_talon_pk
|
6612adb1794e0b02ce8b1c2b478b74cd6858954b
|
[
"MIT"
] | null | null | null |
apps/win/xplorer2/xplorer2.py
|
PetrKryslUCSD/knausj_talon_pk
|
6612adb1794e0b02ce8b1c2b478b74cd6858954b
|
[
"MIT"
] | null | null | null |
from talon import Context, Module, actions, imgui, settings, ui, app
import os
mod = Module()
apps = mod.apps
# An explanation on how to get the title set up:
# Petr Krysl 2021
# Q: Could I also ask about the title bar of the window: could I set
# it to whatever I want?
# A: There is a registry tweak called DPF_NOTITLE, you can look it
# up in a file called REGISTRY.TXT in the xplorer2 installation folder,
# but to turn it on requires registry editing after you quit xplorer2.
apps.xplorer2 = """
os: windows
and app.name: /xplorer².*/
"""
ctx = Context()
ctx.matches = r"""
app: xplorer2
"""
user_path = os.path.expanduser("~")
directories_to_remap = {}
directories_to_exclude = {}
if app.platform == "windows":
is_windows = True
import ctypes
GetUserNameEx = ctypes.windll.secur32.GetUserNameExW
NameDisplay = 3
size = ctypes.pointer(ctypes.c_ulong(0))
GetUserNameEx(NameDisplay, None, size)
nameBuffer = ctypes.create_unicode_buffer(size.contents.value)
GetUserNameEx(NameDisplay, nameBuffer, size)
one_drive_path = os.path.expanduser(os.path.join("~", "OneDrive"))
# this is probably not the correct way to check for onedrive, quick and dirty
if os.path.isdir(os.path.expanduser(os.path.join("~", r"OneDrive\Desktop"))):
default_folder = os.path.join("~", "Desktop")
directories_to_remap = {
"Desktop": os.path.join(one_drive_path, "Desktop"),
"Documents": os.path.join(one_drive_path, "Documents"),
"Downloads": os.path.join(user_path, "Downloads"),
"Music": os.path.join(user_path, "Music"),
"OneDrive": one_drive_path,
"Pictures": os.path.join(one_drive_path, "Pictures"),
"Videos": os.path.join(user_path, "Videos"),
}
else:
# todo use expanduser for cross platform support
directories_to_remap = {
"Desktop": os.path.join(user_path, "Desktop"),
"Documents": os.path.join(user_path, "Documents"),
"Downloads": os.path.join(user_path, "Downloads"),
"Music": os.path.join(user_path, "Music"),
"OneDrive": one_drive_path,
"Pictures": os.path.join(user_path, "Pictures"),
"Videos": os.path.join(user_path, "Videos"),
}
if nameBuffer.value:
directories_to_remap[nameBuffer.value] = user_path
directories_to_exclude = [
"",
"Run",
"Task Switching",
"Task View",
"This PC",
"File Explorer",
"Program Manager",
]
@ctx.action_class("user")
class user_actions:
def file_manager_current_path():
path = ui.active_window().title
if path in directories_to_remap:
path = directories_to_remap[path]
if path in directories_to_exclude:
actions.user.file_manager_hide_pickers()
path = ""
return path
def file_manager_terminal_here():
actions.key("f10")
def file_manager_show_properties():
"""Shows the properties for the file"""
actions.key("alt-enter")
def file_manager_open_directory(path: str):
"""opens the directory that's already visible in the view"""
# xplorer2 has a special key for that (not ctrl-L!)
actions.key("shift-tab")
actions.insert(path)
actions.key("enter")
def file_manager_select_directory(path: str):
"""selects the directory"""
actions.insert(path)
def file_manager_new_folder(name: str):
"""Creates a new folder in a gui filemanager or inserts the command to do so for terminals"""
# xplorer2 has a special key for that
actions.key("home")
actions.key("f8")
actions.insert(name)
def file_manager_open_file(path: str):
"""opens the file"""
actions.key("home")
actions.insert(path)
actions.key("enter")
def file_manager_select_file(path: str):
"""selects the file"""
actions.key("home")
actions.insert(path)
def file_manager_open_volume(volume: str):
"""file_manager_open_volume"""
actions.user.file_manager_open_directory(volume)
| 31.2
| 101
| 0.63509
|
b1d4b12d5701901c8149ba73bdd34cf1c6d9ec4a
| 1,069
|
py
|
Python
|
src/rewrite_event_states.py
|
matpalm/drivebot
|
2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88
|
[
"MIT"
] | 70
|
2016-02-20T02:59:14.000Z
|
2021-12-30T04:19:09.000Z
|
src/rewrite_event_states.py
|
matpalm/drivebot
|
2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88
|
[
"MIT"
] | 1
|
2016-05-03T15:57:58.000Z
|
2016-05-04T13:55:53.000Z
|
src/rewrite_event_states.py
|
matpalm/drivebot
|
2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88
|
[
"MIT"
] | 17
|
2016-02-20T03:53:46.000Z
|
2021-03-17T07:38:18.000Z
|
#!/usr/bin/env python
import json, sys
import states
#sonar_to_state = states.FurthestSonar() # OrderingSonars()
#sonar_to_state = states.OrderingSonars()
#sonar_to_state = states.StateHistory(states.FurthestSonar(), history_length=4)
sonar_to_state = states.StateHistory(states.StandardisedSonars(mean=59.317, std=37.603),
history_length=int(sys.argv[1]))
for line in sys.stdin:
episode = json.loads(line)
rewritten_episode = []
last_state = None
for event in episode:
        # for the very first event we need to use ranges_1 to decide state_1
# but from then on we just copy the last value across
if last_state is None:
event['state_1'] = sonar_to_state.state_given_new_ranges(event['ranges_1'])
else:
event['state_1'] = last_state
# update state2
event['state_2'] = sonar_to_state.state_given_new_ranges(event['ranges_2'])
last_state = event['state_2']
rewritten_episode.append(event)
    print(json.dumps(rewritten_episode))
| 32.393939
| 89
| 0.674462
|
e527e8fa048bfbb1c71a79954ac8e25291068120
| 1,767
|
py
|
Python
|
lc/0946_ValidStackSequences.py
|
xiangshiyin/coding-challenge
|
a75a644b96dec1b6c7146b952ca4333263f0a461
|
[
"Apache-2.0"
] | null | null | null |
lc/0946_ValidStackSequences.py
|
xiangshiyin/coding-challenge
|
a75a644b96dec1b6c7146b952ca4333263f0a461
|
[
"Apache-2.0"
] | null | null | null |
lc/0946_ValidStackSequences.py
|
xiangshiyin/coding-challenge
|
a75a644b96dec1b6c7146b952ca4333263f0a461
|
[
"Apache-2.0"
] | null | null | null |
# class Solution:
# def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
# n = len(pushed)
# # exception
# if n == 0:
# return True
# # create a hash table of value-index pairs
# tb = {v:i for i,v in enumerate(pushed)}
# visited = set()
# # traverse popped
# i = 0
# while i < n:
# if i == 0:
# stack = [j for j in range(tb[popped[i]])]
# # print(stack)
# else:
# if tb[popped[i]] > tb[popped[i - 1]]:
# if len(stack) > 0:
# for j in range(stack[-1] + 1, tb[popped[i]]):
# if j not in visited:
# stack.append(j)
# else:
# for j in range(tb[popped[i - 1]] + 1, tb[popped[i]]):
# if j not in visited:
# stack.append(j)
# else:
# if tb[popped[i]] != stack[-1]:
# return False
# else:
# stack.pop()
# # print(stack)
# visited.add(tb[popped[i]])
# i += 1
# return True
from typing import List
class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
# exception
if len(pushed) == 0:
return True
stack = []
i = 0
for v in pushed:
stack.append(v)
while stack and stack[-1] == popped[i]:
i += 1
stack.pop()
return len(stack) == 0
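# Quick check (added for illustration) using the classic LeetCode example; defined but
# not executed so importing this file stays side-effect free.
def _example_validate():
    s = Solution()
    assert s.validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])
    assert not s.validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])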
| 32.127273
| 85
| 0.372949
|
9531e990d630dfc72004cf478725ba022da17c93
| 7,132
|
py
|
Python
|
designate/backend/impl_akamai_v2.py
|
ChukwuemekaAham/designate
|
2f17dd20e260c3b8a36a5034077275c796abb60b
|
[
"Apache-2.0"
] | 1
|
2022-02-18T11:19:35.000Z
|
2022-02-18T11:19:35.000Z
|
designate/backend/impl_akamai_v2.py
|
sapcc/designate
|
c3f084751006a2fe7562f137930542c4759d6fd9
|
[
"Apache-2.0"
] | null | null | null |
designate/backend/impl_akamai_v2.py
|
sapcc/designate
|
c3f084751006a2fe7562f137930542c4759d6fd9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Cloudification GmbH
#
# Author: Sergey Kraynev <contact@cloudification.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from urllib import parse as urlparse
import requests
from akamai import edgegrid
from oslo_log import log as logging
from designate import exceptions
from designate.backend import base
LOG = logging.getLogger(__name__)
class AkamaiClient(object):
def __init__(self, client_token=None, client_secret=None,
access_token=None, host=None):
session = requests.Session()
self.baseurl = 'https://%s' % host
self.client_token = client_token
self.client_secret = client_secret
self.access_token = access_token
session.auth = edgegrid.EdgeGridAuth(
client_token=self.client_token,
client_secret=self.client_secret,
access_token=self.access_token
)
self.http = session
def gen_url(self, url_path):
return urlparse.urljoin(self.baseurl, url_path)
def post(self, payloads):
url_path = payloads.pop('url')
return self.http.post(url=self.gen_url(url_path), **payloads)
def get(self, url_path):
return self.http.get(url=self.gen_url(url_path))
def build_masters_field(self, masters):
        # Akamai v2 supports only IPs and hostnames. Ports cannot be
        # specified explicitly; 53 is used by default.
return [master.host for master in masters]
def gen_tsig_payload(self, target):
return {
'name': target.options.get('tsig_key_name'),
'algorithm': target.options.get('tsig_key_algorithm'),
'secret': target.options.get('tsig_key_secret'),
}
def gen_create_payload(self, zone, masters, contract_id, gid, tenant_id,
target):
if contract_id is None:
raise exceptions.Backend(
'contractId is required for zone creation')
masters = self.build_masters_field(masters)
body = {
'zone': zone['name'],
'type': 'secondary',
'comment': 'Created by Designate for Tenant %s' % tenant_id,
'masters': masters,
}
# Add tsigKey if it exists
if target.options.get('tsig_key_name'):
            # It's not mentioned in the docs, but the JSON schema supports
            # specifying tsigKey in the same zone creation body
body.update({'tsigKey': self.gen_tsig_payload(target)})
params = {
'contractId': contract_id,
'gid': gid,
}
return {
'url': 'config-dns/v2/zones',
'params': params,
'json': body,
}
def create_zone(self, payload):
result = self.post(payload)
# NOTE: ignore error about duplicate SZ in AKAMAI
if result.status_code == 409 and result.reason == 'Conflict':
LOG.info("Can't create zone %s because it already exists",
payload['json']['zone'])
elif not result.ok:
json_res = result.json()
raise exceptions.Backend(
'Zone creation failed due to: %s' % json_res['detail'])
@staticmethod
def gen_delete_payload(zone_name, force):
return {
'url': '/config-dns/v2/zones/delete-requests',
'params': {'force': force},
'json': {'zones': [zone_name]},
}
def delete_zone(self, zone_name):
# - try to delete with force=True
# - if we get Forbidden error - try to delete it with Checks logic
result = self.post(
self.gen_delete_payload(zone_name, force=True))
if result.status_code == 403 and result.reason == 'Forbidden':
result = self.post(
self.gen_delete_payload(zone_name, force=False))
if result.ok:
request_id = result.json().get('requestId')
LOG.info('Run soft delete for zone (%s) and requestId (%s)',
zone_name, request_id)
if request_id is None:
reason = 'requestId missed in response'
raise exceptions.Backend(
'Zone deletion failed due to: %s' % reason)
self.validate_deletion_is_complete(request_id)
if not result.ok and result.status_code != 404:
reason = result.json().get('detail') or result.json()
raise exceptions.Backend(
'Zone deletion failed due to: %s' % reason)
def validate_deletion_is_complete(self, request_id):
check_url = '/config-dns/v2/zones/delete-requests/%s' % request_id
deleted = False
attempt = 0
while not deleted and attempt < 10:
result = self.get(check_url)
deleted = result.json()['isComplete']
attempt += 1
time.sleep(1.0)
if not deleted:
raise exceptions.Backend(
'Zone was not deleted after %s attempts' % attempt)
class AkamaiBackend(base.Backend):
__plugin_name__ = 'akamai_v2'
__backend_status__ = 'untested'
def __init__(self, target):
super(AkamaiBackend, self).__init__(target)
self._host = self.options.get('host', '127.0.0.1')
self._port = int(self.options.get('port', 53))
self.client = self.init_client()
def init_client(self):
baseurl = self.options.get('akamai_host', '127.0.0.1')
client_token = self.options.get('akamai_client_token', 'admin')
client_secret = self.options.get('akamai_client_secret', 'admin')
access_token = self.options.get('akamai_access_token', 'admin')
return AkamaiClient(client_token, client_secret, access_token, baseurl)
def create_zone(self, context, zone):
"""Create a DNS zone"""
LOG.debug('Create Zone')
contract_id = self.options.get('akamai_contract_id')
gid = self.options.get('akamai_gid')
project_id = context.project_id or zone.tenant_id
# Take list of masters from pools.yaml
payload = self.client.gen_create_payload(
zone, self.masters, contract_id, gid, project_id, self.target)
self.client.create_zone(payload)
self.mdns_api.notify_zone_changed(
context, zone, self._host, self._port, self.timeout,
self.retry_interval, self.max_retries, self.delay)
def delete_zone(self, context, zone):
"""Delete a DNS zone"""
LOG.debug('Delete Zone')
self.client.delete_zone(zone['name'])
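# Illustrative sketch (added; not part of the original file): driving AkamaiClient
# directly. The credentials and host are placeholders for the EdgeGrid values Akamai
# issues; Designate normally builds this client from the pool target options instead.
# client = AkamaiClient(
#     client_token="akab-client-token",
#     client_secret="client-secret",
#     access_token="akab-access-token",
#     host="akab-xxxxxxxx.luna.akamaiapis.net")
# client.delete_zone("example.com.")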
| 35.66
| 79
| 0.616938
|
633af3d20fdc7ca8b4999959744a84bb7858d995
| 5,969
|
py
|
Python
|
dicodile/update_z/distributed_sparse_encoder.py
|
rprimet/dicodile
|
ad30e87b7797fd0083d7cb4edc3fade9825c8339
|
[
"BSD-3-Clause"
] | null | null | null |
dicodile/update_z/distributed_sparse_encoder.py
|
rprimet/dicodile
|
ad30e87b7797fd0083d7cb4edc3fade9825c8339
|
[
"BSD-3-Clause"
] | null | null | null |
dicodile/update_z/distributed_sparse_encoder.py
|
rprimet/dicodile
|
ad30e87b7797fd0083d7cb4edc3fade9825c8339
|
[
"BSD-3-Clause"
] | 1
|
2021-01-13T13:31:21.000Z
|
2021-01-13T13:31:21.000Z
|
import weakref
import numpy as np
from mpi4py import MPI
from ..utils import constants
from ..utils.csc import compute_objective
from ..workers.mpi_workers import MPIWorkers
from ..utils import debug_flags as flags
from ..utils.debugs import main_check_beta
from ..utils.shape_helpers import get_valid_support
from .dicod import recv_z_hat, recv_z_nnz
from .dicod import _gather_run_statistics
from .dicod import _send_task, _send_D, _send_signal
from .dicod import recv_cost, recv_sufficient_statistics
class DistributedSparseEncoder:
def __init__(self, n_workers, w_world='auto', hostfile=None, verbose=0):
# check the parameters
if w_world != 'auto':
assert n_workers % w_world == 0, (
"`w_world={}` should divide the number of jobs `n_workers={}` "
"used.".format(w_world, n_workers))
# Store the parameters
self.n_workers = n_workers
self.w_world = w_world
self.hostfile = hostfile
self.verbose = verbose
def init_workers(self, X, D_hat, reg, params, z0=None, DtD=None):
# compute the partition for the signals
assert D_hat.ndim - 1 == X.ndim, (D_hat.shape, X.shape)
n_channels, *sig_support = X.shape
n_atoms, n_channels, *atom_support = self.D_shape = D_hat.shape
# compute effective n_workers to not have smaller worker support than
# 4 times the atom_support
valid_support = get_valid_support(sig_support, atom_support)
max_n_workers = np.prod(np.maximum(
1, np.array(valid_support) // (2 * np.array(atom_support))
))
effective_n_workers = min(max_n_workers, self.n_workers)
self.effective_n_workers = effective_n_workers
# Create the workers with MPI
self.workers = MPIWorkers(effective_n_workers, hostfile=self.hostfile)
self.workers.send_command(constants.TAG_WORKER_RUN_DICODILE,
verbose=self.verbose)
w_world = self.w_world
if self.w_world != 'auto' and self.w_world > effective_n_workers:
w_world = effective_n_workers
self.params = params.copy()
self.params['reg'] = reg
self.params['precomputed_DtD'] = DtD is not None
self.params['verbose'] = self.verbose
self.workers.send_command(constants.TAG_DICODILE_SET_TASK,
verbose=self.verbose)
self.t_init, self.workers_segments = _send_task(
self.workers, X, D_hat, z0, DtD, w_world, self.params
)
def set_worker_D(self, D, DtD=None):
msg = "The support of the dictionary cannot be changed on an encoder."
assert D.shape[1:] == self.D_shape[1:], msg
self.D_shape = D.shape
if self.params['precomputed_DtD'] and DtD is None:
raise ValueError("The pre-computed value DtD need to be passed "
"each time D is updated.")
self.workers.send_command(constants.TAG_DICODILE_SET_D,
verbose=self.verbose)
_send_D(self.workers, D, DtD)
def set_worker_params(self, params=None, **kwargs):
if params is None:
            assert kwargs != {}
params = kwargs
self.params.update(params)
self.workers.send_command(constants.TAG_DICODILE_SET_PARAMS,
verbose=self.verbose)
self.workers.comm.bcast(self.params, root=MPI.ROOT)
def set_worker_signal(self, X, z0=None):
n_atoms, n_channels, *atom_support = self.D_shape
if self.is_same_signal(X):
return
self.workers.send_command(constants.TAG_DICODILE_SET_SIGNAL,
verbose=self.verbose)
self.workers_segments = _send_signal(self.workers, self.w_world,
atom_support, X, z0)
self._ref_X = weakref.ref(X)
def process_z_hat(self):
self.workers.send_command(constants.TAG_DICODILE_COMPUTE_Z_HAT,
verbose=self.verbose)
if flags.CHECK_WARM_BETA:
main_check_beta(self.workers.comm, self.workers_segments)
# Then wait for the end of the computation
self.workers.comm.Barrier()
return _gather_run_statistics(self.workers.comm, self.workers_segments,
verbose=self.verbose)
def get_cost(self):
self.workers.send_command(constants.TAG_DICODILE_GET_COST,
verbose=self.verbose)
return recv_cost(self.workers.comm)
def get_z_hat(self):
self.workers.send_command(constants.TAG_DICODILE_GET_Z_HAT,
verbose=self.verbose)
return recv_z_hat(self.workers.comm,
self.D_shape[0],
self.workers_segments)
def get_z_nnz(self):
self.workers.send_command(constants.TAG_DICODILE_GET_Z_NNZ,
verbose=self.verbose)
return recv_z_nnz(self.workers.comm, self.D_shape[0])
def get_sufficient_statistics(self):
self.workers.send_command(
constants.TAG_DICODILE_GET_SUFFICIENT_STAT,
verbose=self.verbose)
return recv_sufficient_statistics(self.workers.comm, self.D_shape)
def release_workers(self):
self.workers.send_command(
constants.TAG_DICODILE_STOP)
def shutdown_workers(self):
self.workers.shutdown_workers()
def check_cost(self, X, D_hat, reg):
cost = self.get_cost()
z_hat = self.get_z_hat()
cost_2 = compute_objective(X, z_hat, D_hat, reg)
assert np.isclose(cost, cost_2), (cost, cost_2)
print("check cost ok", cost, cost_2)
def is_same_signal(self, X):
if not hasattr(self, '_ref_X') or self._ref_X() is not X:
return False
return True
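# Illustrative usage sketch (added; not part of the original file): the typical encode
# loop with this class. Shapes and hyper-parameters are placeholders, and `params` must
# contain whatever the dicod solver expects (tolerances, timeouts, ...), which is not
# spelled out here; np is already imported at the top of this module.
# X = np.random.randn(1, 256, 256)       # (n_channels, *sig_support)
# D = np.random.randn(10, 1, 8, 8)       # (n_atoms, n_channels, *atom_support)
# encoder = DistributedSparseEncoder(n_workers=4, verbose=1)
# encoder.init_workers(X, D, reg=0.1, params=dict(tol=1e-3))
# encoder.process_z_hat()
# z_hat = encoder.get_z_hat()
# encoder.release_workers()
# encoder.shutdown_workers()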
| 38.019108
| 79
| 0.628581
|
c828ef55fcd574dc6365093301b878c63bfb8567
| 8,543
|
py
|
Python
|
tests/components/vera/test_init.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 4
|
2020-08-10T20:02:24.000Z
|
2022-01-31T02:14:22.000Z
|
tests/components/vera/test_init.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 78
|
2020-07-23T07:13:08.000Z
|
2022-03-31T06:02:04.000Z
|
tests/components/vera/test_init.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 4
|
2017-01-10T04:17:33.000Z
|
2021-09-02T16:37:24.000Z
|
"""Vera tests."""
from unittest.mock import MagicMock
import pytest
import pyvera as pv
from requests.exceptions import RequestException
from homeassistant.components.vera import (
CONF_CONTROLLER,
CONF_EXCLUDE,
CONF_LIGHTS,
DOMAIN,
)
from homeassistant.config_entries import ENTRY_STATE_NOT_LOADED
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .common import ComponentFactory, ConfigSource, new_simple_controller_config
from tests.common import MockConfigEntry, mock_registry
async def test_init(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1: pv.VeraBinarySensor = MagicMock(spec=pv.VeraBinarySensor)
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:111"},
config_source=ConfigSource.CONFIG_FLOW,
serial_number="first_serial",
devices=(vera_device1,),
),
)
entity_registry = er.async_get(hass)
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "vera_first_serial_1"
async def test_init_from_file(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1: pv.VeraBinarySensor = MagicMock(spec=pv.VeraBinarySensor)
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:111"},
config_source=ConfigSource.FILE,
serial_number="first_serial",
devices=(vera_device1,),
),
)
entity_registry = er.async_get(hass)
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "vera_first_serial_1"
async def test_multiple_controllers_with_legacy_one(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test multiple controllers with one legacy controller."""
vera_device1: pv.VeraBinarySensor = MagicMock(spec=pv.VeraBinarySensor)
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
vera_device2: pv.VeraBinarySensor = MagicMock(spec=pv.VeraBinarySensor)
vera_device2.device_id = 2
vera_device2.vera_device_id = vera_device2.device_id
vera_device2.name = "second_dev"
vera_device2.is_tripped = False
entity2_id = "binary_sensor.second_dev_2"
# Add existing entity registry entry from previous setup.
entity_registry = mock_registry(hass)
entity_registry.async_get_or_create(
domain="switch", platform=DOMAIN, unique_id="12"
)
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:111"},
config_source=ConfigSource.FILE,
serial_number="first_serial",
devices=(vera_device1,),
),
)
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:222"},
config_source=ConfigSource.CONFIG_FLOW,
serial_number="second_serial",
devices=(vera_device2,),
),
)
entity_registry = er.async_get(hass)
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "1"
entry2 = entity_registry.async_get(entity2_id)
assert entry2
assert entry2.unique_id == "vera_second_serial_2"
async def test_unload(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1: pv.VeraBinarySensor = MagicMock(spec=pv.VeraBinarySensor)
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
await vera_component_factory.configure_component(
hass=hass, controller_config=new_simple_controller_config()
)
entries = hass.config_entries.async_entries(DOMAIN)
assert entries
for config_entry in entries:
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert config_entry.state == ENTRY_STATE_NOT_LOADED
async def test_async_setup_entry_error(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
def setup_callback(controller: pv.VeraController) -> None:
controller.get_devices.side_effect = RequestException()
controller.get_scenes.side_effect = RequestException()
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(setup_callback=setup_callback),
)
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_CONTROLLER: "http://127.0.0.1"},
options={},
unique_id="12345",
)
entry.add_to_hass(hass)
assert not await hass.config_entries.async_setup(entry.entry_id)
@pytest.mark.parametrize(
["options"],
[
[{CONF_LIGHTS: [4, 10, 12, "AAA"], CONF_EXCLUDE: [1, "BBB"]}],
[{CONF_LIGHTS: ["4", "10", "12", "AAA"], CONF_EXCLUDE: ["1", "BBB"]}],
],
)
async def test_exclude_and_light_ids(
hass: HomeAssistant, vera_component_factory: ComponentFactory, options
) -> None:
"""Test device exclusion, marking switches as lights and fixing the data type."""
vera_device1: pv.VeraBinarySensor = MagicMock(spec=pv.VeraBinarySensor)
vera_device1.device_id = 1
vera_device1.vera_device_id = 1
vera_device1.name = "dev1"
vera_device1.is_tripped = False
entity_id1 = "binary_sensor.dev1_1"
vera_device2: pv.VeraBinarySensor = MagicMock(spec=pv.VeraBinarySensor)
vera_device2.device_id = 2
vera_device2.vera_device_id = 2
vera_device2.name = "dev2"
vera_device2.is_tripped = False
entity_id2 = "binary_sensor.dev2_2"
vera_device3: pv.VeraSwitch = MagicMock(spec=pv.VeraSwitch)
vera_device3.device_id = 3
vera_device3.vera_device_id = 3
vera_device3.name = "dev3"
vera_device3.category = pv.CATEGORY_SWITCH
vera_device3.is_switched_on = MagicMock(return_value=False)
entity_id3 = "switch.dev3_3"
vera_device4: pv.VeraSwitch = MagicMock(spec=pv.VeraSwitch)
vera_device4.device_id = 4
vera_device4.vera_device_id = 4
vera_device4.name = "dev4"
vera_device4.category = pv.CATEGORY_SWITCH
vera_device4.is_switched_on = MagicMock(return_value=False)
vera_device4.get_brightness = MagicMock(return_value=0)
vera_device4.get_color = MagicMock(return_value=[0, 0, 0])
vera_device4.is_dimmable = True
entity_id4 = "light.dev4_4"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config_source=ConfigSource.CONFIG_ENTRY,
devices=(vera_device1, vera_device2, vera_device3, vera_device4),
config={**{CONF_CONTROLLER: "http://127.0.0.1:123"}, **options},
),
)
# Assert the entries were setup correctly.
config_entry = next(iter(hass.config_entries.async_entries(DOMAIN)))
assert config_entry.options[CONF_LIGHTS] == [4, 10, 12]
assert config_entry.options[CONF_EXCLUDE] == [1]
update_callback = component_data.controller_data[0].update_callback
update_callback(vera_device1)
update_callback(vera_device2)
update_callback(vera_device3)
update_callback(vera_device4)
await hass.async_block_till_done()
assert hass.states.get(entity_id1) is None
assert hass.states.get(entity_id2) is not None
assert hass.states.get(entity_id3) is not None
assert hass.states.get(entity_id4) is not None
| 34.035857
| 86
| 0.721292
|
536ed0c53782dfd193aba23e9d9ecf208d008823
| 2,202
|
py
|
Python
|
speech/setup.py
|
omaray/gcloud-python
|
87a13aaa140842111df2f76529a1b9ce4b6d28a6
|
[
"Apache-2.0"
] | null | null | null |
speech/setup.py
|
omaray/gcloud-python
|
87a13aaa140842111df2f76529a1b9ce4b6d28a6
|
[
"Apache-2.0"
] | null | null | null |
speech/setup.py
|
omaray/gcloud-python
|
87a13aaa140842111df2f76529a1b9ce4b6d28a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages
from setuptools import setup
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
README = file_obj.read()
# NOTE: This metadata is duplicated across the per-package setup.py files in
# this repository and we should try to consolidate.
SETUP_BASE = {
'author': 'Google Cloud Platform',
'author_email': 'jjg+google-cloud-python@google.com',
'scripts': [],
'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
'license': 'Apache 2.0',
'platforms': 'Posix; MacOS X; Windows',
'include_package_data': True,
'zip_safe': False,
'classifiers': [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet',
],
}
REQUIREMENTS = [
'google-cloud-core >= 0.20.0',
'gapic-google-cloud-speech-v1beta1 >= 0.11.1, < 0.12.0',
'grpc-google-cloud-speech-v1beta1 >= 0.11.1, < 0.12.0',
]
setup(
name='google-cloud-speech',
version='0.20.0dev',
description='Python Client for Google Cloud Speech',
long_description=README,
namespace_packages=[
'google',
'google.cloud',
],
packages=find_packages(),
install_requires=REQUIREMENTS,
**SETUP_BASE
)
| 31.014085
| 74
| 0.66485
|
8e352b73dbab2896ca5f0681999fc552fd149c6a
| 766
|
py
|
Python
|
IQR.py
|
Elonisme/Bigdates
|
55459fe060d2ce0696fd9df43e0a07fc21550bd7
|
[
"Apache-2.0"
] | null | null | null |
IQR.py
|
Elonisme/Bigdates
|
55459fe060d2ce0696fd9df43e0a07fc21550bd7
|
[
"Apache-2.0"
] | null | null | null |
IQR.py
|
Elonisme/Bigdates
|
55459fe060d2ce0696fd9df43e0a07fc21550bd7
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import scipy.stats
class IQR:
    def Calculate_IQR(self):
        Q1 = scipy.stats.norm(0, 1).ppf(0.25)
        Q3 = scipy.stats.norm(0, 1).ppf(0.75)
        Upperfence = scipy.stats.norm(0, 1).cdf(Q3 + 1.5 * (Q3 - Q1))
        Lowerfence = scipy.stats.norm(0, 1).cdf(Q1 - 1.5 * (Q3 - Q1))
        probUL = round(Upperfence - Lowerfence, 4)
        probOutLiers = 1 - probUL
        print(u'Q1-μ=%.4f\u03C3, Q3-μ=%.4f\u03C3' % (Q1, Q3))
        print(u'IQR = Q3-Q1 = %.4f\u03C3' % (Q3 - Q1))
        print(u'Q3+1.5xIQR-μ=%.4f\u03C3' % (Q3 + 1.5 * (Q3 - Q1)))
        print(u'Q1-1.5xIQR-μ=%.4f\u03C3' % (Q1 - 1.5 * (Q3 - Q1)))
        print(u'P(Q1-1.5xIQR<x<Q3+1.5xIQR)=%.4f' % probUL)
        print(u'Probability of falling outside the fences = %.4f%%' % (100 * probOutLiers))
if __name__=='__main__':
I = IQR()
I.Calculate_IQR()
| 34.818182
| 62
| 0.570496
|
9889e66d6a69e3ab731ccf0f87ca35bb9ce918bd
| 3,788
|
py
|
Python
|
src/regnet/data/kitti/image_rescale.py
|
markrofail/multi-modal-deep-learning-for-vehicle-sensor-data-abstraction-and-attack-detection
|
2f252c072f3091bb27506978dd90311f7f82f386
|
[
"MIT"
] | null | null | null |
src/regnet/data/kitti/image_rescale.py
|
markrofail/multi-modal-deep-learning-for-vehicle-sensor-data-abstraction-and-attack-detection
|
2f252c072f3091bb27506978dd90311f7f82f386
|
[
"MIT"
] | 6
|
2020-09-25T22:41:00.000Z
|
2021-06-08T21:50:37.000Z
|
src/regnet/data/kitti/image_rescale.py
|
markrofail/multi-modal-deep-learning-for-vehicle-sensor-data-abstraction-and-attack-detection
|
2f252c072f3091bb27506978dd90311f7f82f386
|
[
"MIT"
] | null | null | null |
import multiprocessing
from pathlib import Path
import numpy as np
from joblib import Parallel, delayed
from PIL import Image as im
from scipy.stats import entropy as entropy_helper
from tqdm import tqdm
from src.helpers import paths
from src.helpers.flags import Verbose
config = paths.config.read(paths.config.regnet())
IMAGE_SIZE = config['DATA_INFORMATION']['IMAGE_SIZE']
FAKE_KITTI_SIZE = (1392, 512)
REAL_KITTI_SIZE = (1242, 375)
OLD_SCALE_DIM = (1696, 512)
CALC_SCALE_DIM = (1216, 367)
SCALE_DIM = CALC_SCALE_DIM
# SCALE_DIM = OLD_SCALE_DIM
def entropy(image):
rgbHistogram = np.array(image.histogram())
print(np.prod(rgbHistogram.shape))
if np.prod(rgbHistogram.shape) <= 256:
rgbHistogram = np.tile(rgbHistogram, 3)
rgbHistogram = np.reshape(rgbHistogram, (3, 256)).astype(float)
ent = np.zeros((3, 1), dtype=float)
for i in range(3):
total = np.sum(rgbHistogram[i])
rgbHistogram[i] = np.divide(rgbHistogram[i], total)
ent[i] = entropy_helper(rgbHistogram[i], base=2)
ent = np.average(ent)
return ent
def _get_all_depth_frames(frames):
return [str(paths.depth.raw_frame(*frame_info)) for frame_info in frames]
def _get_all_camera_frames(frames):
return [str(paths.rgb.external_frame(*frame_info)) for frame_info in frames]
def _generate_output_paths(image_paths):
image_paths = np.array(image_paths)
replace_dir = np.vectorize(lambda x: x.replace('external', 'interim'))
output_paths = replace_dir(image_paths)
replace_dir = np.vectorize(lambda x: x.replace('raw', 'interim'))
output_paths = replace_dir(output_paths)
replace_dir = np.vectorize(lambda x: x.replace('image_02/data', 'rgb'))
output_paths = replace_dir(output_paths)
replace_dir = np.vectorize(lambda x: x.replace('depth_maps', 'depth'))
output_paths = replace_dir(output_paths)
return np.column_stack((image_paths, output_paths))
def preproccess_image(input_path, output_path, scale_dim=SCALE_DIM, final_dim=IMAGE_SIZE,
verbose=Verbose.NORMAL, keep=False):
input_path, output_path = Path(input_path), Path(output_path)
if not input_path.exists():
return
img = im.open(input_path)
# scaling the image
scaled_img = img.resize(scale_dim, resample=im.NEAREST)
# cropping the image
old_width, old_height = scale_dim
new_width, new_height = final_dim
bounds = ((old_width - new_width) // 2, (old_height - new_height) // 2,
(old_width + new_width) // 2, (old_height + new_height) // 2)
croped_img = scaled_img.crop(bounds)
output_path.parent.mkdir(exist_ok=True, parents=True) # ensure directory exists
croped_img.save(str(output_path), format='PNG')
if verbose == Verbose.DEBUG:
ent1 = entropy(img)
ent2 = entropy(croped_img)
diff = 100 - int(ent1 / ent2 * 100)
print('{}% information lost'.format(diff))
if not keep:
external_path = paths.DATA_EXTERNAL_PATH
if input_path.exists() and external_path not in input_path.parents:
input_path.unlink()
def preproccess_frames(frames, verbose=Verbose.NORMAL, keep=False):
input_paths = list()
# grab all depthmap paths
depth_frames = _get_all_depth_frames(frames)
input_paths.extend(depth_frames)
# grab all rgb images paths
camera_frames = _get_all_camera_frames(frames)
input_paths.extend(camera_frames)
# gnerate ouputpath corresponding to each inputpath
image_paths = _generate_output_paths(input_paths)
if verbose > Verbose.SILENT:
info = '# processing images ' # for logging purposes
image_paths = tqdm(image_paths, ascii=True, desc=info)
n_jobs = multiprocessing.cpu_count() // 2
Parallel(n_jobs=n_jobs)(
delayed(preproccess_image)(input_path, output_path, verbose=verbose, keep=keep)
for input_path, output_path in image_paths)
| 30.548387
| 89
| 0.734952
|
f8731136926d5b5bb762c6ae855de2811f1a2c7a
| 2,288
|
py
|
Python
|
stashed/_appindicator.py
|
PlayerG9/PySysTray
|
7bd98fbc75c815551edbf27efa031cad8643ecdd
|
[
"MIT"
] | 1
|
2022-01-13T21:51:04.000Z
|
2022-01-13T21:51:04.000Z
|
stashed/_appindicator.py
|
PlayerG9/PySysTray
|
7bd98fbc75c815551edbf27efa031cad8643ecdd
|
[
"MIT"
] | null | null | null |
stashed/_appindicator.py
|
PlayerG9/PySysTray
|
7bd98fbc75c815551edbf27efa031cad8643ecdd
|
[
"MIT"
] | null | null | null |
# coding=utf-8
r"""
pystray
Copyright (C) 2021 PlayerG9
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
try:
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
except ValueError:
gi.require_version('AyatanaAppIndicator3', '0.1')
from gi.repository import AyatanaAppIndicator3 as AppIndicator
from ._util.gtk import GtkIcon, mainloop
from . import _base
class Icon(GtkIcon):
# We expand the menu on primary button click, and we do not even support
# empty menus
HAS_DEFAULT_ACTION = False
def __init__(self, *args, **kwargs):
super(Icon, self).__init__(*args, **kwargs)
self._appindicator = None
if self.icon:
self._update_icon()
@mainloop
def _show(self):
self._appindicator = AppIndicator.Indicator.new(
self.name,
'',
AppIndicator.IndicatorCategory.APPLICATION_STATUS)
self._appindicator.set_status(AppIndicator.IndicatorStatus.ACTIVE)
self._appindicator.set_icon(self._icon_path)
self._appindicator.set_menu(
self._menu_handle or self._create_default_menu())
@mainloop
def _hide(self):
self._appindicator = None
@mainloop
def _update_icon(self):
self._remove_fs_icon()
self._update_fs_icon()
if self._appindicator:
self._appindicator.set_icon(self._icon_path)
@mainloop
def _update_title(self):
self._appindicator.set_title(self.title)
def _finalize(self):
super(Icon, self)._finalize()
del self._appindicator
| 29.333333
| 77
| 0.706731
|
0831dab47fc59cac578c67ad57ae418e88ca6388
| 5,361
|
py
|
Python
|
spirit/extra/project_template/project_name/settings/base.py
|
xingetouzi/Spirit
|
0d298f0b608c3f3a2dbbc152a8da042eef6d2e05
|
[
"MIT"
] | 1
|
2020-12-08T01:09:30.000Z
|
2020-12-08T01:09:30.000Z
|
spirit/extra/project_template/project_name/settings/base.py
|
xingetouzi/Spirit
|
0d298f0b608c3f3a2dbbc152a8da042eef6d2e05
|
[
"MIT"
] | 1
|
2018-03-26T07:11:52.000Z
|
2018-03-26T07:11:52.000Z
|
spirit/extra/project_template/project_name/settings/base.py
|
xingetouzi/Spirit
|
0d298f0b608c3f3a2dbbc152a8da042eef6d2e05
|
[
"MIT"
] | 6
|
2018-06-25T02:17:53.000Z
|
2020-12-08T01:09:32.000Z
|
# -*- coding: utf-8 -*-
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
from __future__ import unicode_literals
import os
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'spirit.core',
'spirit.admin',
'spirit.search',
'spirit.user',
'spirit.user.admin',
'spirit.user.auth',
'spirit.category',
'spirit.category.admin',
'spirit.topic',
'spirit.topic.admin',
'spirit.topic.favorite',
'spirit.topic.moderate',
'spirit.topic.notification',
'spirit.topic.poll', # todo: remove in Spirit v0.6
'spirit.topic.private',
'spirit.topic.unread',
'spirit.comment',
'spirit.comment.bookmark',
'spirit.comment.flag',
'spirit.comment.flag.admin',
'spirit.comment.history',
'spirit.comment.like',
'spirit.comment.poll',
'djconfig',
'haystack',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'spirit.core.middleware.XForwardedForMiddleware',
'spirit.user.middleware.TimezoneMiddleware',
'spirit.user.middleware.LastIPMiddleware',
'spirit.user.middleware.LastSeenMiddleware',
'spirit.user.middleware.ActiveUserMiddleware',
'spirit.core.middleware.PrivateForumMiddleware',
'djconfig.middleware.DjConfigMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'djconfig.context_processors.config',
],
},
},
]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'spirit_cache',
},
'st_rate_limit': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'spirit_rl_cache',
'TIMEOUT': None
}
}
AUTHENTICATION_BACKENDS = [
'spirit.user.auth.backends.UsernameAuthBackend',
'spirit.user.auth.backends.EmailAuthBackend',
]
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(BASE_DIR, 'st_search'),
},
}
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
LOGIN_URL = 'spirit:user:auth:login'
LOGIN_REDIRECT_URL = 'spirit:user:update'
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Send an email to the site admins
# on error when DEBUG=False,
# log to console on error always.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
},
'console': {
'class': 'logging.StreamHandler',
},
'file': {
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'django.log'),
},
},
'loggers': {
'': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
}
}
| 27.492308
| 86
| 0.641671
|
7d0cbaf7d7eb01d3365004b46f88713faaacb07b
| 438
|
py
|
Python
|
kernel/__init__.py
|
omarsou/kernel_method_kaggle_challenge
|
0f2e85166112b231699d9c9f7e3ae894e5ff7766
|
[
"Apache-2.0"
] | 1
|
2021-03-26T08:40:07.000Z
|
2021-03-26T08:40:07.000Z
|
kernel/__init__.py
|
omarsou/kernel_method_kaggle_challenge
|
0f2e85166112b231699d9c9f7e3ae894e5ff7766
|
[
"Apache-2.0"
] | null | null | null |
kernel/__init__.py
|
omarsou/kernel_method_kaggle_challenge
|
0f2e85166112b231699d9c9f7e3ae894e5ff7766
|
[
"Apache-2.0"
] | null | null | null |
from kernel.local_alignment_kernel import LocalAlignmentKernel
from kernel.spectrum_kernel import SpectrumKernel, SumSpectrumKernel
from kernel.substring_kernel import SubstringKernel
from kernel.base_kernel import KernelIPImplicit, KernelIPExplicit, SumKernelIPExplicit
__all__ = ["LocalAlignmentKernel", "SpectrumKernel", "SubstringKernel", "KernelIPImplicit",
"KernelIPExplicit", "SumKernelIPExplicit", "SumSpectrumKernel"]
| 62.571429
| 91
| 0.842466
|
7d0424d33e2eb7d4e5540ab4e5e895a5f07c6779
| 2,011
|
py
|
Python
|
frame_grabber.py
|
brix4dayz/TRiCAM2.0
|
716f154403c8c0aa903d7391bf4c14d45c778a22
|
[
"MIT"
] | 1
|
2015-08-11T20:50:36.000Z
|
2015-08-11T20:50:36.000Z
|
frame_grabber.py
|
brix4dayz/TRiCAM2.0
|
716f154403c8c0aa903d7391bf4c14d45c778a22
|
[
"MIT"
] | null | null | null |
frame_grabber.py
|
brix4dayz/TRiCAM2.0
|
716f154403c8c0aa903d7391bf4c14d45c778a22
|
[
"MIT"
] | null | null | null |
"""
frame_grabber.py
Usage: python frame_grabber.py <from dir> <to dir> <frame capture rate>
"""
import hpidol as hp
import cv2, sys, shutil
import numpy as np
import scipy.misc, os
import pandas as pd
from collections import Counter
from PIL import Image
def post_img(image):
return hp.recognize_logos(image)
def get_logos(job_id):
return hp.get_logos_result(job_id)
def do_videos(from_dir, to_dir, save_time = 1):
for video_name in os.listdir(from_dir):
        csv_file = os.path.join(to_dir, video_name[:-4] + ".csv")
if not os.path.isfile(csv_file):
f = open(csv_file, 'w')
f.write("video_time,job_id\n")
video_file = from_dir + "/" + video_name
cap = cv2.VideoCapture(video_file)
seconds_from_start = 0
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
video_time = cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC)
if ((video_time/1000) - seconds_from_start) > 1:
#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#frame = cv2.equalizeHist(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
scipy.misc.imsave("temp.jpg",frame)
#frame = Image.open("temp.jpg")
                    with open("temp.jpg", 'rb') as img_file:
                        job_id = post_img(img_file)
os.remove("temp.jpg")
f.write(str(video_time/1000) + "," + str(job_id['jobID']) + "\n")
seconds_from_start += save_time
f.close()
def get_logos_matrix(from_dir, to_file):
row_names = []
for csv in os.listdir(from_dir):
row_names.append(csv[:-4])
master_frame = pd.DataFrame(index = row_names)
for csv in os.listdir(from_dir):
csv_file = from_dir + "/" + csv
df = pd.read_csv(csv_file)
found_logos = []
for item in df["job_id"]:
logo = get_logos(item)
if (logo is not None) and logo != []:
                print(logo[0])
found_logos.append(logo[0])
for item in found_logos:
if item not in master_frame:
master_frame[item] = 0
master_frame[item][csv[:-4]] = int(master_frame[item][csv[:-4]]) + 1
master_frame.to_csv(to_file)
return pd.DataFrame.to_dict(master_frame)
| 25.782051
| 71
| 0.671308
|
7bdd1e0032319806b6c7e8699633bb3daf220ab1
| 5,855
|
py
|
Python
|
django/contrib/databrowse/plugins/calendars.py
|
Miserlou/django
|
35ddeee45573de57ae3c791bf36496b4a7028ddf
|
[
"BSD-3-Clause"
] | 2
|
2015-08-03T22:04:14.000Z
|
2015-08-03T22:04:16.000Z
|
django/contrib/databrowse/plugins/calendars.py
|
econchick/django
|
86c5c0154f69728eba4aad6204621f07cdd3459d
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/databrowse/plugins/calendars.py
|
econchick/django
|
86c5c0154f69728eba4aad6204621f07cdd3459d
|
[
"BSD-3-Clause"
] | 1
|
2019-07-15T02:35:16.000Z
|
2019-07-15T02:35:16.000Z
|
from __future__ import unicode_literals
from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.contrib.databrowse.sites import DatabrowsePlugin
from django.shortcuts import render_to_response
from django.utils.html import format_html, format_html_join
from django.utils.text import capfirst
from django.utils.encoding import force_unicode
from django.views.generic import dates
from django.utils import datetime_safe
class DateViewMixin(object):
allow_empty = False
allow_future = True
root_url = None
model = None
field = None
def get_context_data(self, **kwargs):
context = super(DateViewMixin, self).get_context_data(**kwargs)
context.update({
'root_url': self.root_url,
'model': self.model,
'field': self.field
})
return context
class DayView(DateViewMixin, dates.DayArchiveView):
template_name = 'databrowse/calendar_day.html'
class MonthView(DateViewMixin, dates.MonthArchiveView):
template_name = 'databrowse/calendar_month.html'
class YearView(DateViewMixin, dates.YearArchiveView):
template_name = 'databrowse/calendar_year.html'
class IndexView(DateViewMixin, dates.ArchiveIndexView):
template_name = 'databrowse/calendar_main.html'
class CalendarPlugin(DatabrowsePlugin):
def __init__(self, field_names=None):
self.field_names = field_names
def field_dict(self, model):
"""
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set, it takes
        that into account when building the dictionary.
"""
if self.field_names is None:
return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField)])
else:
return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField) and f.name in self.field_names])
def model_index_html(self, request, model, site):
fields = self.field_dict(model)
if not fields:
return ''
return format_html('<p class="filter"><strong>View calendar by:</strong> {0}</p>',
format_html_join(', ', '<a href="calendars/{0}/">{1}</a>',
((f.name, force_unicode(capfirst(f.verbose_name))) for f in fields.values())))
def urls(self, plugin_name, easy_instance_field):
if isinstance(easy_instance_field.field, models.DateField):
d = easy_instance_field.raw_value
return ['%s%s/%s/%s/%s/%s/' % (
easy_instance_field.model.url(),
plugin_name, easy_instance_field.field.name,
str(d.year),
datetime_safe.new_date(d).strftime('%b').lower(),
d.day)]
def model_view(self, request, model_databrowse, url):
self.model, self.site = model_databrowse.model, model_databrowse.site
self.fields = self.field_dict(self.model)
# If the model has no DateFields, there's no point in going further.
if not self.fields:
raise http.Http404('The requested model has no calendars.')
if url is None:
return self.homepage_view(request)
url_bits = url.split('/')
if url_bits[0] in self.fields:
return self.calendar_view(request, self.fields[url_bits[0]], *url_bits[1:])
raise http.Http404('The requested page does not exist.')
def homepage_view(self, request):
easy_model = EasyModel(self.site, self.model)
field_list = self.fields.values()
field_list.sort(key=lambda k:k.verbose_name)
return render_to_response('databrowse/calendar_homepage.html', {
'root_url': self.site.root_url,
'model': easy_model,
'field_list': field_list
})
def calendar_view(self, request, field, year=None, month=None, day=None):
easy_model = EasyModel(self.site, self.model)
root_url = self.site.root_url
if day is not None:
return DayView.as_view(
year=year, month=month, day=day,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
elif month is not None:
return MonthView.as_view(
year=year, month=month,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
elif year is not None:
return YearView.as_view(
year=year,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
else:
return IndexView.as_view(
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
assert False, ('%s, %s, %s, %s' % (field, year, month, day))
| 39.560811
| 132
| 0.570111
|
c5d235a4bc7f8a2aa2247a25e8a3f9e53b5e53cc
| 2,453
|
py
|
Python
|
docs/conf.py
|
Golman-Rahmanifar/desdeo-problem
|
ed2ab8d2d1d831692c21ec7956dd9380a3dc45aa
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Golman-Rahmanifar/desdeo-problem
|
ed2ab8d2d1d831692c21ec7956dd9380a3dc45aa
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Golman-Rahmanifar/desdeo-problem
|
ed2ab8d2d1d831692c21ec7956dd9380a3dc45aa
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("../desdeo_problem"))
# -- Project information -----------------------------------------------------
project = "desdeo_problem"
copyright = "2020, Multiobjective Optimization Group"
author = "Multiobjective Optimization Group"
# The full version, including alpha/beta/rc tags
release = "1.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"sphinx.ext.autosummary",
"sphinx.ext.autosectionlabel",
"sphinx_automodapi.automodapi",
"sphinx.ext.graphviz",
"sphinx.ext.viewcode",
"recommonmark",
"nbsphinx",
"sphinx.ext.mathjax",
]
numpydoc_show_class_members = False
source_suffix = [".rst", ".md"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_sourcelink_suffix = ""
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
master_doc = "index"
| 32.706667
| 79
| 0.674684
|
987d340cc7ccfdbc45ff4766c970e8b01b722c80
| 7,334
|
py
|
Python
|
garminexport/backup.py
|
Wave89/garminexport
|
248de4b26ed63bac4477044c4ec2a382eaea98ed
|
[
"Apache-2.0"
] | null | null | null |
garminexport/backup.py
|
Wave89/garminexport
|
248de4b26ed63bac4477044c4ec2a382eaea98ed
|
[
"Apache-2.0"
] | null | null | null |
garminexport/backup.py
|
Wave89/garminexport
|
248de4b26ed63bac4477044c4ec2a382eaea98ed
|
[
"Apache-2.0"
] | null | null | null |
"""Module with methods useful when backing up activities.
"""
import codecs
import json
import logging
import os
from datetime import datetime
log = logging.getLogger(__name__)
supported_export_formats = ["json_summary", "json_details", "gpx", "tcx", "fit"]
"""The range of supported export formats for activities."""
format_suffix = {
"json_summary": "_summary.json",
"json_details": "_details.json",
"gpx": ".gpx",
"tcx": ".tcx",
"fit": ".fit"
}
"""A table that maps export formats to their file format extensions."""
not_found_file = ".not_found"
"""A file that lists all tried but failed export attempts. The lines in
the file are the would-have-been file names, had the exports been successful.
An entry in the ``.not_found`` file is a strong indication of an
activity-format that simply doesn't exist and therefore should not be retried
on the next backup run. One such scenario is for manually created activities,
which cannot be exported to ``.fit`` format."""
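# For illustration only (the activity id and start time below are made up), an
# entry in ``.not_found`` is simply the would-have-been export file name, e.g.:
#
#     2015-02-17T05:45:00+00:00_123456789.fit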
def export_filename(activity, export_format):
"""Returns a destination file name to use for a given activity that is
to be exported to a given format. Exported files follow this pattern:
    ``<timestamp>_<activity_id><suffix>``.
For example: ``2015-02-17T05:45:00+00:00_123456789.tcx``
:param activity: An activity tuple `(id, starttime)`
:type activity: tuple of `(int, datetime)`
:param export_format: The export format (see :attr:`export_formats`)
:type export_format: str
:return: The file name to use for the exported activity.
:rtype: str
"""
fn = "{time}_{id}{suffix}".format(
id=activity[0],
time=activity[1].isoformat(),
suffix=format_suffix[export_format])
return fn.replace(':', '_') if os.name == 'nt' else fn
def need_backup(activities, backup_dir, export_formats=None):
"""From a given set of activities, return all activities that haven't been
backed up in a given set of export formats.
Activities are considered already backed up if they, for each desired
export format, have an activity file under the ``backup_dir`` *or*
if the activity file is listed in the ``.not_found`` file in the backup
directory.
:param activities: A list of activity tuples `(id, starttime)`
:type activities: list of tuples of `(int, datetime)`
:param backup_dir: Destination directory for exported activities.
:type backup_dir: str
:keyword export_formats: Which format(s) to export to. Could be any
of: 'json_summary', 'json_details', 'gpx', 'tcx', 'fit'.
:type export_formats: list of str
:return: All activities that need to be backed up.
:rtype: set of tuples of `(int, datetime)`
"""
need_backup = set()
backed_up = os.listdir(backup_dir) + _not_found_activities(backup_dir)
# get all activities missing at least one export format
for activity in activities:
activity_files = [export_filename(activity, f) for f in export_formats]
if any(f not in backed_up for f in activity_files):
need_backup.add(activity)
return need_backup
def _not_found_activities(backup_dir):
# consider all entries in <backup_dir>/.not_found as backed up
# (or rather, as tried but failed back ups)
failed_activities = []
_not_found = os.path.join(backup_dir, not_found_file)
if os.path.isfile(_not_found):
with open(_not_found, mode="r") as f:
failed_activities = [line.strip() for line in f.readlines()]
log.debug("%d tried but failed activities in %s", len(failed_activities), _not_found)
return failed_activities
def download(client, activity, retryer, backup_dir, export_formats=None):
"""Exports a Garmin Connect activity to a given set of formats
and saves the resulting file(s) to a given backup directory.
In case a given format cannot be exported for the activity, the
file name will be appended to the :attr:`not_found_file` in the
backup directory (to prevent it from being retried on subsequent
backup runs).
:param client: A :class:`garminexport.garminclient.GarminClient`
instance that is assumed to be connected.
:type client: :class:`garminexport.garminclient.GarminClient`
:param activity: An activity tuple `(id, starttime)`
:type activity: tuple of `(int, datetime)`
:param retryer: A :class:`garminexport.retryer.Retryer` instance that
will handle failed download attempts.
:type retryer: :class:`garminexport.retryer.Retryer`
:param backup_dir: Backup directory path (assumed to exist already).
:type backup_dir: str
:keyword export_formats: Which format(s) to export to. Could be any
of: 'json_summary', 'json_details', 'gpx', 'tcx', 'fit'.
:type export_formats: list of str
"""
id = activity[0]
if 'json_summary' in export_formats:
log.debug("getting json summary for %s", id)
activity_summary = retryer.call(client.get_activity_summary, id)
dest = os.path.join(
backup_dir, export_filename(activity, 'json_summary'))
with codecs.open(dest, encoding="utf-8", mode="w") as f:
f.write(json.dumps(activity_summary, ensure_ascii=False, indent=4))
if 'json_details' in export_formats:
log.debug("getting json details for %s", id)
activity_details = retryer.call(client.get_activity_details, id)
dest = os.path.join(backup_dir, export_filename(activity, 'json_details'))
with codecs.open(dest, encoding="utf-8", mode="w") as f:
f.write(json.dumps(activity_details, ensure_ascii=False, indent=4))
not_found_path = os.path.join(backup_dir, not_found_file)
with open(not_found_path, mode="a") as not_found:
if 'gpx' in export_formats:
            if not os.path.isfile(os.path.join(backup_dir, export_filename(activity, 'gpx'))):
log.debug("getting gpx for %s", id)
activity_gpx = retryer.call(client.get_activity_gpx, id)
dest = os.path.join(backup_dir, export_filename(activity, 'gpx'))
if activity_gpx is None:
not_found.write(os.path.basename(dest) + "\n")
else:
with codecs.open(dest, encoding="utf-8", mode="w") as f:
f.write(activity_gpx)
else:
log.debug("file already exist gpx for %s", id)
if 'tcx' in export_formats:
log.debug("getting tcx for %s", id)
activity_tcx = retryer.call(client.get_activity_tcx, id)
dest = os.path.join(backup_dir, export_filename(activity, 'tcx'))
if activity_tcx is None:
not_found.write(os.path.basename(dest) + "\n")
else:
with codecs.open(dest, encoding="utf-8", mode="w") as f:
f.write(activity_tcx)
if 'fit' in export_formats:
log.debug("getting fit for %s", id)
activity_fit = retryer.call(client.get_activity_fit, id)
dest = os.path.join(
backup_dir, export_filename(activity, 'fit'))
if activity_fit is None:
not_found.write(os.path.basename(dest) + "\n")
else:
with open(dest, mode="wb") as f:
f.write(activity_fit)
| 43.141176
| 89
| 0.663621
|
cf01d655e9bc9ff3305d3daf9e8af5668c611fdf
| 2,626
|
py
|
Python
|
lib/toute/utils/payload/filters.py
|
luiscastilho/toute
|
56207d3d92b8bc1066e115c285cf79f96be3e249
|
[
"MIT"
] | 3
|
2021-01-04T02:12:33.000Z
|
2021-09-22T14:41:39.000Z
|
lib/toute/utils/payload/filters.py
|
luiscastilho/toute
|
56207d3d92b8bc1066e115c285cf79f96be3e249
|
[
"MIT"
] | 6
|
2020-08-04T22:34:12.000Z
|
2020-10-10T12:40:14.000Z
|
lib/toute/utils/payload/filters.py
|
luiscastilho/toute
|
56207d3d92b8bc1066e115c285cf79f96be3e249
|
[
"MIT"
] | 1
|
2020-08-04T21:59:43.000Z
|
2020-08-04T21:59:43.000Z
|
from toute.utils.payload.meta import BaseFilterQuery, MetaFilterQuery
from toute.utils.payload.exception import NoFilter
from six import with_metaclass
FILTERS = {
'and_': ['_filter'],
'bool': {
'kwargs': ({('must', 'must_not', 'should'): ['_filter']},)
},
'exists': {
'args': ('field',)
},
'geo_bounding_box': {
'field': True,
'kwargs': ('top_left', 'bottom_right')
},
'geo_distance': {
'field': True,
'kwargs': ('lat', 'lon')
},
'geo_distance_range': {
'field': True,
'kwargs': ('lat', 'lon')
},
'geo_polygon': {
'field': True,
'args': ({'points': []},)
},
'geo_shape': {
'field': True,
'kwargs': ('type', {'coordinates': []}),
'field_process': lambda q: {'shape': q}
},
'geohash_shell': {
'field': True,
'kwargs': ('lat', 'lon',)
},
'has_child': {
'args': ('type',),
'kwargs': ({'query': '_query', 'filter': '_filter'},)
},
'has_parent': {
'args': ('parent_type',),
'kwargs': ({'query': '_query', 'filter': '_filter'},)
},
'ids': {
'args': ({'values': []},),
'kwargs': ('type',)
},
'indices': {
'args': ({'indices': []},),
'kwargs': ({('filter', 'no_match_filter'): '_filter'},)
},
'limit': {
'args': ('value',)
},
'match_all': {},
'missing': {
'args': ('field',)
},
'nested': {
'args': ('path', {'filter': '_filter'}),
},
'not_': {
'kwargs': ({'query': '_query', 'filter': '_filter'},)
},
'or_': ['_filter'],
'prefix': {
'field': True,
'args': ('value',)
},
'range': {
'field': True,
'kwargs': ('gte', 'gt', 'lte', 'lt')
},
'regexp': {
'field': True,
'args': ('value',),
'kwargs': ('flags', 'max_determinized_states')
},
'script': {
'args': ('script',)
},
'term': {
'field': True,
'args': ('value',)
},
'terms': {
'field': True,
'value_only': True,
'args': ({'value': []},)
},
'type': {
'args': ('value',)
}
}
class Filter(with_metaclass(MetaFilterQuery, BaseFilterQuery)):
_ee_type = 'filter'
_definitions = FILTERS
_exception = NoFilter
@classmethod
def query(cls, query, cache=False):
if cache:
return cls('fquery', {
'query': query,
'_cache': True
})
else:
return cls('query', query)
| 22.834783
| 69
| 0.436786
|
432801469906a143c19b7d124c3c05e063e8ab94
| 9,913
|
py
|
Python
|
hydrogit/compilation.py
|
gydrogen/hydrogen
|
6c448b67471ce2bbef12a36a0182b58ac56a7da3
|
[
"MIT"
] | null | null | null |
hydrogit/compilation.py
|
gydrogen/hydrogen
|
6c448b67471ce2bbef12a36a0182b58ac56a7da3
|
[
"MIT"
] | null | null | null |
hydrogit/compilation.py
|
gydrogen/hydrogen
|
6c448b67471ce2bbef12a36a0182b58ac56a7da3
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import subprocess
import os
import fileinput
import re
import shutil
cmake_utils_dir = (Path(__file__).parent /
'llvm-ir-cmake-utils' / 'cmake').resolve()
assert cmake_utils_dir.exists()
hydrogit_target_tag = '_hydrogit'
class CompileManager:
def __init__(self, language, tmp):
self.language = language
self.tmp = tmp
self.versions_built = []
def build_all(self, verbose, with_cmake, rule):
'''
Run compilation step for all versions.
'''
for version_path in Path(self.tmp).iterdir():
if version_path.is_dir() and version_path.name != "cloned":
ver = Version(version_path, self.language)
try:
if with_cmake:
ver.build_cmake(verbose)
else:
ver.build_make(verbose, rule)
except Exception as msg:
print(f'{ver.version}: Error({msg}) - skipping')
continue
print(f'{ver.version}: Built successfully')
self.versions_built.append(ver)
assert len(self.versions_built) > 0, \
'No versions built'
class Version:
def __init__(self, root, language):
assert root.exists()
self.root = root
self.version = self.root.stem
self.build_path = self.root / './build'
self.cmake_path = self.root / '.'
self.llvm_ir_path = self.build_path / 'llvm-ir'
self.c_paths = []
self.bc_paths = []
self.language = language
def build_make(self, verbose, rule):
print('Hydrogit cleaning...')
        # Use a shell so the *.bc glob is actually expanded; a plain argv list
        # would pass the literal string '*.bc' to rm.
        subprocess.run('rm -f *.bc', shell=True,
                       stdout=None if verbose else subprocess.DEVNULL,
                       stderr=None if verbose else subprocess.DEVNULL,
                       cwd=self.root)
subprocess.run(['make', 'clean'],
stdout=None if verbose else subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL,
cwd=self.root)
print('Hydrogit configuring')
configure_proc = subprocess.run(
['bash', 'configure'],
stdout=None if verbose else subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL,
cwd=self.root
)
assert configure_proc.returncode == 0, \
f'configure returned error code {configure_proc.returncode}'
        print('Hydrogit cleaning again after configure')
        subprocess.run('rm -f *.bc', shell=True,
                       stdout=None if verbose else subprocess.DEVNULL,
                       stderr=None if verbose else subprocess.DEVNULL,
                       cwd=self.root)
subprocess.run(['make', 'clean'],
stdout=None if verbose else subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL,
cwd=self.root)
print('Hydrogit running make')
make_proc = subprocess.run([
'make',
rule,
'CC=clang',
'CPPFLAGS=-O0 -Xclang -disable-O0-optnone -g -flto',
'LDFLAGS=-flto -fuse-ld=lld -Wl,-save-temps'
],
stdout=None if verbose else subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL,
cwd=self.root)
assert make_proc.returncode == 0, \
f'make returned error code {make_proc.returncode}'
# Invoke llvm-dis
filename = next(self.root.glob('**/*.precodegen.bc'), None)
assert filename, \
f'no intermediate found in {self.root}'
outfile = f'{filename.parent/filename.stem[0:filename.stem.find(".")]}{hydrogit_target_tag}.bc'
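        # For illustration: an intermediate named foo.precodegen.bc (hypothetical
        # stem) becomes foo_hydrogit.bc in the same directory.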
llvmdis_proc = subprocess.run([
'llvm-dis',
filename,
'-o',
outfile,
],
stdout=None if verbose else subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL)
assert llvmdis_proc.returncode == 0, \
f'llvm-dis returned error code {llvmdis_proc.returncode}'
print(f'{self.version}: Gathering files...')
self.glob_files()
def build_cmake(self, verbose):
# Set up build path
self.setup_build_path()
# Transform CMakeLists.txt
root_cmakelist = self.cmake_path / 'CMakeLists.txt'
assert root_cmakelist.exists(), \
f'CMakeLists.txt not found in {str(root_cmakelist)}'
self.transform_cmakelists(root_cmakelist)
# Run CMake and collect the output
print(f'{self.version}: Running CMake...')
targets = self.cmake(verbose)
print(f'{self.version}: Building...')
self.make_cmake(targets, verbose)
print(f'{self.version}: Gathering files...')
self.glob_files()
def setup_build_path(self):
self.build_path.mkdir(exist_ok=True)
def glob_files(self):
'''
Gather sources and compiled bytecode for this version
'''
# gather C sources
if self.language == 'C':
for p in (self.root).glob('**/*.c'):
self.c_paths.append(p)
# for p in (self.root / 'src').glob('**/*.c'):
# self.c_paths.append(p)
# gather C++ sources
elif self.language == 'CXX':
for p in (self.root).glob('*.cpp'):
self.c_paths.append(p)
for p in (self.root / 'src').glob('**/*.cpp'):
self.c_paths.append(p)
assert any(self.c_paths), \
f'No {self.language} sources found'
# gather compiled bytecode
self.bc_paths = list(self.root.glob(f'**/*{hydrogit_target_tag}.bc'))
assert any(self.bc_paths), \
f'output bytecode not found in path {str(self.root)}'
def transform_cmakelists(self, cmakelists):
'''
        Append LLVM IR generation targets to the given CMakeLists.txt in place
'''
assert cmakelists.exists(), \
f'CMakeLists.txt not found at path {str(cmakelists)}'
with cmakelists.open('a') as file:
ir_gen = f'''
#{'='*10}LLVM IR generation
list(APPEND CMAKE_MODULE_PATH "{cmake_utils_dir}")
include(LLVMIRUtil)
enable_language(C)
get_directory_property(_allTargets BUILDSYSTEM_TARGETS)
foreach(_target ${{_allTargets}})
get_target_property(_type ${{_target}} TYPE)
message(STATUS "Hydrogit saw target ${{_target}} type ${{_type}}")
if((_type STREQUAL "EXECUTABLE") OR (_type STREQUAL "STATIC_LIBRARY") OR (_type STREQUAL "SHARED_LIBRARY"))
message(STATUS "Hydrogit adding IR for target ${{_target}} type ${{_type}}")
set_target_properties(${{_target}} PROPERTIES LINKER_LANGUAGE C)
add_compile_options(-c -O0 -Xclang -disable-O0-optnone -g -emit-llvm -S)
llvmir_attach_bc_target(${{_target}}_bc ${{_target}})
add_dependencies(${{_target}}_bc ${{_target}})
llvmir_attach_link_target(${{_target}}{hydrogit_target_tag} ${{_target}}_bc -S)
endif()
endforeach(_target ${{_allTargets}})
# end LLVM IR generation
#{'='*10}'''
file.write(ir_gen)
def cmake(self, verbose):
'''
Run CMake with the given target
'''
stdout = None if verbose else subprocess.DEVNULL
stderr = None if verbose else subprocess.DEVNULL
compile_env = os.environ.copy()
if self.language == 'C':
compile_env['CC'] = 'clang'
elif self.language == 'CXX':
compile_env['CXX'] = 'clang++'
cmake_proc = subprocess.run(args=[
'cmake',
'-B', str(self.build_path),
str(self.cmake_path)
],
stdout=stdout,
stderr=stderr,
text=True,
env=compile_env
)
assert cmake_proc.returncode == 0, \
f'CMake step returned error code {cmake_proc.returncode}'
assert self.llvm_ir_path.exists(), \
f'LLVM IR output directory {str(self.llvm_ir_path)} does not exist'
target_bcs = list(self.llvm_ir_path.glob(f'*{hydrogit_target_tag}'))
assert any(target_bcs), \
f'No CMake output found in path {str(self.llvm_ir_path)}'
for bc in target_bcs:
assert bc.exists(), \
f'CMake output not found for LLVM IR target {bc}'
targets = [bc.stem for bc in target_bcs]
return targets
def make_cmake(self, targets, verbose):
for target in targets:
try:
print(f'{self.version}: Building target {target}...',
end='', flush=True)
args = [
'cmake',
'--build',
str(self.build_path),
'--target', target,
]
if verbose:
args.append('--verbose') # show make output
stdout = None if verbose else subprocess.DEVNULL
stderr = None if verbose else subprocess.DEVNULL
build_proc = subprocess.run(
args=args,
stdout=stdout,
stderr=stderr,
text=True
)
assert build_proc.returncode == 0, \
f'Build step returned error code {build_proc.returncode}'
print('done')
except Exception as ex:
# Print the newline & bubble if there's an error while processing
print()
if str(ex):
print(f'{self.version}: {target}: Error({ex}) - skipping')
else:
raise ex
def main():
    v = Version(Path('./tmp/7642d172e10a890975696d28278e5192d81afc5b'), 'C')
    # Version() only takes a root path and a language; 'all' below is assumed as
    # a placeholder make rule for this ad-hoc test.
    v.build_make(True, 'all')
v.glob_files()
print(v.bc_paths)
if __name__ == '__main__':
main()
| 34.301038
| 111
| 0.562494
|
dda9cf04d933f7a81d10c2233682bcea27c7aaaf
| 12,660
|
py
|
Python
|
exchangelib/version.py
|
mishmashclone/ecederstrand-exchangelib
|
1bbae0e527dc82a45bf3b5946b438d69de96c20f
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/version.py
|
mishmashclone/ecederstrand-exchangelib
|
1bbae0e527dc82a45bf3b5946b438d69de96c20f
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/version.py
|
mishmashclone/ecederstrand-exchangelib
|
1bbae0e527dc82a45bf3b5946b438d69de96c20f
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import re
from .errors import TransportError, ResponseMessageError, InvalidTypeError
from .util import xml_to_str, TNS
log = logging.getLogger(__name__)
# Legend for dict:
# Key: shortname
# Values: (EWS API version ID, full name)
# 'shortname' comes from types.xsd and is the official version of the server, corresponding to the version numbers
# supplied in SOAP headers. 'API version' is the version name supplied in the RequestServerVersion element in SOAP
# headers and describes the EWS API version the server implements. Valid values for this element are described here:
# https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/requestserverversion
VERSIONS = {
'Exchange2007': ('Exchange2007', 'Microsoft Exchange Server 2007'),
'Exchange2007_SP1': ('Exchange2007_SP1', 'Microsoft Exchange Server 2007 SP1'),
'Exchange2007_SP2': ('Exchange2007_SP1', 'Microsoft Exchange Server 2007 SP2'),
'Exchange2007_SP3': ('Exchange2007_SP1', 'Microsoft Exchange Server 2007 SP3'),
'Exchange2010': ('Exchange2010', 'Microsoft Exchange Server 2010'),
'Exchange2010_SP1': ('Exchange2010_SP1', 'Microsoft Exchange Server 2010 SP1'),
'Exchange2010_SP2': ('Exchange2010_SP2', 'Microsoft Exchange Server 2010 SP2'),
'Exchange2010_SP3': ('Exchange2010_SP2', 'Microsoft Exchange Server 2010 SP3'),
'Exchange2013': ('Exchange2013', 'Microsoft Exchange Server 2013'),
'Exchange2013_SP1': ('Exchange2013_SP1', 'Microsoft Exchange Server 2013 SP1'),
'Exchange2015': ('Exchange2015', 'Microsoft Exchange Server 2015'),
'Exchange2015_SP1': ('Exchange2015_SP1', 'Microsoft Exchange Server 2015 SP1'),
'Exchange2016': ('Exchange2016', 'Microsoft Exchange Server 2016'),
'Exchange2019': ('Exchange2019', 'Microsoft Exchange Server 2019'),
}
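# For example, VERSIONS['Exchange2007_SP2'] maps to API version 'Exchange2007_SP1'
# with full name 'Microsoft Exchange Server 2007 SP2'; the EWS API version can lag
# behind the product name.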
# Build a list of unique API versions, used when guessing API version supported by the server. Use reverse order so we
# get the newest API version supported by the server.
API_VERSIONS = sorted({v[0] for v in VERSIONS.values()}, reverse=True)
class Build:
"""Holds methods for working with build numbers."""
# List of build numbers here: https://docs.microsoft.com/en-us/exchange/new-features/build-numbers-and-release-dates
API_VERSION_MAP = {
8: {
0: 'Exchange2007',
1: 'Exchange2007_SP1',
2: 'Exchange2007_SP1',
3: 'Exchange2007_SP1',
},
14: {
0: 'Exchange2010',
1: 'Exchange2010_SP1',
2: 'Exchange2010_SP2',
3: 'Exchange2010_SP2',
},
15: {
0: 'Exchange2013', # Minor builds starting from 847 are Exchange2013_SP1, see api_version()
1: 'Exchange2016',
2: 'Exchange2019',
20: 'Exchange2016', # This is Office365. See issue #221
},
}
__slots__ = 'major_version', 'minor_version', 'major_build', 'minor_build'
def __init__(self, major_version, minor_version, major_build=0, minor_build=0):
if not isinstance(major_version, int):
raise InvalidTypeError('major_version', major_version, int)
if not isinstance(minor_version, int):
raise InvalidTypeError('minor_version', minor_version, int)
if not isinstance(major_build, int):
raise InvalidTypeError('major_build', major_build, int)
if not isinstance(minor_build, int):
raise InvalidTypeError('minor_build', minor_build, int)
self.major_version = major_version
self.minor_version = minor_version
self.major_build = major_build
self.minor_build = minor_build
if major_version < 8:
raise ValueError(f"Exchange major versions below 8 don't support EWS ({self})")
@classmethod
def from_xml(cls, elem):
xml_elems_map = {
'major_version': 'MajorVersion',
'minor_version': 'MinorVersion',
'major_build': 'MajorBuildNumber',
'minor_build': 'MinorBuildNumber',
}
kwargs = {}
for k, xml_elem in xml_elems_map.items():
v = elem.get(xml_elem)
if v is None:
raise ValueError()
kwargs[k] = int(v) # Also raises ValueError
return cls(**kwargs)
@classmethod
def from_hex_string(cls, s):
"""Parse a server version string as returned in an autodiscover response. The process is described here:
https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/serverversion-pox#example
The string is a hex string that, converted to a 32-bit binary, encodes the server version. The rules are:
* The first 4 bits contain the version number structure version. Can be ignored
* The next 6 bits contain the major version number
* The next 6 bits contain the minor version number
* The next bit contains a flag. Can be ignored
* The next 15 bits contain the major build number
:param s:
"""
bin_s = f'{int(s, 16):032b}' # Convert string to 32-bit binary string
major_version = int(bin_s[4:10], 2)
minor_version = int(bin_s[10:16], 2)
build_number = int(bin_s[17:32], 2)
return cls(major_version=major_version, minor_version=minor_version, major_build=build_number)
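    # Worked example for from_hex_string() (hypothetical value, not taken from a
    # real autodiscover response): '03C18880' is 00000011 11000001 10001000
    # 10000000 in binary, so major=0b001111 (15), minor=0b000001 (1) and major
    # build=0b000100010000000 (2176), i.e. Build(15, 1, 2176).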
def api_version(self):
if EXCHANGE_2013_SP1 <= self < EXCHANGE_2016:
return 'Exchange2013_SP1'
try:
return self.API_VERSION_MAP[self.major_version][self.minor_version]
except KeyError:
raise ValueError(f'API version for build {self} is unknown')
def fullname(self):
return VERSIONS[self.api_version()][1]
def __cmp__(self, other):
# __cmp__ is not a magic method in Python3. We'll just use it here to implement comparison operators
c = (self.major_version > other.major_version) - (self.major_version < other.major_version)
if c != 0:
return c
c = (self.minor_version > other.minor_version) - (self.minor_version < other.minor_version)
if c != 0:
return c
c = (self.major_build > other.major_build) - (self.major_build < other.major_build)
if c != 0:
return c
return (self.minor_build > other.minor_build) - (self.minor_build < other.minor_build)
def __eq__(self, other):
return self.__cmp__(other) == 0
def __hash__(self):
return hash(repr(self))
def __ne__(self, other):
return self.__cmp__(other) != 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __str__(self):
return f'{self.major_version}.{self.minor_version}.{self.major_build}.{self.minor_build}'
def __repr__(self):
return self.__class__.__name__ \
+ repr((self.major_version, self.minor_version, self.major_build, self.minor_build))
# Helpers for comparison operations elsewhere in this package
EXCHANGE_2007 = Build(8, 0)
EXCHANGE_2007_SP1 = Build(8, 1)
EXCHANGE_2010 = Build(14, 0)
EXCHANGE_2010_SP1 = Build(14, 1)
EXCHANGE_2010_SP2 = Build(14, 2)
EXCHANGE_2013 = Build(15, 0)
EXCHANGE_2013_SP1 = Build(15, 0, 847)
EXCHANGE_2016 = Build(15, 1)
EXCHANGE_2019 = Build(15, 2)
EXCHANGE_O365 = Build(15, 20)
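# These constants compare with the rich comparison operators defined on Build.
# A hypothetical capability check (names invented for illustration):
#
#     if account.version.build >= EXCHANGE_2013_SP1:
#         ...  # safe to use a 2013 SP1+ feature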
class Version:
"""Holds information about the server version."""
__slots__ = 'build', 'api_version'
def __init__(self, build, api_version=None):
if api_version is None:
if not isinstance(build, Build):
raise InvalidTypeError('build', build, Build)
self.api_version = build.api_version()
else:
if not isinstance(build, (Build, type(None))):
raise InvalidTypeError('build', build, Build)
if not isinstance(api_version, str):
raise InvalidTypeError('api_version', api_version, str)
self.api_version = api_version
self.build = build
@property
def fullname(self):
return VERSIONS[self.api_version][1]
@classmethod
def guess(cls, protocol, api_version_hint=None):
"""Ask the server which version it has. We haven't set up an Account object yet, so we generate requests
by hand. We only need a response header containing a ServerVersionInfo element.
To get API version and build numbers from the server, we need to send a valid SOAP request. We can't do that
without a valid API version. To solve this chicken-and-egg problem, we try all possible API versions that this
package supports, until we get a valid response.
:param protocol:
:param api_version_hint: (Default value = None)
"""
from .services import ResolveNames
# The protocol doesn't have a version yet, so default to latest supported version if we don't have a hint.
api_version = api_version_hint or API_VERSIONS[0]
log.debug('Asking server for version info using API version %s', api_version)
# We don't know the build version yet. Hopefully, the server will report it in the SOAP header. Lots of
# places expect a version to have a build, so this is a bit dangerous, but passing a fake build around is also
# dangerous. Make sure the call to ResolveNames does not require a version build.
protocol.config.version = Version(build=None, api_version=api_version)
# Use ResolveNames as a minimal request to the server to test if the version is correct. If not, ResolveNames
# will try to guess the version automatically.
name = str(protocol.credentials) if protocol.credentials and str(protocol.credentials) else 'DUMMY'
try:
list(ResolveNames(protocol=protocol).call(unresolved_entries=[name]))
except ResponseMessageError as e:
# We may have survived long enough to get a new version
if not protocol.config.version.build:
raise TransportError(f'No valid version headers found in response ({e!r})')
if not protocol.config.version.build:
raise TransportError('No valid version headers found in response')
return protocol.config.version
@staticmethod
def _is_invalid_version_string(version):
# Check if a version string is bogus, e.g. V2_, V2015_ or V2018_
return re.match(r'V[0-9]{1,4}_.*', version)
@classmethod
def from_soap_header(cls, requested_api_version, header):
info = header.find(f'{{{TNS}}}ServerVersionInfo')
if info is None:
raise TransportError(f'No ServerVersionInfo in header: {xml_to_str(header)!r}')
try:
build = Build.from_xml(elem=info)
except ValueError:
raise TransportError(f'Bad ServerVersionInfo in response: {xml_to_str(header)!r}')
# Not all Exchange servers send the Version element
api_version_from_server = info.get('Version') or build.api_version()
if api_version_from_server != requested_api_version:
if cls._is_invalid_version_string(api_version_from_server):
                # For unknown reasons, Office 365 may respond with an API version string that is invalid in a request.
# Detect these so we can fallback to a valid version string.
log.debug('API version "%s" worked but server reports version "%s". Using "%s"', requested_api_version,
api_version_from_server, requested_api_version)
api_version_from_server = requested_api_version
else:
# Trust API version from server response
log.debug('API version "%s" worked but server reports version "%s". Using "%s"', requested_api_version,
api_version_from_server, api_version_from_server)
return cls(build=build, api_version=api_version_from_server)
def __eq__(self, other):
if self.api_version != other.api_version:
return False
if self.build and not other.build:
return False
if other.build and not self.build:
return False
return self.build == other.build
def __repr__(self):
return self.__class__.__name__ + repr((self.build, self.api_version))
def __str__(self):
return f'Build={self.build}, API={self.api_version}, Fullname={self.fullname}'
| 44.734982
| 120
| 0.661769
|
35061f85dd455fa6d43f89bc51f8a39d6bf57f88
| 17,867
|
py
|
Python
|
intersight/model/thermal_policy.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/thermal_policy.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/thermal_policy.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.display_names import DisplayNames
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
from intersight.model.organization_organization_relationship import OrganizationOrganizationRelationship
from intersight.model.policy_abstract_config_profile_relationship import PolicyAbstractConfigProfileRelationship
from intersight.model.policy_abstract_policy import PolicyAbstractPolicy
from intersight.model.thermal_policy_all_of import ThermalPolicyAllOf
globals()['DisplayNames'] = DisplayNames
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
globals()['OrganizationOrganizationRelationship'] = OrganizationOrganizationRelationship
globals()['PolicyAbstractConfigProfileRelationship'] = PolicyAbstractConfigProfileRelationship
globals()['PolicyAbstractPolicy'] = PolicyAbstractPolicy
globals()['ThermalPolicyAllOf'] = ThermalPolicyAllOf
class ThermalPolicy(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'THERMAL.POLICY': "thermal.Policy",
},
('object_type',): {
'THERMAL.POLICY': "thermal.Policy",
},
('fan_control_mode',): {
'BALANCED': "Balanced",
'LOWPOWER': "LowPower",
'HIGHPOWER': "HighPower",
'MAXIMUMPOWER': "MaximumPower",
'ACOUSTIC': "Acoustic",
},
}
validations = {
('description',): {
'max_length': 1024,
'regex': {
'pattern': r'^$|^[a-zA-Z0-9]+[\x00-\xFF]*$', # noqa: E501
},
},
('name',): {
'regex': {
'pattern': r'^[a-zA-Z0-9_.:-]{1,64}$', # noqa: E501
},
},
}
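    # Validation examples (illustrative): a name such as "chassis-acoustic_1"
    # satisfies the pattern ^[a-zA-Z0-9_.:-]{1,64}$ above, while "chassis acoustic!"
    # does not (the space and "!" fall outside the allowed character set).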
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'fan_control_mode': (str,), # noqa: E501
'organization': (OrganizationOrganizationRelationship,), # noqa: E501
'profiles': ([PolicyAbstractConfigProfileRelationship], none_type,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'moid': (str,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
'description': (str,), # noqa: E501
'name': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'fan_control_mode': 'FanControlMode', # noqa: E501
'organization': 'Organization', # noqa: E501
'profiles': 'Profiles', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'moid': 'Moid', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
'description': 'Description', # noqa: E501
'name': 'Name', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ThermalPolicy - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "thermal.Policy", must be one of ["thermal.Policy", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "thermal.Policy", must be one of ["thermal.Policy", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            fan_control_mode (str): Sets the Fan Control Mode of the System. High Power, Maximum Power and Acoustic modes are only supported for Cisco UCS X series Chassis. * `Balanced` - The fans run faster when needed based on the heat generated by the server. When possible, the fans return to the minimum required speed. * `LowPower` - The fans run at the minimum speed required to keep the server cool. * `HighPower` - The fans are kept at higher speed to emphasize performance over power consumption. This Mode is only supported for UCS X series Chassis. * `MaximumPower` - The fans are always kept at maximum speed. This option provides the most cooling and consumes the most power. This Mode is only supported for UCS X series Chassis. * `Acoustic` - The fan speed is reduced to reduce noise levels in acoustic-sensitive environments. This Mode is only supported for UCS X series Chassis.. [optional] if omitted the server will use the default value of "Balanced"  # noqa: E501
organization (OrganizationOrganizationRelationship): [optional] # noqa: E501
profiles ([PolicyAbstractConfigProfileRelationship], none_type): An array of relationships to policyAbstractConfigProfile resources.. [optional] # noqa: E501
account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501
create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501
domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501
mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501
moid (str): The unique identifier of this Managed Object instance.. [optional] # noqa: E501
owners ([str], none_type): [optional] # noqa: E501
shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional] # noqa: E501
tags ([MoTag], none_type): [optional] # noqa: E501
version_context (MoVersionContext): [optional] # noqa: E501
ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
parent (MoBaseMoRelationship): [optional] # noqa: E501
permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
display_names (DisplayNames): [optional] # noqa: E501
description (str): Description of the policy.. [optional] # noqa: E501
name (str): Name of the concrete policy.. [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "thermal.Policy")
object_type = kwargs.get('object_type', "thermal.Policy")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
PolicyAbstractPolicy,
ThermalPolicyAllOf,
],
'oneOf': [
],
}
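# --- Illustrative usage sketch (not part of the generated model) ---
# Assuming the `intersight` package is installed so lazy_import() can resolve the
# composed parent models, a ThermalPolicy can be built purely client-side. The
# keyword names and the allowed FanControlMode values come from the docstring
# above; actually creating the policy on Intersight would additionally need an
# authenticated ApiClient, which is out of scope for this module.
if __name__ == "__main__":
    example_policy = ThermalPolicy(
        name="chassis-acoustic",          # must match ^[a-zA-Z0-9_.:-]{1,64}$
        description="Quiet fan profile",  # free text, up to 1024 characters
        fan_control_mode="Acoustic",      # Balanced / LowPower / HighPower / MaximumPower / Acoustic
    )
    print(example_policy.fan_control_mode)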
avg_line_length: 54.639144 | max_line_length: 1,678 | alphanum_fraction: 0.638663

hexsha: 09de13b3106b2432757a59c7a617d8b0f59c24dd | size: 5,784 | ext: py | lang: Python
max_stars_repo_path: detectron2/modeling/da_heads/da_roi_head.py | max_stars_repo_name: Sage0116/detectron2 | max_stars_repo_head_hexsha: 34c70af9e561269b016196d87f30d80f5d61fa3c | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: detectron2/modeling/da_heads/da_roi_head.py | max_issues_repo_name: Sage0116/detectron2 | max_issues_repo_head_hexsha: 34c70af9e561269b016196d87f30d80f5d61fa3c | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: detectron2/modeling/da_heads/da_roi_head.py | max_forks_repo_name: Sage0116/detectron2 | max_forks_repo_head_hexsha: 34c70af9e561269b016196d87f30d80f5d61fa3c | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from typing import Dict, List, Optional
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads import StandardROIHeads, ROI_HEADS_REGISTRY, FastRCNNOutputLayers, build_box_head
from detectron2.modeling.poolers import ROIPooler
from detectron2.structures import Instances, Boxes
from detectron2.config import configurable
import torch
import torch.nn as nn
@ROI_HEADS_REGISTRY.register()
class DAROIHeads(StandardROIHeads):
@configurable
def __init__(self,
*,
box_in_features: List[str],
box_pooler: ROIPooler,
box_head: nn.Module,
box_predictor: nn.Module,
context_regularization_feat: List[str],
context_regularization_on: bool,
mask_in_features: Optional[List[str]] = None,
mask_pooler: Optional[ROIPooler] = None,
mask_head: Optional[nn.Module] = None,
keypoint_in_features: Optional[List[str]] = None,
keypoint_pooler: Optional[ROIPooler] = None,
keypoint_head: Optional[nn.Module] = None,
train_on_pred_boxes: bool = False,
**kwargs,):
super().__init__(box_in_features=box_in_features, box_pooler=box_pooler, box_head=box_head, box_predictor=box_predictor, \
mask_in_features=mask_in_features, mask_pooler=mask_pooler, mask_head=mask_head, keypoint_in_features=keypoint_in_features, \
keypoint_pooler=keypoint_pooler, keypoint_head=keypoint_head, train_on_pred_boxes=train_on_pred_boxes, \
**kwargs,)
self.context_regularization_feat = context_regularization_feat
self.context_regularization_on = context_regularization_on
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg, input_shape)
ret["context_regularization_feat"] = cfg.MODEL.ROI_HEADS.CONTEXT_REGULARIZATION_FEATURES
ret["context_regularization_on"] = cfg.MODEL.ROI_HEADS.CONTEXT_REGULARIZATION_ON
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
in_channels = [input_shape[f].channels for f in in_features]
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
        assert cfg.MODEL.DOMAIN_ADAPTATION_ON or not cfg.MODEL.ROI_HEADS.CONTEXT_REGULARIZATION_ON, 'when using context regularization, the network must have a domain adaptation head'
n = 1
if cfg.MODEL.ROI_HEADS.CONTEXT_REGULARIZATION_ON:
if cfg.MODEL.DA_HEADS.GLOBAL_ALIGNMENT_ON:
n += 1
if cfg.MODEL.DA_HEADS.LOCAL_ALIGNMENT_ON:
n += 1
box_predictor = FastRCNNOutputLayers(cfg, ShapeSpec(channels=n * box_head.output_shape.channels))
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_head": box_head,
"box_predictor": box_predictor,
}
def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]):
"""
Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
# context vector regularization, local and global alignment features
if self.context_regularization_on:
            reg_feat = [features[f].view(1, -1) for f in self.context_regularization_feat if features.get(f) is not None]
features = [features[f] for f in self.box_in_features]
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
if self.context_regularization_on:
box_features = torch.cat([f.repeat(box_features.size(0), 1) for f in reg_feat] + [box_features], 1)
predictions = self.box_predictor(box_features)
del box_features
if self.training:
losses = self.box_predictor.losses(predictions, proposals)
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals)
return pred_instances
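# --- Illustrative config sketch (assumptions flagged inline) ---
# DAROIHeads reads the extra keys below from the detectron2 config. They are not
# part of stock detectron2, so this assumes the fork has registered them on the
# config (e.g. through an add_*_config helper that is not shown here); the feature
# names in the list are placeholders for whatever the DA heads publish into
# the `features` dict at runtime.
#
#   cfg.MODEL.ROI_HEADS.NAME = "DAROIHeads"
#   cfg.MODEL.DOMAIN_ADAPTATION_ON = True
#   cfg.MODEL.ROI_HEADS.CONTEXT_REGULARIZATION_ON = True
#   cfg.MODEL.ROI_HEADS.CONTEXT_REGULARIZATION_FEATURES = ["da_global_context", "da_local_context"]  # hypothetical names
#   cfg.MODEL.DA_HEADS.GLOBAL_ALIGNMENT_ON = True
#   cfg.MODEL.DA_HEADS.LOCAL_ALIGNMENT_ON = True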
avg_line_length: 46.645161 | max_line_length: 178 | alphanum_fraction: 0.674447

hexsha: 4bf1e0a69edadc4ced7923335f702f39adf315bb | size: 2,561 | ext: py | lang: Python
max_stars_repo_path: evmoscan/app.py | max_stars_repo_name: onlyalt/evmos_contribution | max_stars_repo_head_hexsha: bdd76957d79c2041da46e51c7d2b4102da03da83 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: evmoscan/app.py | max_issues_repo_name: onlyalt/evmos_contribution | max_issues_repo_head_hexsha: bdd76957d79c2041da46e51c7d2b4102da03da83 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: evmoscan/app.py | max_forks_repo_name: onlyalt/evmos_contribution | max_forks_repo_head_hexsha: bdd76957d79c2041da46e51c7d2b4102da03da83 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2021-12-13T21:52:15.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-25T16:02:15.000Z
content:
import sqlite3
import time
import os
from flask import Flask, render_template, jsonify
from evmoscan.etl.utils import *
DB_FILE_PATH = os.getenv('DB_FILE_PATH')
def creat_app():
app = Flask(__name__)
if DB_FILE_PATH:
dbconn = sqlite3.connect(DB_FILE_PATH, isolation_level=None) # autocommits
app.cur = dbconn.cursor()
return app
app = creat_app()
MAX_BLOCK_DISPLAY = 10
@app.route("/")
def index():
current_block_number = get_latest_block_number()
current_gas_price = get_gas_price()
latest_blocks = [block_loader(json.loads(get_block_info_rpc(current_block_number - i))) for i in range(MAX_BLOCK_DISPLAY)]
latest_transactions = [transaction_loader(json.loads(get_transaction_info_rpc(item))) for sublist in [x.transactions for x in latest_blocks] for item in sublist] # flatten list of list
gas_fees = [x.gas_price / 1000000000 for x in latest_transactions]
avg_block_time = (latest_blocks[0].timestamp - latest_blocks[-1].timestamp) / (len(latest_blocks) - 1)
tps = len(latest_transactions) / (latest_blocks[0].timestamp - latest_blocks[-1].timestamp)
validators, n_validators = get_validators()
current_time = time.time()
return render_template(
"index.html",
hash=hash,
current_gas_price=current_gas_price,
current_block_number=current_block_number,
latest_blocks=latest_blocks,
current_time=current_time,
validators=validators,
n_validators=n_validators,
latest_transactions=latest_transactions,
gas_fees=gas_fees,
avg_block_time=avg_block_time,
tps=tps,
)
@app.route("/validator/<addr>")
def validator(addr):
validator_ = get_validator(addr)
delegations = get_delegation(addr)
return render_template("validator.html", validator=validator_, delegations=delegations)
@app.route("/tx/<hash>")
def transaction(hash):
transaction_ = transaction_loader(json.loads(get_transaction_info_rpc(hash)))
return render_template("transaction.html", transaction=transaction_)
@app.route("/tx/<hash>/<tracer>")
def tracer(hash, tracer):
traces = trace_tx(hash, tracer_name=tracer)
return jsonify(traces)
@app.route("/block/<block_number>")
def block(block_number):
block_ = block_loader(json.loads(get_block_info_rpc(block_number)))
return render_template("block.html", block=block_)
@app.route("/proposal/<prop_id>")
def proposal(prop_id):
proposal_ = get_proposal(prop_id)
return render_template("proposal.html", proposal=proposal_)
avg_line_length: 31.231707 | max_line_length: 189 | alphanum_fraction: 0.728622

hexsha: 8160ee5d1fe0ec45377fb18709a7d93d35ca021c | size: 491 | ext: py | lang: Python
max_stars_repo_path: models/coattention/__init__.py | max_stars_repo_name: Minys233/GCN-BMP | max_stars_repo_head_hexsha: 21b64a3c8cc9bc33718ae09c65aa917e575132eb | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: models/coattention/__init__.py | max_issues_repo_name: Minys233/GCN-BMP | max_issues_repo_head_hexsha: 21b64a3c8cc9bc33718ae09c65aa917e575132eb | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: models/coattention/__init__.py | max_forks_repo_name: Minys233/GCN-BMP | max_forks_repo_head_hexsha: 21b64a3c8cc9bc33718ae09c65aa917e575132eb | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/5/2019 5:41 PM
# @Author : chinshin
# @FileName: __init__.py.
import sys
from os.path import abspath, dirname
ROOT_PATH = dirname(dirname(dirname(abspath(__file__))))
sys.path.insert(0, ROOT_PATH)
from models.coattention.alternating_coattention import AlternatingCoattention
from models.coattention.parallel_coattention import ParallelCoattention
from models.coattention.global_coattention import GlobalCoattention
avg_line_length: 35.071429 | max_line_length: 78 | alphanum_fraction: 0.771894

hexsha: 9cda899e4191f987bbb2038fd7d44a11d0e3b890 | size: 1,844 | ext: py | lang: Python
max_stars_repo_path: cross3d/studiomax/_Py3dsMax_setup/site-packages/py3dsMax_startup.py | max_stars_repo_name: vedantirb/cross3d | max_stars_repo_head_hexsha: e27c2dc80bf607ef7ccf3970b713bfea8211228b | max_stars_repo_licenses: ["MIT"] | max_stars_count: 129 | max_stars_repo_stars_event_min_datetime: 2016-07-26T16:00:56.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-07T03:44:41.000Z
max_issues_repo_path: cross3d/studiomax/_Py3dsMax_setup/site-packages/py3dsMax_startup.py | max_issues_repo_name: vedantirb/cross3d | max_issues_repo_head_hexsha: e27c2dc80bf607ef7ccf3970b713bfea8211228b | max_issues_repo_licenses: ["MIT"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2016-08-26T01:37:03.000Z | max_issues_repo_issues_event_max_datetime: 2019-08-13T01:46:01.000Z
max_forks_repo_path: cross3d/studiomax/_Py3dsMax_setup/site-packages/py3dsMax_startup.py | max_forks_repo_name: vedantirb/cross3d | max_forks_repo_head_hexsha: e27c2dc80bf607ef7ccf3970b713bfea8211228b | max_forks_repo_licenses: ["MIT"] | max_forks_count: 33 | max_forks_repo_forks_event_min_datetime: 2016-07-30T00:26:00.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-09T07:10:54.000Z
content:
##
# \namespace __startup__.py
#
# \remarks Call any python initialization code
#
# \author beta@blur.com
# \author Blur Studio
# \date 12/05/08
#
# initialize the Py3dsMax system
import os
import Py3dsMax
from Py3dsMax import mxs
# At some point in initialization the install path is removed from the system path resulting in missing dlls when loading qt
# if it is installed in the max folder.
os.environ['path'] = ';'.join((mxs.pathConfig.resolvePathSymbols('$max'), os.environ.get('path', '')))
Py3dsMax.__dict__[ 'installPath' ] = mxs.pathConfig.resolvePathSymbols('$scripts\\python')
version = mxs.maxVersion()
if version:
version = version[0]
# initialize the sys module
import sys
if version < 17000:
# Max 2014 or older
sys.argv = ['']
sys.path.insert(0, Py3dsMax.installPath + r'\site-packages')
else:
# Max 2015 or newer
# Attempt to find the standard python install location and add it to sys.path
registry = 'HKEY_LOCAL_MACHINE'
key = r'SOFTWARE\Python\PythonCore\2.7\InstallPath'
import _winreg
aReg = _winreg.ConnectRegistry(None, getattr(_winreg, registry))
sam = _winreg.KEY_WOW64_64KEY
path = None
try:
regKey = _winreg.OpenKey(aReg, key, 0, _winreg.KEY_READ | sam)
if regKey:
path, regType = _winreg.QueryValueEx(regKey, '')
path = os.path.join(path, 'Lib', 'site-packages')
except WindowsError:
pass
if path:
sp = list(sys.path)
# Import the standard python install libs. This prevents us from having to install the same
# python packages into the max directory.
import site
site.addsitedir(path)
# Any paths added by addsitedir, should be moved below the default sys.path arguments.
# This ensures that max specific versions will get priority over global python modules.
for p in reversed(sp):
sys.path.remove(p)
sys.path.insert(0, p)
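# Illustrative effect of the block above (sketch): if the registry lookup resolves
# to e.g. C:\Python27\Lib\site-packages (a hypothetical install path), that
# directory is added through site.addsitedir(), and every entry that was already
# on sys.path is then re-inserted at the front in its original order, so the
# Max-specific paths keep priority over the global Python 2.7 install.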
avg_line_length: 30.733333 | max_line_length: 125 | alphanum_fraction: 0.7218

hexsha: e52255614e7ac4a867af5c78256fad59dc7fab33 | size: 9,729 | ext: py | lang: Python
max_stars_repo_path: dm_control/composer/observation/updater.py | max_stars_repo_name: willwhitney/dm_control | max_stars_repo_head_hexsha: f2568f59970f3c895f21e0c4278f1723d79b0933 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-05-29T15:49:15.000Z | max_stars_repo_stars_event_max_datetime: 2019-05-29T15:49:15.000Z
max_issues_repo_path: dm_control/composer/observation/updater.py | max_issues_repo_name: willwhitney/dm_control | max_issues_repo_head_hexsha: f2568f59970f3c895f21e0c4278f1723d79b0933 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: dm_control/composer/observation/updater.py | max_forks_repo_name: willwhitney/dm_control | max_forks_repo_head_hexsha: f2568f59970f3c895f21e0c4278f1723d79b0933 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-01-24T20:28:15.000Z | max_forks_repo_forks_event_max_datetime: 2021-01-24T20:28:15.000Z
content:
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An object that creates and updates buffers for enabled observables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dm_control.composer.observation import obs_buffer
import numpy as np
import six
from six.moves import range
from dm_control.rl import specs
DEFAULT_BUFFER_SIZE = 1
DEFAULT_UPDATE_INTERVAL = 1
DEFAULT_DELAY = 0
class _EnabledObservable(object):
"""Encapsulates an enabled observable, its buffer, and its update schedule."""
__slots__ = ('observable', 'buffer', 'update_schedule')
def __init__(self, observable, physics, random_state,
strip_singleton_buffer_dim):
self.observable = observable
# We take an observation here to determine the shape and size.
# This occurs outside of an episode and doesn't affect environment behavior.
obs_value = np.array(
observable.observation_callable(physics, random_state)())
self.buffer = obs_buffer.Buffer(
buffer_size=(observable.buffer_size or DEFAULT_BUFFER_SIZE),
shape=obs_value.shape, dtype=obs_value.dtype,
strip_singleton_buffer_dim=strip_singleton_buffer_dim)
self.update_schedule = collections.deque()
def _call_if_callable(arg):
if callable(arg):
return arg()
else:
return arg
def _validate_structure(structure):
"""Validates the structure of the given observables collection.
The collection must either be a dict, or a (list or tuple) of dicts.
Args:
structure: A candidate collection of observables.
Returns:
A boolean that is `True` if `structure` is either a list or a tuple, or
`False` otherwise.
Raises:
ValueError: If `structure` is neither a dict nor a (list or tuple) of dicts.
"""
is_nested = isinstance(structure, (list, tuple))
if is_nested:
is_valid = all(isinstance(obj, dict) for obj in structure)
else:
is_valid = isinstance(structure, dict)
if not is_valid:
raise ValueError(
'`observables` should be a dict, or a (list or tuple) of dicts'
': got {}'.format(structure))
return is_nested
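# Illustrative behaviour of _validate_structure (sketch, following the docstring):
#   _validate_structure({'pos': obs})                 -> False (flat dict of observables)
#   _validate_structure([{'pos': obs}, {'vel': obs}]) -> True  (list of dicts)
#   _validate_structure([obs])                        -> raises ValueError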
class Updater(object):
"""Creates and updates buffers for enabled observables."""
def __init__(self, observables, physics_steps_per_control_step=1,
strip_singleton_buffer_dim=False):
self._physics_steps_per_control_step = physics_steps_per_control_step
self._strip_singleton_buffer_dim = strip_singleton_buffer_dim
self._step_counter = 0
self._observables = observables
self._is_nested = _validate_structure(observables)
self._enabled_structure = None
self._enabled_list = None
def reset(self, physics, random_state):
"""Resets this updater's state."""
def make_buffers_dict(observables):
"""Makes observable states in a dict."""
# Use `type(observables)` so that our output structure respects the
# original dict subclass (e.g. OrderedDict).
out_dict = type(observables)()
for key, value in six.iteritems(observables):
if value.enabled:
out_dict[key] = _EnabledObservable(value, physics, random_state,
self._strip_singleton_buffer_dim)
return out_dict
if self._is_nested:
self._enabled_structure = type(self._observables)(
make_buffers_dict(obs_dict) for obs_dict in self._observables)
self._enabled_list = []
for enabled_dict in self._enabled_structure:
self._enabled_list.extend(enabled_dict.values())
else:
self._enabled_structure = make_buffers_dict(self._observables)
self._enabled_list = self._enabled_structure.values()
self._step_counter = 0
for enabled in self._enabled_list:
first_delay = _call_if_callable(enabled.observable.delay or DEFAULT_DELAY)
enabled.buffer.insert(
0, first_delay,
enabled.observable.observation_callable(physics, random_state)())
def observation_spec(self):
"""The observation specification for this environment.
The returned specification is only valid as of the previous call
to `reset`. In particular, it is an error to call this function before
the first call to `reset`.
Returns:
A dict mapping observation name to `ArraySpec` containing observation
shape and dtype.
Raises:
RuntimeError: If this method is called before `reset` has been called.
"""
if self._enabled_structure is None:
raise RuntimeError('`reset` must be called before `observation_spec`.')
def make_observation_spec_dict(enabled_dict):
"""Makes a dict of enabled observation specs from of observables."""
out_dict = type(enabled_dict)()
for name, enabled in six.iteritems(enabled_dict):
if enabled.observable.aggregator:
aggregated = enabled.observable.aggregator(
np.zeros(enabled.buffer.shape, dtype=enabled.buffer.dtype))
spec = specs.ArraySpec(
shape=aggregated.shape, dtype=aggregated.dtype, name=name)
else:
spec = specs.ArraySpec(
shape=enabled.buffer.shape, dtype=enabled.buffer.dtype, name=name)
out_dict[name] = spec
return out_dict
if self._is_nested:
enabled_specs = type(self._enabled_structure)(
make_observation_spec_dict(enabled_dict)
for enabled_dict in self._enabled_structure)
else:
enabled_specs = make_observation_spec_dict(self._enabled_structure)
return enabled_specs
def prepare_for_next_control_step(self):
"""Simulates the next control step and optimizes the update schedule."""
if self._enabled_structure is None:
raise RuntimeError('`reset` must be called before `before_step`.')
for enabled in self._enabled_list:
update_interval = (
enabled.observable.update_interval or DEFAULT_UPDATE_INTERVAL)
delay = enabled.observable.delay or DEFAULT_DELAY
buffer_size = enabled.observable.buffer_size or DEFAULT_BUFFER_SIZE
if (update_interval == DEFAULT_UPDATE_INTERVAL and delay == DEFAULT_DELAY
and buffer_size < self._physics_steps_per_control_step):
for i in reversed(range(buffer_size)):
next_step = (
self._step_counter + self._physics_steps_per_control_step - i)
next_delay = DEFAULT_DELAY
enabled.update_schedule.append((next_step, next_delay))
else:
if enabled.update_schedule:
last_scheduled_step = enabled.update_schedule[-1][0]
else:
last_scheduled_step = self._step_counter
max_step = self._step_counter + 2 * self._physics_steps_per_control_step
while last_scheduled_step < max_step:
next_update_interval = _call_if_callable(update_interval)
next_step = last_scheduled_step + next_update_interval
next_delay = _call_if_callable(delay)
enabled.update_schedule.append((next_step, next_delay))
last_scheduled_step = next_step
# Optimize the schedule by planning ahead and dropping unseen entries.
enabled.buffer.drop_unobserved_upcoming_items(
enabled.update_schedule, self._physics_steps_per_control_step)
def update(self, physics, random_state):
if self._enabled_structure is None:
raise RuntimeError('`reset` must be called before `after_substep`.')
self._step_counter += 1
for enabled in self._enabled_list:
if (enabled.update_schedule and
enabled.update_schedule[0][0] == self._step_counter):
timestamp, delay = enabled.update_schedule.popleft()
enabled.buffer.insert(
timestamp, delay,
enabled.observable.observation_callable(physics, random_state)())
def get_observation(self):
"""Gets the current observation.
The returned observation is only valid as of the previous call
to `reset`. In particular, it is an error to call this function before
the first call to `reset`.
Returns:
A dict, or list of dicts, or tuple of dicts, of observation values.
The returned structure corresponds to the structure of the `observables`
that was given at initialization time.
Raises:
RuntimeError: If this method is called before `reset` has been called.
"""
if self._enabled_structure is None:
raise RuntimeError('`reset` must be called before `observation`.')
def aggregate_dict(enabled_dict):
out_dict = type(enabled_dict)()
for name, enabled in six.iteritems(enabled_dict):
if enabled.observable.aggregator:
aggregated = enabled.observable.aggregator(
enabled.buffer.read(self._step_counter))
else:
aggregated = enabled.buffer.read(self._step_counter)
out_dict[name] = aggregated
return out_dict
if self._is_nested:
return type(self._enabled_structure)(
aggregate_dict(enabled_dict)
for enabled_dict in self._enabled_structure)
else:
return aggregate_dict(self._enabled_structure)
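# Illustrative call order for one control step (a sketch inferred from the method
# docstrings above; `physics`, `random_state` and the observables are assumed to
# be supplied by the enclosing composer environment, which is not shown here):
#
#   updater = Updater(observables, physics_steps_per_control_step=5)
#   updater.reset(physics, random_state)
#   spec = updater.observation_spec()
#   updater.prepare_for_next_control_step()
#   for _ in range(5):                       # one physics substep at a time
#       physics.step()
#       updater.update(physics, random_state)
#   obs = updater.get_observation()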
avg_line_length: 38.30315 | max_line_length: 80 | alphanum_fraction: 0.705622

hexsha: eaee32f541d711b6f1177c4d84077bb921ea1a33 | size: 2,531 | ext: py | lang: Python
max_stars_repo_path: alvm_tools/binutils.py | max_stars_repo_name: Achi-Coin/alvm_tools | max_stars_repo_head_hexsha: 93632f6647e1066b9ee85b0af1da6e43a43c5769 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: alvm_tools/binutils.py | max_issues_repo_name: Achi-Coin/alvm_tools | max_issues_repo_head_hexsha: 93632f6647e1066b9ee85b0af1da6e43a43c5769 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: alvm_tools/binutils.py | max_forks_repo_name: Achi-Coin/alvm_tools | max_forks_repo_head_hexsha: 93632f6647e1066b9ee85b0af1da6e43a43c5769 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2021-09-02T07:26:45.000Z | max_forks_repo_forks_event_max_datetime: 2021-11-08T19:55:37.000Z
content:
import string
from alvm import KEYWORD_FROM_ATOM, KEYWORD_TO_ATOM
from alvm.casts import int_from_bytes, int_to_bytes
from ir.reader import read_ir
from ir.writer import write_ir
from ir.utils import (
ir_as_symbol, ir_cons, ir_first, ir_listp, ir_null,
ir_nullp, ir_rest, ir_symbol, ir_val, is_ir
)
from ir.Type import Type
def assemble_from_ir(ir_sexp):
keyword = ir_as_symbol(ir_sexp)
if keyword:
if keyword[:1] == "#":
keyword = keyword[1:]
atom = KEYWORD_TO_ATOM.get(keyword)
if atom is not None:
return ir_sexp.to(atom)
        # Unknown keywords fall through and are treated as plain values, so the
        # SyntaxError below is effectively unreachable; it is kept as a guard.
        if True:
            return ir_val(ir_sexp)
        raise SyntaxError(
            "can't parse %s at %s" % (keyword, ir_sexp._offset))
if not ir_listp(ir_sexp):
return ir_val(ir_sexp)
if ir_nullp(ir_sexp):
return ir_sexp.to([])
# handle "q"
first = ir_first(ir_sexp)
keyword = ir_as_symbol(first)
if keyword == "q":
pass
# TODO: note that any symbol is legal after this point
sexp_1 = assemble_from_ir(first)
sexp_2 = assemble_from_ir(ir_rest(ir_sexp))
return sexp_1.cons(sexp_2)
def type_for_atom(atom) -> Type:
if len(atom) > 2:
try:
v = bytes(atom).decode("utf8")
if all(c in string.printable for c in v):
return Type.QUOTES
except UnicodeDecodeError:
pass
return Type.HEX
if int_to_bytes(int_from_bytes(atom)) == atom:
return Type.INT
return Type.HEX
def disassemble_to_ir(sexp, keyword_from_atom, allow_keyword=None):
if is_ir(sexp) and allow_keyword is not False:
return ir_cons(ir_symbol("ir"), sexp)
if sexp.listp():
if sexp.first().listp() or allow_keyword is None:
allow_keyword = True
v0 = disassemble_to_ir(sexp.first(), keyword_from_atom, allow_keyword=allow_keyword)
v1 = disassemble_to_ir(sexp.rest(), keyword_from_atom, allow_keyword=False)
return ir_cons(v0, v1)
as_atom = sexp.as_atom()
if allow_keyword:
v = keyword_from_atom.get(as_atom)
if v is not None and v != '.':
return ir_symbol(v)
if sexp.nullp():
return ir_null()
return sexp.to((type_for_atom(as_atom), as_atom))
def disassemble(sexp, keyword_from_atom=KEYWORD_FROM_ATOM):
symbols = disassemble_to_ir(sexp, keyword_from_atom=keyword_from_atom)
return write_ir(symbols)
def assemble(s):
symbols = read_ir(s)
return assemble_from_ir(symbols)
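# Illustrative round trip (sketch; assumes the `alvm` and `ir` packages imported
# above are installed and that "q" is a registered keyword in KEYWORD_TO_ATOM):
#
#   >>> sexp = assemble("(q 1 2 3)")   # text -> IR -> alvm object
#   >>> disassemble(sexp)              # alvm object -> IR -> text
#   '(q 1 2 3)'
#
# The exact rendering depends on KEYWORD_FROM_ATOM and type_for_atom, so the
# output string should be read as indicative rather than exact.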
avg_line_length: 27.813187 | max_line_length: 92 | alphanum_fraction: 0.650336

hexsha: 237e4d858b21a8dfbab2a8f61ae53005c317dc90 | size: 78 | ext: py | lang: Python
max_stars_repo_path: certbot_dns_namesilo/_internal/__init__.py | max_stars_repo_name: mateste/certbot-dns-namesilo | max_stars_repo_head_hexsha: da27eacaabac394f4c5fd9ffb5e436bb2be0e79d | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: certbot_dns_namesilo/_internal/__init__.py | max_issues_repo_name: mateste/certbot-dns-namesilo | max_issues_repo_head_hexsha: da27eacaabac394f4c5fd9ffb5e436bb2be0e79d | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: certbot_dns_namesilo/_internal/__init__.py | max_forks_repo_name: mateste/certbot-dns-namesilo | max_forks_repo_head_hexsha: da27eacaabac394f4c5fd9ffb5e436bb2be0e79d | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""Internal implementation of `~certbot_dns_namesilo.dns_namesilo` plugin."""
avg_line_length: 39 | max_line_length: 77 | alphanum_fraction: 0.794872

hexsha: df59c102fc3dce19b64a7b5a7419a09cec230344 | size: 17,973 | ext: py | lang: Python
max_stars_repo_path: hydeengine/siteinfo.py | max_stars_repo_name: gulBAC/gulBAC-oldsite | max_stars_repo_head_hexsha: 0da5e13934a855194ffb00fac20d935b8d3173c0 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: hydeengine/siteinfo.py | max_issues_repo_name: gulBAC/gulBAC-oldsite | max_issues_repo_head_hexsha: 0da5e13934a855194ffb00fac20d935b8d3173c0 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2018-10-08T20:01:40.000Z | max_issues_repo_issues_event_max_datetime: 2018-10-08T20:02:43.000Z
max_forks_repo_path: hydeengine/siteinfo.py | max_forks_repo_name: gulBAC/gulBAC-oldsite | max_forks_repo_head_hexsha: 0da5e13934a855194ffb00fac20d935b8d3173c0 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-09-08T10:00:17.000Z | max_forks_repo_forks_event_max_datetime: 2021-09-08T10:00:17.000Z
content:
import sys
import re
import time as sleeper
import operator
from datetime import date, datetime, time
from threading import Thread, Event
from hydeengine import url
from hydeengine.file_system import File, Folder
class SiteResource(object):
def __init__(self, a_file, node):
super(SiteResource, self).__init__()
self.node = node
self.file = a_file
self.source_file = self.file
self.prerendered = False
if self.node.target_folder:
self.target_file = File(
self.node.target_folder.child(self.file.name))
self.temp_file = File(
self.node.temp_folder.child(self.file.name))
self.last_known_modification_time = a_file.last_modified
@property
def is_layout(self):
return (self.node.type == "layout" or
self.file.name.startswith("_"))
@property
def has_changes(self):
return (not self.last_known_modification_time ==
self.file.last_modified)
@property
def url(self):
if self.node.url is None:
return None
return url.join(self.node.url, self.file.name)
@property
def last_modified(self):
return self.file.last_modified
@property
def name(self):
return self.file.name
@property
def full_url(self):
if not self.node.full_url:
return None
return url.join(self.node.full_url, self.file.name)
def __repr__(self):
return str(self.file)
class Page(SiteResource):
def __init__(self, a_file, node):
if not node:
raise ValueError("Page cannot exist without a node")
super(Page, self).__init__(a_file, node)
self.created = datetime.strptime("2000-01-01", "%Y-%m-%d")
self.updated = None
listing_pages = self.node.site.settings.LISTING_PAGE_NAMES
self.listing = a_file.name_without_extension in listing_pages
self.exclude = False
self.display_in_list = None
self.module = node.module
self.process()
if type(self.created) == date:
self.created = datetime.combine(self.created, time())
if type(self.updated) == date:
self.updated = datetime.combine(self.updated, time())
elif type(self.updated) != datetime:
self.updated = self.created
@property
def page_name(self):
return self.file.name_without_extension
def get_context_text(self):
start = re.compile(r'.*?{%\s*hyde\s+(.*?)(%}|$)')
end = re.compile(r'(.*?)(%})')
fin = open(self.file.path,'r')
started = False
text = ''
matcher = start
for line in fin:
match = matcher.match(line)
if match:
text = text + match.group(1)
if started:
break
else:
matcher = end
started = True
elif started:
text = text + line
fin.close()
return text
def add_variables(self, page_vars):
if not page_vars: return
for key, value in page_vars.iteritems():
if not hasattr(Page, key):
setattr(Page, key, None)
setattr(self, key, value)
def process(self):
text = self.get_context_text()
import yaml
context = yaml.load(text)
if not context:
context = {}
self.add_variables(context)
if (self.file.name_without_extension.lower() ==
self.node.folder.name.lower() or
self.file.name_without_extension.lower() in
self.node.site.settings.LISTING_PAGE_NAMES):
self.listing = True
if self.display_in_list is None:
self.display_in_list = (not self.listing and
not self.exclude and
not self.file.name.startswith("_") and
self.file.kind == "html")
def _make_clean_url(self, page_url):
if self.node.listing_page == self:
page_url = self.node.url
else:
page_url = url.clean_url(page_url)
if self.node.site.settings.APPEND_SLASH or not page_url:
page_url += "/"
return page_url
@property
def url(self):
page_url = super(Page, self).url
# clean url generation requires knowing whether or not a page is a
# listing page prior to generating its url
if self.node.site.settings.GENERATE_CLEAN_URLS:
page_url = self._make_clean_url(page_url)
return page_url
@property
def full_url(self):
page_url = super(Page, self).full_url
# clean url generation requires knowing whether or not a page is a
# listing page prior to generating its url
if self.node.site.settings.GENERATE_CLEAN_URLS:
page_url = self._make_clean_url(page_url)
return page_url
class SiteNode(object):
def __init__(self, folder, parent=None):
super(SiteNode, self).__init__()
self.folder = folder
self.parent = parent
self.site = self
if self.parent:
self.site = self.parent.site
self.children = []
self.resources = []
def __repr__(self):
return str(self.folder)
@property
def simple_dict(self):
ress = []
for resource in self.walk_resources():
fragment = Folder(
resource.node.folder.get_fragment(
self.site.folder.path)).child(resource.file.name)
res = dict(
name=resource.file.name,
path=fragment)
ress.append(res)
nodes = []
for node in self.children:
nodes.append(node.simple_dict)
return dict(
name=self.folder.name,
path=self.folder.get_fragment(self.site.folder.path),
resources=ress,
nodes=nodes)
@property
def isroot(self):
return not self.parent
@property
def name(self):
return self.folder.name
@property
def author(self):
return self.site.settings.SITE_AUTHOR
@property
def has_listing(self):
        return self.listing_page is not None
def walk(self):
yield self
for child in self.children:
for node in child.walk():
yield node
def walk_reverse(self):
yield self
for child in reversed(self.children):
for node in child.walk_reverse():
yield node
def walk_resources(self):
for node in self.walk():
for resource in node.resources:
yield resource
def walk_resources_reverse(self):
for node in self.walk_reverse():
for resource in reversed(node.resources):
yield resource
def add_child(self, folder):
if ContentNode.is_content(self.site, folder):
node = ContentNode(folder, parent=self)
elif LayoutNode.is_layout(self.site, folder):
node = LayoutNode(folder, parent=self)
elif MediaNode.is_media(self.site, folder):
node = MediaNode(folder, parent=self)
else:
node = SiteNode(folder, parent=self)
self.children.append(node)
self.site.child_added(node)
return node
def add_resource(self, a_file):
resource = self._add_resource(a_file)
self.site.resource_added(resource)
return resource
def remove_resource(self, resource):
self.resources.remove(resource)
self.site.resource_removed(resource)
def _add_resource(self, a_file):
resource = SiteResource(a_file, self)
self.resources.append(resource)
return resource
def find_node(self, folder):
try:
#print 'FIND NODE', folder, self.site.nodemap.get(folder.path)
return self.site.nodemap[folder.path]
except KeyError:
#print 'FAILED FIND NODE', folder
return None
find_child = find_node
def find_resource(self, a_file):
try:
return self.site.resourcemap[a_file.path]
except KeyError:
return None
@property
def source_folder(self):
return self.folder
@property
def target_folder(self):
return None
@property
def temp_folder(self):
return None
@property
def url(self):
return None
@property
def full_url(self):
if self.url is None:
return None
return url.join(self.site.settings.SITE_WWW_URL, self.url)
@property
def type(self):
return None
class ContentNode(SiteNode):
def __init__(self, folder, parent=None):
super(ContentNode, self).__init__(folder, parent)
self.listing_page = None
self.feed_url = None
walk_pages = SiteNode.walk_resources
walk_pages_reverse = SiteNode.walk_resources_reverse
@property
def module(self):
module = self
while (module.parent and
not module.parent == self.site.content_node):
module = module.parent
return module
@property
def name(self):
if self == self.site.content_node:
return self.site.name
else:
return super(ContentNode, self).name
@property
def pages(self):
return self.resources
@property
def ancestors(self):
node = self
ancestors = []
while not node.isroot:
ancestors.append(node)
node = node.parent
ancestors.reverse()
return ancestors
@staticmethod
def is_content(site, folder):
return (site.content_folder.same_as(folder) or
site.content_folder.is_ancestor_of(folder))
def _add_resource(self, a_file):
page = Page(a_file, self)
if page.listing and not self.listing_page:
self.listing_page = page
self.resources.append(page)
page.node.sort()
return page
def sort(self):
self.resources.sort(key=operator.attrgetter("created"), reverse=True)
prev = None
for page in self.resources:
page.prev = None
page.next = None
if page.display_in_list:
if prev:
prev.next = page
page.prev = prev
page.next = None
prev = page
for node in self.children:
node.sort()
@property
def target_folder(self):
deploy_folder = self.site.target_folder
return deploy_folder.child_folder_with_fragment(self.fragment)
@property
def temp_folder(self):
temp_folder = self.site.temp_folder
return temp_folder.child_folder_with_fragment(self.fragment)
@property
def fragment(self):
return self.folder.get_fragment(self.site.content_folder)
@property
def url(self):
return url.join(self.site.settings.SITE_ROOT,
url.fixslash(
self.folder.get_fragment(self.site.content_folder)))
@property
def type(self):
return "content"
@property
def listing_url(self):
return self.listing_page.url
class LayoutNode(SiteNode):
@staticmethod
def is_layout(site, folder):
return (site.layout_folder.same_as(folder) or
site.layout_folder.is_ancestor_of(folder))
@property
def fragment(self):
return self.folder.get_fragment(self.site.layout_folder)
@property
def type(self):
return "layout"
class MediaNode(SiteNode):
@staticmethod
def is_media(site, folder):
return (site.media_folder.same_as(folder) or
site.media_folder.is_ancestor_of(folder))
@property
def fragment(self):
return self.folder.get_fragment(self.site.media_folder)
@property
def url(self):
return url.join(self.site.settings.SITE_ROOT,
url.fixslash(
self.folder.get_fragment(self.site.folder)))
@property
def type(self):
return "media"
@property
def target_folder(self):
deploy_folder = self.site.target_folder
return deploy_folder.child_folder_with_fragment(
Folder(self.site.media_folder.name).child(self.fragment))
@property
def temp_folder(self):
temp_folder = self.site.temp_folder
return temp_folder.child_folder_with_fragment(
Folder(self.site.media_folder.name).child(self.fragment))
class SiteInfo(SiteNode):
def __init__(self, settings, site_path):
super(SiteInfo, self).__init__(Folder(site_path))
self.settings = settings
self.m = None
self._stop = Event()
self.nodemap = {site_path:self}
self.resourcemap = {}
@property
def name(self):
return self.settings.SITE_NAME
@property
def content_node(self):
return self.nodemap[self.content_folder.path]
@property
def fragment(self):
return ""
@property
def media_node(self):
return self.nodemap[self.media_folder.path]
@property
def layout_node(self):
return self.nodemap[self.layout_folder.path]
@property
def content_folder(self):
return Folder(self.settings.CONTENT_DIR)
@property
def layout_folder(self):
return Folder(self.settings.LAYOUT_DIR)
@property
def media_folder(self):
return Folder(self.settings.MEDIA_DIR)
@property
def temp_folder(self):
return Folder(self.settings.TMP_DIR)
@property
def target_folder(self):
return Folder(self.settings.DEPLOY_DIR)
def child_added(self, node):
self.nodemap[node.folder.path] = node
def resource_added(self, resource):
self.resourcemap[resource.file.path] = resource
def resource_removed(self, resource):
if resource.file.path in self.resourcemap:
del self.resourcemap[resource.file.path]
def remove_node(self, node):
for node in node.walk():
if node.folder.path in self.nodemap:
del self.nodemap[node.folder.path]
for resource in node.walk_resources():
self.resource_removed(resource)
if node.parent and node in node.parent.children:
node.parent.children.remove(node)
def monitor(self, queue=None, waittime=1):
if self.m and self.m.isAlive():
raise "A monitor is currently running."
self._stop.clear()
self.m = Thread(target=self.__monitor_thread__,
kwargs={"waittime":waittime, "queue": queue})
self.m.start()
return self.m
def dont_monitor(self):
if not self.m or not self.m.isAlive():
return
self._stop.set()
self.m.join()
self._stop.clear()
def __monitor_thread__(self, queue, waittime):
while not self._stop.isSet():
try:
self.refresh(queue)
except:
if queue:
queue.put({"exception": True})
raise
if self._stop.isSet():
break
sleeper.sleep(waittime)
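    # Illustrative monitoring usage (sketch): the queue receives dicts shaped like
    # the ones put by refresh() and __monitor_thread__ above.
    #
    #   q = Queue.Queue()                  # Python 2 stdlib queue
    #   site.monitor(queue=q, waittime=1)
    #   event = q.get()                    # e.g. {"change": "Modified", "resource": <SiteResource>, "exception": False}
    #   site.dont_monitor()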
def find_and_add_resource(self, a_file):
resource = self.find_resource(a_file)
if resource:
return resource
node = self.find_and_add_node(a_file.parent)
return node.add_resource(a_file)
def find_and_add_node(self, folder):
node = self.find_node(folder)
if node:
return node
node = self.find_and_add_node(folder.parent)
return node.add_child(folder)
def refresh(self, queue=None):
site = self
# Have to poll for changes since there is no reliable way
# to get notification in a platform independent manner
class Visitor(object):
def visit_folder(self, folder):
return folder.allow(**site.settings.FILTER)
def visit_file(self, a_file):
if not a_file.allow(**site.settings.FILTER):
return
resource = site.find_resource(a_file)
change = None
if not resource:
resource = site.find_and_add_resource(a_file)
change = "Added"
elif resource.has_changes:
change = "Modified"
resource.last_known_modification_time = a_file.last_modified
if change:
if queue:
queue.put({
"change": change,
"resource": resource,
"exception": False
})
visitor = Visitor()
self.layout_folder.walk(visitor)
self.content_folder.walk(visitor)
self.media_folder.walk(visitor)
nodes_to_remove = []
for node in self.walk():
if not node.folder.exists:
                if queue:
                    queue.put({
                        "change": "NodeRemoved",
                        "node": node,
                        "exception": False
                    })
                nodes_to_remove += [node]
for node in nodes_to_remove:
self.remove_node(node)
for resource in self.walk_resources():
if not resource.file.exists:
if queue:
queue.put({
"change":"Deleted",
"resource":resource,
"exception": False
})
resource.node.remove_resource(resource)
avg_line_length: 29.463934 | max_line_length: 81 | alphanum_fraction: 0.580148

hexsha: 0612e84df528013af102ae9e3983a7cb1bf7f8fb | size: 9,493 | ext: py | lang: Python
max_stars_repo_path: flexure/flexure.py | max_stars_repo_name: landlab/flexure-component | max_stars_repo_head_hexsha: 3672faa181172e1be62e4431b04f57c9666411ad | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: flexure/flexure.py | max_issues_repo_name: landlab/flexure-component | max_issues_repo_head_hexsha: 3672faa181172e1be62e4431b04f57c9666411ad | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: flexure/flexure.py | max_forks_repo_name: landlab/flexure-component | max_forks_repo_head_hexsha: 3672faa181172e1be62e4431b04f57c9666411ad | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
"""Deform the lithosphere with 1D or 2D flexure.
Landlab component that implements a 1 and 2D lithospheric flexure
model.
Examples
--------
Create a grid on which we will run the flexure calculations.
>>> from landlab import RasterModelGrid
>>> from landlab.components.flexure import Flexure
>>> grid = RasterModelGrid((5, 4), spacing=(1.e4, 1.e4))
Check the fields that are used as input to the flexure component.
>>> Flexure.input_var_names # doctest: +NORMALIZE_WHITESPACE
('lithosphere__overlying_pressure_increment',)
Check the units for the fields.
>>> Flexure.var_units('lithosphere__overlying_pressure_increment')
'Pa'
If you are not sure about one of the input or output variables, you can
get help for specific variables.
>>> Flexure.var_help('lithosphere__overlying_pressure_increment')
name: lithosphere__overlying_pressure_increment
description:
Applied pressure to the lithosphere over a time step
units: Pa
at: node
intent: in
>>> flex = Flexure(grid)
In creating the component, a field (initialized with zeros) was added to the
grid. Reset the interior nodes for the loading.
>>> dh = grid.at_node['lithosphere__overlying_pressure_increment']
>>> dh = dh.reshape(grid.shape)
>>> dh[1:-1, 1:-1] = flex.gamma_mantle
>>> flex.update()
>>> flex.output_var_names
('lithosphere_surface__elevation_increment',)
>>> flex.grid.at_node['lithosphere_surface__elevation_increment']
... # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0., 0.,
0., 1., 1., 0.,
0., 1., 1., 0.,
0., 1., 1., 0.,
0., 0., 0., 0.])
"""
import numpy as np
from landlab import Component
from landlab.utils.decorators import use_file_name_or_kwds
from .funcs import get_flexure_parameter
class Flexure(Component):
"""Deform the lithosphere with 1D or 2D flexure.
Landlab component that implements a 1 and 2D lithospheric flexure
model.
Construction::
Flexure(grid, eet=65e3, youngs=7e10, method='airy', rho_mantle=3300.,
gravity=9.80665)
Parameters
----------
grid : RasterModelGrid
A grid.
eet : float, optional
Effective elastic thickness (m).
youngs : float, optional
Young's modulus.
method : {'airy', 'flexure'}, optional
Method to use to calculate deflections.
rho_mantle : float, optional
Density of the mantle (kg / m^3).
gravity : float, optional
Acceleration due to gravity (m / s^2).
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components.flexure import Flexure
>>> grid = RasterModelGrid((5, 4), spacing=(1.e4, 1.e4))
>>> flex = Flexure(grid)
>>> flex.name
'Flexure'
>>> flex.input_var_names
('lithosphere__overlying_pressure_increment',)
>>> flex.output_var_names
('lithosphere_surface__elevation_increment',)
>>> sorted(flex.units) # doctest: +NORMALIZE_WHITESPACE
[('lithosphere__overlying_pressure_increment', 'Pa'),
('lithosphere_surface__elevation_increment', 'm')]
>>> flex.grid.number_of_node_rows
5
>>> flex.grid.number_of_node_columns
4
>>> flex.grid is grid
True
>>> np.all(grid.at_node['lithosphere_surface__elevation_increment'] == 0.)
True
>>> np.all(grid.at_node['lithosphere__overlying_pressure_increment'] == 0.)
True
>>> flex.update()
>>> np.all(grid.at_node['lithosphere_surface__elevation_increment'] == 0.)
True
>>> load = grid.at_node['lithosphere__overlying_pressure_increment']
>>> load[4] = 1e9
>>> dz = grid.at_node['lithosphere_surface__elevation_increment']
>>> np.all(dz == 0.)
True
>>> flex.update()
>>> np.all(grid.at_node['lithosphere_surface__elevation_increment'] == 0.)
False
"""
_name = 'Flexure'
_input_var_names = (
'lithosphere__overlying_pressure_increment',
)
_output_var_names = (
'lithosphere_surface__elevation_increment',
)
_var_units = {
'lithosphere__overlying_pressure_increment': 'Pa',
'lithosphere_surface__elevation_increment': 'm',
}
_var_mapping = {
'lithosphere__overlying_pressure_increment': 'node',
'lithosphere_surface__elevation_increment': 'node',
}
_var_doc = {
'lithosphere__overlying_pressure_increment':
'Applied pressure to the lithosphere over a time step',
'lithosphere_surface__elevation_increment':
'The change in elevation of the top of the lithosphere (the land '
'surface) in one timestep',
}
@use_file_name_or_kwds
def __init__(self, grid, eet=65e3, youngs=7e10, method='airy',
rho_mantle=3300., gravity=9.80665, **kwds):
"""Initialize the flexure component.
Parameters
----------
grid : RasterModelGrid
A grid.
eet : float, optional
Effective elastic thickness (m).
youngs : float, optional
Young's modulus.
method : {'airy', 'flexure'}, optional
Method to use to calculate deflections.
rho_mantle : float, optional
Density of the mantle (kg / m^3).
gravity : float, optional
Acceleration due to gravity (m / s^2).
"""
if method not in ('airy', 'flexure'):
raise ValueError(
'{method}: method not understood'.format(method=method))
self._grid = grid
self._youngs = youngs
self._method = method
self._rho_mantle = rho_mantle
self._gravity = gravity
self.eet = eet
super(Flexure, self).__init__(grid, **kwds)
for name in self._input_var_names:
if name not in self.grid.at_node:
self.grid.add_zeros('node', name, units=self._var_units[name])
for name in self._output_var_names:
if name not in self.grid.at_node:
self.grid.add_zeros('node', name, units=self._var_units[name])
self._r = self._create_kei_func_grid(self._grid.shape,
(self.grid.dy, self.grid.dx),
self.alpha)
@property
def eet(self):
"""Effective elastic thickness (m)."""
return self._eet
@eet.setter
def eet(self, new_val):
if new_val <= 0:
raise ValueError('Effective elastic thickness must be positive.')
self._eet = new_val
self._r = self._create_kei_func_grid(self._grid.shape,
(self.grid.dy, self.grid.dx),
self.alpha)
@property
def youngs(self):
"""Young's modulus of lithosphere (Pa)."""
return self._youngs
@property
def rho_mantle(self):
"""Density of mantle (kg/m^3)."""
return self._rho_mantle
@property
def gamma_mantle(self):
"""Specific density of mantle (N/m^3)."""
return self._rho_mantle * self._gravity
@property
def gravity(self):
"""Acceleration due to gravity (m/s^2)."""
return self._gravity
@property
def method(self):
"""Name of method used to calculate deflections."""
return self._method
@property
def alpha(self):
"""Flexure parameter (m)."""
return get_flexure_parameter(self._eet, self._youngs, 2,
gamma_mantle=self.gamma_mantle)
@staticmethod
def _create_kei_func_grid(shape, spacing, alpha):
from scipy.special import kei
dx, dy = np.meshgrid(np.arange(shape[1]) * spacing[1],
np.arange(shape[0]) * spacing[0])
return kei(np.sqrt(dx ** 2 + dy ** 2) / alpha)
def update(self, n_procs=1):
"""Update fields with current loading conditions.
Parameters
----------
n_procs : int, optional
Number of processors to use for calculations.
"""
load = self.grid.at_node['lithosphere__overlying_pressure_increment']
deflection = self.grid.at_node['lithosphere_surface__elevation_increment']
new_load = load.copy()
deflection.fill(0.)
if self._method == 'airy':
deflection[:] = new_load / self.gamma_mantle
else:
self.subside_loads(new_load, deflection=deflection,
n_procs=n_procs)
def subside_loads(self, loads, deflection=None, n_procs=1):
"""Subside surface due to multiple loads.
Parameters
----------
loads : ndarray of float
Loads applied to each grid node.
deflection : ndarray of float, optional
Buffer to place resulting deflection values.
n_procs : int, optional
Number of processors to use for calculations.
Returns
-------
ndarray of float
Deflections caused by the loading.
"""
if deflection is None:
deflection = np.empty(self._grid.number_of_nodes, dtype=float)
from .cfuncs import subside_grid_in_parallel
w = deflection.reshape(self._grid.shape)
load = loads.reshape(self._grid.shape)
subside_grid_in_parallel(w, load * self._grid.dx * self._grid.dy,
self._r, self.alpha, self.gamma_mantle,
n_procs)
return deflection
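# --- Editor's usage sketch (not part of the original file) ----------------
# A minimal, hedged example of driving ``subside_loads`` directly with a
# single point load, mirroring the doctests in the class docstring.  The
# grid size, spacing and load magnitude are illustrative only, and running
# it assumes landlab (including the compiled ``cfuncs`` extension used by
# ``subside_loads``) is importable.
if __name__ == '__main__':
    from landlab import RasterModelGrid

    grid = RasterModelGrid((5, 4), spacing=(1.e4, 1.e4))
    flex = Flexure(grid, method='flexure')

    loads = np.zeros(grid.number_of_nodes)
    loads[grid.number_of_nodes // 2] = 1e9       # a single point load (Pa)

    dz = np.zeros(grid.number_of_nodes)          # buffer for the deflections
    flex.subside_loads(loads, deflection=dz)
    print(dz.reshape(grid.shape))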
| 29.946372
| 82
| 0.615717
|
08e52da7b0c290c43076d82d574d3fbb00407549
| 16,071
|
py
|
Python
|
code/models.py
|
taipahuchu/language-Identification-
|
68660bc110d374f0d8802b942792b15f8782e647
|
[
"Unlicense"
] | 13
|
2016-10-12T17:20:55.000Z
|
2021-07-13T10:19:23.000Z
|
code/models.py
|
taipahuchu/language-Identification-
|
68660bc110d374f0d8802b942792b15f8782e647
|
[
"Unlicense"
] | 2
|
2019-03-28T03:06:54.000Z
|
2019-04-04T20:54:43.000Z
|
code/models.py
|
taipahuchu/language-Identification-
|
68660bc110d374f0d8802b942792b15f8782e647
|
[
"Unlicense"
] | 4
|
2018-07-05T05:50:34.000Z
|
2019-09-01T20:21:50.000Z
|
import tensorflow as tf
import numpy as np
class BaseModel(object):
"""Holds code shared between all the different model variants."""
def __init__(self, batch_size, max_sequence_len, out_vocab_size, c2v,
dropout_keep_prob=0.0):
self._batch_size = batch_size
self._dropout_keep_prob = dropout_keep_prob
self._out_vocab_size = out_vocab_size
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1]) for input_ in
tf.split(1, max_sequence_len, embeddings)]
# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])
# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
# Use the logical operations to create a mask
indicator = tf.less(range_tiled, lengths_tiled)
sz = [batch_size, max_sequence_len]
self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))
def _DoPredictions(self, in_size, mats, class_weights=None):
"""Takes in an array of states and calculates predictions.
Get the cross-entropy for each example in the vector self._xent.
Args:
in_size: size of the hidden state vectors
mats: list of hidden state vectors
"""
pred_mat = tf.get_variable('pred_mat',
[in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction on every word.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
preds_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(self.preds_by_word,
[-1, self._out_vocab_size]))
preds_weighted_reshaped = tf.reshape(preds_weighted,
self.preds_by_word.get_shape())
self.probs = tf.reduce_sum(preds_weighted_reshaped, 0)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights)
class WordAvgModel(BaseModel): #formerly SimpleModel
"""A bag of word /predictions/."""
def __init__(self, out_vocab_size=None,
batch_size=10,
model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
super(WordAvgModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v)
super(WordAvgModel, self)._DoPredictions(c2v.embedding_dims,
self._inputs)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class WordSeqModel(BaseModel):
"""A bag of word embeddings."""
def __init__(self, out_vocab_size=None,
batch_size=10,
model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
super(WordSeqModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v)
in_size = self._inputs[0].get_shape()[1].value
# Also, output confidence scores at every word.
confidence_mat = tf.get_variable('confidence_mat', [in_size, 1])
confidence_scores = tf.concat(1, [tf.matmul(o_, confidence_mat)
for o_ in self._inputs])
# dropout on confidence_scores
random_tensor = (1.0 - self._dropout_keep_prob +
tf.random_uniform(tf.shape(confidence_scores)))
binary_tensor = -50.0 * tf.floor(random_tensor)
csshape = confidence_scores.get_shape()
self.cs = tf.nn.softmax(tf.constant(1.0, shape=csshape))
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
wvs = tf.pack(self._inputs)
wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(wvs, [-1, in_size]))
wvs_weighted_reshaped = tf.reshape(wvs_weighted, wvs.get_shape())
wvsum = tf.reduce_sum(wvs_weighted_reshaped,0)
pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction for each tweet.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
preds = GetWordPred(wvsum)
z = tf.tile(tf.reshape(tf.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size])
self.preds, self.z = preds, z
self.probs = tf.div(preds, z) #normalize
self.unweighted_xent = _SafeXEnt(self.y, self.probs)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class TweetSeqModel(BaseModel): #formerly SeqModel
"""Single layer LSTM on top of the word embeddings.
Lang id predictions are done on each word and then combined via
a weighted average.
"""
def __init__(self, out_vocab_size=None,
batch_size=10, model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
"""Initialize the TweetSeqModel
Args:
out_vocab_size: how many languages we are predicting
batch_size: minibatch size
model_params: dictionary of other model parameters
c2v: char2vec class instance
max_sequence_len: length of all the input sequences
dropout_keep_prob: dropout probability indicator
weights: class weights
"""
hidden_size = model_params['model_hidden_size']
proj_size = model_params['model_proj_size'] # optional, can be None
super(TweetSeqModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v,
dropout_keep_prob)
weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
def GetCell():
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
if dropout_keep_prob is not None:
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
if proj_size:
out_size = 2 * proj_size
else:
out_size = 2 * hidden_size
super(TweetSeqModel, self)._DoPredictions(out_size, rnnout, class_weights=weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class CharSeqModel(object): #formerly TweetSeqModel
"""
Treats each document (tweet) as a single "word," which is fed through c2v,
and the output "embedding" sized to be a vector of language predictions.
"""
def __init__(self, out_vocab_size=None,
batch_size=10, model_params=None, c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
self.params = model_params
self._out_vocab_size = out_vocab_size # num. of languages
self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
with tf.variable_scope("tweetff"):
hidden = tf.get_variable("ff_hidden",
[c2v.embedding_dims, out_vocab_size])
bias = tf.get_variable('ff_bias', [out_vocab_size])
#probably useless. at least I don't want to use it
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
name='y')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
# get one 'word' embedding for the full tweet
tweet_embedding = c2v.GetEmbeddings(self.x)[:,1,:]
logits = tf.nn.xw_plus_b(tweet_embedding, hidden, bias)
self.probs = tf.nn.softmax(logits)
self._xent = tf.nn.softmax_cross_entropy_with_logits(logits, self.y)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class WordLevelModel(object):
"""
Model to evaluate on word-level predictions
Args:
batch_size: minibatch size
model_params: dictionary of other model parameters
c2v: char2vec class instance
max_sequence_len: length of all the input/output sequences
out_vocab_size: how many languages we are predicting
dropout_keep_prob: dropout probability indicator
weights: class weights
"""
def __init__(self, batch_size, model_params, c2v, max_sequence_len,
out_vocab_size, dropout_keep_prob=0.0, weights=None):
self._batch_size = batch_size
self._dropout_keep_prob = dropout_keep_prob
self._out_vocab_size = out_vocab_size
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32,
[batch_size, max_sequence_len, out_vocab_size],
name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1]) for input_ in
tf.split(1, max_sequence_len, embeddings)]
# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])
# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
self.lengths_transposed = lengths_transposed
self.lengths_tiled = lengths_tiled
self.range_row = range_row
self.range_tiled = range_tiled
# Use the logical operations to create a mask
indicator = tf.less(range_tiled, lengths_tiled+1) #i.e. where seq len is less than index
trim = np.ones(indicator.get_shape())
trim[:,0] = 0 #ignore start symbol
indicator = tf.logical_and(indicator, trim.astype(bool))
self.indicator = indicator
sz = [batch_size, max_sequence_len]
self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))
#-------------------------------#
self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
hidden_size = model_params['model_hidden_size']
proj_size = model_params['model_proj_size'] # optional, can be None
def GetCell():
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
if dropout_keep_prob is not None:
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
if proj_size:
out_size = 2 * proj_size
else:
out_size = 2 * hidden_size
self._DoPredictions(out_size, rnnout, self.weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
def _DoPredictions(self, in_size, mats, class_weights=None):
"""Takes in an array of states and calculates predictions.
Get the cross-entropy for each example in the vector self._xent.
Args:
in_size: size of the hidden state vectors
mats: list of hidden state vectors
"""
pred_mat = tf.get_variable('pred_mat',
[in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction on every word.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
#self.preds_by_word1 = tf.pack([GetWordPred(o_) for o_ in mats])
#self.preds_by_word = tf.reshape(self.preds_by_word1, self.y.get_shape())
#self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_word)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.preds_by_instance = tf.pack([self.preds_by_word[:,i,:] for i in range(self.preds_by_word.get_shape()[1])])
self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_instance)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights, sumd=[1,2])
def _SafeXEnt(y, probs, eps=0.0001, class_weights=None, sumd=[1]):
"""Version of cross entropy loss that should not produce NaNs.
If the predicted proability for the true class is near zero then when
taking the log it can produce a NaN, which ruins everything. This
function ensures each probability is at least eps and no more than one
before taking the log.
Args:
y: matrix of true probabilities same size as probs
probs: matrix of probabilities for the minibatch
eps: value to clip the probabilities at
class_weights: vector of relative weights to be assigned to each class
sumd: dimensions along which to sum the x-ent matrix
Returns:
cross entropy loss for each example in the minibatch
"""
adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
xent_mat = -y * tf.log(adjusted_probs)
if class_weights is not None:
xent_mat *= class_weights
return tf.reduce_sum(xent_mat, sumd)
def _SafeNegEntropy(probs, batch_size, eps=0.0001):
"""Computes negative entropy in a way that will not overflow."""
adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
entropy = tf.mul(probs, tf.log(adjusted_probs))
return tf.reduce_sum(entropy) / batch_size
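# --- Editor's illustration (not part of the original file) ----------------
# The clipping trick used by ``_SafeXEnt`` above, reproduced with plain
# numpy so it can be checked without the legacy TensorFlow 0.x API this
# file targets.  The probabilities below are made up.
if __name__ == '__main__':
    def _numpy_safe_xent(y, probs, eps=0.0001):
        adjusted = np.clip(probs, eps, 1.0 - eps)    # keep log() finite
        return np.sum(-y * np.log(adjusted), axis=1)

    y_true = np.array([[1.0, 0.0, 0.0]])
    y_pred = np.array([[0.0, 0.7, 0.3]])             # zero prob for the true class
    print(_numpy_safe_xent(y_true, y_pred))          # finite loss thanks to the clip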
| 39.681481
| 115
| 0.65136
|
72178f3068bc85d4bc5185a5ed749e4877532ce9
| 165,362
|
py
|
Python
|
redis/client.py
|
malinaa96/redis-py
|
435759aa008c12c379130636561ec854ad961390
|
[
"MIT"
] | null | null | null |
redis/client.py
|
malinaa96/redis-py
|
435759aa008c12c379130636561ec854ad961390
|
[
"MIT"
] | null | null | null |
redis/client.py
|
malinaa96/redis-py
|
435759aa008c12c379130636561ec854ad961390
|
[
"MIT"
] | null | null | null |
from itertools import chain
import datetime
import warnings
import time
import threading
import time as mod_time
import re
import hashlib
from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
SSLConnection)
from redis.lock import Lock
from redis.exceptions import (
ConnectionError,
DataError,
ExecAbortError,
NoScriptError,
PubSubError,
RedisError,
ResponseError,
TimeoutError,
WatchError,
ModuleError,
)
from redis.utils import safe_str, str_if_bytes
SYM_EMPTY = b''
EMPTY_RESPONSE = 'EMPTY_RESPONSE'
def list_or_args(keys, args):
# returns a single new list combining keys and args
try:
iter(keys)
# a string or bytes instance can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, (bytes, str)):
keys = [keys]
else:
keys = list(keys)
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys
def timestamp_to_datetime(response):
"Converts a unix timestamp to a Python datetime object"
if not response:
return None
try:
response = int(response)
except ValueError:
return None
return datetime.datetime.fromtimestamp(response)
def string_keys_to_dict(key_string, callback):
return dict.fromkeys(key_string.split(), callback)
class CaseInsensitiveDict(dict):
"Case insensitive dict implementation. Assumes string keys only."
def __init__(self, data):
for k, v in data.items():
self[k.upper()] = v
def __contains__(self, k):
return super().__contains__(k.upper())
def __delitem__(self, k):
super().__delitem__(k.upper())
def __getitem__(self, k):
return super().__getitem__(k.upper())
def get(self, k, default=None):
return super().get(k.upper(), default)
def __setitem__(self, k, v):
super().__setitem__(k.upper(), v)
def update(self, data):
data = CaseInsensitiveDict(data)
super().update(data)
def parse_debug_object(response):
"Parse the results of Redis's DEBUG OBJECT command into a Python dict"
# The 'type' of the object is the first item in the response, but isn't
# prefixed with a name
response = str_if_bytes(response)
response = 'type:' + response
response = dict(kv.split(':') for kv in response.split())
# parse some expected int values from the string response
# note: this cmd isn't spec'd so these may not appear in all redis versions
int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
for field in int_fields:
if field in response:
response[field] = int(response[field])
return response
def parse_object(response, infotype):
"Parse the results of an OBJECT command"
if infotype in ('idletime', 'refcount'):
return int_or_none(response)
return response
def parse_info(response):
"Parse the result of Redis's INFO command into a Python dict"
info = {}
response = str_if_bytes(response)
def get_value(value):
if ',' not in value or '=' not in value:
try:
if '.' in value:
return float(value)
else:
return int(value)
except ValueError:
return value
else:
sub_dict = {}
for item in value.split(','):
k, v = item.rsplit('=', 1)
sub_dict[k] = get_value(v)
return sub_dict
for line in response.splitlines():
if line and not line.startswith('#'):
if line.find(':') != -1:
# Split the info field's key and value.
# Note that the value may contain ':', but the 'host:'
# pseudo-command is the only case where the key contains ':'
key, value = line.split(':', 1)
if key == 'cmdstat_host':
key, value = line.rsplit(':', 1)
if key == 'module':
# Hardcode a list for key 'modules' since there could be
# multiple lines that started with 'module'
info.setdefault('modules', []).append(get_value(value))
else:
info[key] = get_value(value)
else:
# if the line isn't splittable, append it to the "__raw__" key
info.setdefault('__raw__', []).append(line)
return info
def parse_memory_stats(response, **kwargs):
"Parse the results of MEMORY STATS"
stats = pairs_to_dict(response,
decode_keys=True,
decode_string_values=True)
for key, value in stats.items():
if key.startswith('db.'):
stats[key] = pairs_to_dict(value,
decode_keys=True,
decode_string_values=True)
return stats
SENTINEL_STATE_TYPES = {
'can-failover-its-master': int,
'config-epoch': int,
'down-after-milliseconds': int,
'failover-timeout': int,
'info-refresh': int,
'last-hello-message': int,
'last-ok-ping-reply': int,
'last-ping-reply': int,
'last-ping-sent': int,
'master-link-down-time': int,
'master-port': int,
'num-other-sentinels': int,
'num-slaves': int,
'o-down-time': int,
'pending-commands': int,
'parallel-syncs': int,
'port': int,
'quorum': int,
'role-reported-time': int,
's-down-time': int,
'slave-priority': int,
'slave-repl-offset': int,
'voted-leader-epoch': int
}
def parse_sentinel_state(item):
result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
flags = set(result['flags'].split(','))
for name, flag in (('is_master', 'master'), ('is_slave', 'slave'),
('is_sdown', 's_down'), ('is_odown', 'o_down'),
('is_sentinel', 'sentinel'),
('is_disconnected', 'disconnected'),
('is_master_down', 'master_down')):
result[name] = flag in flags
return result
def parse_sentinel_master(response):
return parse_sentinel_state(map(str_if_bytes, response))
def parse_sentinel_masters(response):
result = {}
for item in response:
state = parse_sentinel_state(map(str_if_bytes, item))
result[state['name']] = state
return result
def parse_sentinel_slaves_and_sentinels(response):
return [parse_sentinel_state(map(str_if_bytes, item)) for item in response]
def parse_sentinel_get_master(response):
return response and (response[0], int(response[1])) or None
def pairs_to_dict(response, decode_keys=False, decode_string_values=False):
"Create a dict given a list of key/value pairs"
if response is None:
return {}
if decode_keys or decode_string_values:
# the iter form is faster, but I don't know how to make that work
# with a str_if_bytes() map
keys = response[::2]
if decode_keys:
keys = map(str_if_bytes, keys)
values = response[1::2]
if decode_string_values:
values = map(str_if_bytes, values)
return dict(zip(keys, values))
else:
it = iter(response)
return dict(zip(it, it))
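# Editor's note (not part of the original file): a small worked example of
# pairs_to_dict() with made-up reply values.  A flat HGETALL-style reply
#     [b'name', b'redis', b'version', b'6.0']
# becomes
#     {b'name': b'redis', b'version': b'6.0'}
# and with decode_keys=True only the keys are decoded:
#     {'name': b'redis', 'version': b'6.0'}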
def pairs_to_dict_typed(response, type_info):
it = iter(response)
result = {}
for key, value in zip(it, it):
if key in type_info:
try:
value = type_info[key](value)
except Exception:
# if for some reason the value can't be coerced, just use
# the string value
pass
result[key] = value
return result
def zset_score_pairs(response, **options):
"""
If ``withscores`` is specified in the options, return the response as
a list of (value, score) pairs
"""
if not response or not options.get('withscores'):
return response
score_cast_func = options.get('score_cast_func', float)
it = iter(response)
return list(zip(it, map(score_cast_func, it)))
def sort_return_tuples(response, **options):
"""
If ``groups`` is specified, return the response as a list of
n-element tuples with n being the value found in options['groups']
"""
if not response or not options.get('groups'):
return response
n = options['groups']
return list(zip(*[response[i::n] for i in range(n)]))
def int_or_none(response):
if response is None:
return None
return int(response)
def parse_stream_list(response):
if response is None:
return None
data = []
for r in response:
if r is not None:
data.append((r[0], pairs_to_dict(r[1])))
else:
data.append((None, None))
return data
def pairs_to_dict_with_str_keys(response):
return pairs_to_dict(response, decode_keys=True)
def parse_list_of_dicts(response):
return list(map(pairs_to_dict_with_str_keys, response))
def parse_xclaim(response, **options):
if options.get('parse_justid', False):
return response
return parse_stream_list(response)
def parse_xinfo_stream(response):
data = pairs_to_dict(response, decode_keys=True)
first = data['first-entry']
if first is not None:
data['first-entry'] = (first[0], pairs_to_dict(first[1]))
last = data['last-entry']
if last is not None:
data['last-entry'] = (last[0], pairs_to_dict(last[1]))
return data
def parse_xread(response):
if response is None:
return []
return [[r[0], parse_stream_list(r[1])] for r in response]
def parse_xpending(response, **options):
if options.get('parse_detail', False):
return parse_xpending_range(response)
consumers = [{'name': n, 'pending': int(p)} for n, p in response[3] or []]
return {
'pending': response[0],
'min': response[1],
'max': response[2],
'consumers': consumers
}
def parse_xpending_range(response):
k = ('message_id', 'consumer', 'time_since_delivered', 'times_delivered')
return [dict(zip(k, r)) for r in response]
def float_or_none(response):
if response is None:
return None
return float(response)
def bool_ok(response):
return str_if_bytes(response) == 'OK'
def parse_zadd(response, **options):
if response is None:
return None
if options.get('as_score'):
return float(response)
return int(response)
def parse_client_list(response, **options):
clients = []
for c in str_if_bytes(response).splitlines():
# Values might contain '='
clients.append(dict(pair.split('=', 1) for pair in c.split(' ')))
return clients
def parse_config_get(response, **options):
response = [str_if_bytes(i) if i is not None else None for i in response]
return response and pairs_to_dict(response) or {}
def parse_scan(response, **options):
cursor, r = response
return int(cursor), r
def parse_hscan(response, **options):
cursor, r = response
return int(cursor), r and pairs_to_dict(r) or {}
def parse_zscan(response, **options):
score_cast_func = options.get('score_cast_func', float)
cursor, r = response
it = iter(r)
return int(cursor), list(zip(it, map(score_cast_func, it)))
def parse_slowlog_get(response, **options):
space = ' ' if options.get('decode_responses', False) else b' '
return [{
'id': item[0],
'start_time': int(item[1]),
'duration': int(item[2]),
'command':
# Redis Enterprise injects another entry at index [3], which has
# the complexity info (i.e. the value N in case the command has
# an O(N) complexity) instead of the command.
space.join(item[3]) if isinstance(item[3], list) else
space.join(item[4])
} for item in response]
def parse_cluster_info(response, **options):
response = str_if_bytes(response)
return dict(line.split(':') for line in response.splitlines() if line)
def _parse_node_line(line):
line_items = line.split(' ')
node_id, addr, flags, master_id, ping, pong, epoch, \
connected = line.split(' ')[:8]
slots = [sl.split('-') for sl in line_items[8:]]
node_dict = {
'node_id': node_id,
'flags': flags,
'master_id': master_id,
'last_ping_sent': ping,
'last_pong_rcvd': pong,
'epoch': epoch,
'slots': slots,
'connected': True if connected == 'connected' else False
}
return addr, node_dict
def parse_cluster_nodes(response, **options):
raw_lines = str_if_bytes(response).splitlines()
return dict(_parse_node_line(line) for line in raw_lines)
def parse_georadius_generic(response, **options):
if options['store'] or options['store_dist']:
# `store` and `store_dist` can't be combined
# with other command arguments.
return response
if type(response) != list:
response_list = [response]
else:
response_list = response
if not options['withdist'] and not options['withcoord']\
and not options['withhash']:
# just a bunch of places
return response_list
cast = {
'withdist': float,
'withcoord': lambda ll: (float(ll[0]), float(ll[1])),
'withhash': int
}
# zip each output result with its casting function to get
# the proper native Python value.
f = [lambda x: x]
f += [cast[o] for o in ['withdist', 'withhash', 'withcoord'] if options[o]]
return [
list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list
]
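# Editor's note (not part of the original file): with ``withdist`` and
# ``withcoord`` requested, each raw row looks roughly like
#     [b'place', b'0.3', [b'2.19', b'41.43']]
# and ``f`` becomes [identity, float, coord-cast], so zipping the casts with
# the row yields
#     [b'place', 0.3, (2.19, 41.43)]
# The place name and coordinates above are made up for illustration.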
def parse_pubsub_numsub(response, **options):
return list(zip(response[0::2], response[1::2]))
def parse_client_kill(response, **options):
if isinstance(response, int):
return response
return str_if_bytes(response) == 'OK'
def parse_acl_getuser(response, **options):
if response is None:
return None
data = pairs_to_dict(response, decode_keys=True)
# convert everything but user-defined data in 'keys' to native strings
data['flags'] = list(map(str_if_bytes, data['flags']))
data['passwords'] = list(map(str_if_bytes, data['passwords']))
data['commands'] = str_if_bytes(data['commands'])
# split 'commands' into separate 'categories' and 'commands' lists
commands, categories = [], []
for command in data['commands'].split(' '):
if '@' in command:
categories.append(command)
else:
commands.append(command)
data['commands'] = commands
data['categories'] = categories
data['enabled'] = 'on' in data['flags']
return data
def parse_acl_log(response, **options):
if response is None:
return None
if isinstance(response, list):
data = []
for log in response:
log_data = pairs_to_dict(log, True, True)
client_info = log_data.get('client-info', '')
log_data["client-info"] = parse_client_info(client_info)
# float() is lossy comparing to the "double" in C
log_data["age-seconds"] = float(log_data["age-seconds"])
data.append(log_data)
else:
data = bool_ok(response)
return data
def parse_client_info(value):
"""
Parsing client-info in ACL Log in following format.
"key1=value1 key2=value2 key3=value3"
"""
client_info = {}
infos = value.split(" ")
for info in infos:
key, value = info.split("=")
client_info[key] = value
# These fields are defined as int in networking.c
for int_key in {"id", "age", "idle", "db", "sub", "psub",
"multi", "qbuf", "qbuf-free", "obl",
"oll", "omem"}:
client_info[int_key] = int(client_info[int_key])
return client_info
def parse_module_result(response):
if isinstance(response, ModuleError):
raise response
return True
class Redis:
"""
Implementation of the Redis protocol.
This abstract class provides a Python interface to all Redis commands
and an implementation of the Redis protocol.
Connection and Pipeline derive from this, implementing how
the commands are sent and received to the Redis server
"""
RESPONSE_CALLBACKS = {
**string_keys_to_dict(
'AUTH COPY EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST '
'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX',
bool
),
**string_keys_to_dict(
'BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN '
'HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD '
'SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN '
'SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM '
'ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE',
int
),
**string_keys_to_dict(
'INCRBYFLOAT HINCRBYFLOAT',
float
),
**string_keys_to_dict(
# these return OK, or int if redis-server is >=1.3.4
'LPUSH RPUSH',
lambda r: isinstance(r, int) and r or str_if_bytes(r) == 'OK'
),
**string_keys_to_dict('SORT', sort_return_tuples),
**string_keys_to_dict('ZSCORE ZINCRBY GEODIST', float_or_none),
**string_keys_to_dict(
'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE '
'RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ',
bool_ok
),
**string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
**string_keys_to_dict(
'SDIFF SINTER SMEMBERS SUNION',
lambda r: r and set(r) or set()
),
**string_keys_to_dict(
'ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
zset_score_pairs
),
**string_keys_to_dict('BZPOPMIN BZPOPMAX', \
lambda r:
r and (r[0], r[1], float(r[2])) or None),
**string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
**string_keys_to_dict('XREVRANGE XRANGE', parse_stream_list),
**string_keys_to_dict('XREAD XREADGROUP', parse_xread),
**string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),
'ACL CAT': lambda r: list(map(str_if_bytes, r)),
'ACL DELUSER': int,
'ACL GENPASS': str_if_bytes,
'ACL GETUSER': parse_acl_getuser,
'ACL LIST': lambda r: list(map(str_if_bytes, r)),
'ACL LOAD': bool_ok,
'ACL LOG': parse_acl_log,
'ACL SAVE': bool_ok,
'ACL SETUSER': bool_ok,
'ACL USERS': lambda r: list(map(str_if_bytes, r)),
'ACL WHOAMI': str_if_bytes,
'CLIENT GETNAME': str_if_bytes,
'CLIENT ID': int,
'CLIENT KILL': parse_client_kill,
'CLIENT LIST': parse_client_list,
'CLIENT SETNAME': bool_ok,
'CLIENT UNBLOCK': lambda r: r and int(r) == 1 or False,
'CLIENT PAUSE': bool_ok,
'CLUSTER ADDSLOTS': bool_ok,
'CLUSTER COUNT-FAILURE-REPORTS': lambda x: int(x),
'CLUSTER COUNTKEYSINSLOT': lambda x: int(x),
'CLUSTER DELSLOTS': bool_ok,
'CLUSTER FAILOVER': bool_ok,
'CLUSTER FORGET': bool_ok,
'CLUSTER INFO': parse_cluster_info,
'CLUSTER KEYSLOT': lambda x: int(x),
'CLUSTER MEET': bool_ok,
'CLUSTER NODES': parse_cluster_nodes,
'CLUSTER REPLICATE': bool_ok,
'CLUSTER RESET': bool_ok,
'CLUSTER SAVECONFIG': bool_ok,
'CLUSTER SET-CONFIG-EPOCH': bool_ok,
'CLUSTER SETSLOT': bool_ok,
'CLUSTER SLAVES': parse_cluster_nodes,
'CONFIG GET': parse_config_get,
'CONFIG RESETSTAT': bool_ok,
'CONFIG SET': bool_ok,
'DEBUG OBJECT': parse_debug_object,
'GEOHASH': lambda r: list(map(str_if_bytes, r)),
'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]),
float(ll[1]))
if ll is not None else None, r)),
'GEORADIUS': parse_georadius_generic,
'GEORADIUSBYMEMBER': parse_georadius_generic,
'HGETALL': lambda r: r and pairs_to_dict(r) or {},
'HSCAN': parse_hscan,
'INFO': parse_info,
'LASTSAVE': timestamp_to_datetime,
'MEMORY PURGE': bool_ok,
'MEMORY STATS': parse_memory_stats,
'MEMORY USAGE': int_or_none,
'MODULE LOAD': parse_module_result,
'MODULE UNLOAD': parse_module_result,
'MODULE LIST': lambda r: [pairs_to_dict(m) for m in r],
'OBJECT': parse_object,
'PING': lambda r: str_if_bytes(r) == 'PONG',
'PUBSUB NUMSUB': parse_pubsub_numsub,
'RANDOMKEY': lambda r: r and r or None,
'SCAN': parse_scan,
'SCRIPT EXISTS': lambda r: list(map(bool, r)),
'SCRIPT FLUSH': bool_ok,
'SCRIPT KILL': bool_ok,
'SCRIPT LOAD': str_if_bytes,
'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master,
'SENTINEL MASTER': parse_sentinel_master,
'SENTINEL MASTERS': parse_sentinel_masters,
'SENTINEL MONITOR': bool_ok,
'SENTINEL REMOVE': bool_ok,
'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels,
'SENTINEL SET': bool_ok,
'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels,
'SET': lambda r: r and str_if_bytes(r) == 'OK',
'SLOWLOG GET': parse_slowlog_get,
'SLOWLOG LEN': int,
'SLOWLOG RESET': bool_ok,
'SSCAN': parse_scan,
'TIME': lambda x: (int(x[0]), int(x[1])),
'XCLAIM': parse_xclaim,
'XGROUP CREATE': bool_ok,
'XGROUP DELCONSUMER': int,
'XGROUP DESTROY': bool,
'XGROUP SETID': bool_ok,
'XINFO CONSUMERS': parse_list_of_dicts,
'XINFO GROUPS': parse_list_of_dicts,
'XINFO STREAM': parse_xinfo_stream,
'XPENDING': parse_xpending,
'ZADD': parse_zadd,
'ZSCAN': parse_zscan,
}
@classmethod
def from_url(cls, url, **kwargs):
"""
Return a Redis client object configured from the given URL
For example::
redis://[[username]:[password]]@localhost:6379/0
rediss://[[username]:[password]]@localhost:6379/0
unix://[[username]:[password]]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- `redis://` creates a TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/redis>
- `rediss://` creates a SSL wrapped TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/rediss>
- ``unix://``: creates a Unix Domain Socket connection.
The username, password, hostname, path and all querystring values
are passed through urllib.parse.unquote in order to replace any
percent-encoded values with their corresponding characters.
There are several ways to specify a database number. The first value
found will be used:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// or rediss:// schemes, the path argument
of the url, e.g. redis://localhost/0
3. A ``db`` keyword argument to this function.
If none of these options are specified, the default db=0 is used.
All querystring options are cast to their appropriate Python types.
Boolean arguments can be specified with string values "True"/"False"
or "Yes"/"No". Values that cannot be properly cast cause a
``ValueError`` to be raised. Once parsed, the querystring arguments
and keyword arguments are passed to the ``ConnectionPool``'s
class initializer. In the case of conflicting arguments, querystring
arguments always win.
"""
connection_pool = ConnectionPool.from_url(url, **kwargs)
return cls(connection_pool=connection_pool)
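# Editor's note (not part of the original file): a hedged usage sketch of
# from_url().  The URL and keyword options are illustrative only.
#
#     r = Redis.from_url('redis://localhost:6379/0', decode_responses=True)
#     r.ping()          # -> True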
def __init__(self, host='localhost', port=6379,
db=0, password=None, socket_timeout=None,
socket_connect_timeout=None,
socket_keepalive=None, socket_keepalive_options=None,
connection_pool=None, unix_socket_path=None,
encoding='utf-8', encoding_errors='strict',
charset=None, errors=None,
decode_responses=False, retry_on_timeout=False,
ssl=False, ssl_keyfile=None, ssl_certfile=None,
ssl_cert_reqs='required', ssl_ca_certs=None,
ssl_check_hostname=False,
max_connections=None, single_connection_client=False,
health_check_interval=0, client_name=None, username=None):
if not connection_pool:
if charset is not None:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
encoding = charset
if errors is not None:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
encoding_errors = errors
kwargs = {
'db': db,
'username': username,
'password': password,
'socket_timeout': socket_timeout,
'encoding': encoding,
'encoding_errors': encoding_errors,
'decode_responses': decode_responses,
'retry_on_timeout': retry_on_timeout,
'max_connections': max_connections,
'health_check_interval': health_check_interval,
'client_name': client_name
}
# based on input, setup appropriate connection args
if unix_socket_path is not None:
kwargs.update({
'path': unix_socket_path,
'connection_class': UnixDomainSocketConnection
})
else:
# TCP specific options
kwargs.update({
'host': host,
'port': port,
'socket_connect_timeout': socket_connect_timeout,
'socket_keepalive': socket_keepalive,
'socket_keepalive_options': socket_keepalive_options,
})
if ssl:
kwargs.update({
'connection_class': SSLConnection,
'ssl_keyfile': ssl_keyfile,
'ssl_certfile': ssl_certfile,
'ssl_cert_reqs': ssl_cert_reqs,
'ssl_ca_certs': ssl_ca_certs,
'ssl_check_hostname': ssl_check_hostname,
})
connection_pool = ConnectionPool(**kwargs)
self.connection_pool = connection_pool
self.connection = None
if single_connection_client:
self.connection = self.connection_pool.get_connection('_')
self.response_callbacks = CaseInsensitiveDict(
self.__class__.RESPONSE_CALLBACKS)
def __repr__(self):
return "%s<%s>" % (type(self).__name__, repr(self.connection_pool))
def set_response_callback(self, command, callback):
"Set a custom Response Callback"
self.response_callbacks[command] = callback
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return Pipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
shard_hint = kwargs.pop('shard_hint', None)
value_from_callable = kwargs.pop('value_from_callable', False)
watch_delay = kwargs.pop('watch_delay', None)
with self.pipeline(True, shard_hint) as pipe:
while True:
try:
if watches:
pipe.watch(*watches)
func_value = func(pipe)
exec_value = pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
time.sleep(watch_delay)
continue
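# Editor's note (not part of the original file): a hedged sketch of the
# transaction() helper above, using the classic WATCH/MULTI pattern.  The
# client ``r``, key name and arithmetic are illustrative only.
#
#     def increment(pipe):
#         current = int(pipe.get('counter') or 0)
#         pipe.multi()
#         pipe.set('counter', current + 1)
#
#     r.transaction(increment, 'counter')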
def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
lock_class=None, thread_local=True):
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates that it
should keep trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``lock_class`` forces the specified lock implementation.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release thread-2's lock.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage. """
if lock_class is None:
lock_class = Lock
return lock_class(self, name, timeout=timeout, sleep=sleep,
blocking_timeout=blocking_timeout,
thread_local=thread_local)
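# Editor's note (not part of the original file): a hedged sketch of lock()
# used as a context manager.  The client ``r``, lock name, timeouts and
# do_exclusive_work() are illustrative only.
#
#     with r.lock('my-lock', timeout=5, blocking_timeout=3):
#         do_exclusive_work()
#
# If the lock cannot be acquired within ``blocking_timeout`` seconds, a
# LockError is raised on entering the block.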
def pubsub(self, **kwargs):
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
return PubSub(self.connection_pool, **kwargs)
def monitor(self):
return Monitor(self.connection_pool)
def client(self):
return self.__class__(connection_pool=self.connection_pool,
single_connection_client=True)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __del__(self):
self.close()
def close(self):
conn = self.connection
if conn:
self.connection = None
self.connection_pool.release(conn)
# COMMAND EXECUTION AND PROTOCOL PARSING
def execute_command(self, *args, **options):
"Execute a command and return a parsed response"
pool = self.connection_pool
command_name = args[0]
conn = self.connection or pool.get_connection(command_name, **options)
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
if not (conn.retry_on_timeout and isinstance(e, TimeoutError)):
raise
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
finally:
if not self.connection:
pool.release(conn)
def parse_response(self, connection, command_name, **options):
"Parses a response from the Redis server"
try:
response = connection.read_response()
except ResponseError:
if EMPTY_RESPONSE in options:
return options[EMPTY_RESPONSE]
raise
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options)
return response
# SERVER INFORMATION
# ACL methods
def acl_cat(self, category=None):
"""
Returns a list of categories or commands within a category.
If ``category`` is not supplied, returns a list of all categories.
If ``category`` is supplied, returns a list of all commands within
that category.
"""
pieces = [category] if category else []
return self.execute_command('ACL CAT', *pieces)
def acl_deluser(self, username):
"Delete the ACL for the specified ``username``"
return self.execute_command('ACL DELUSER', username)
def acl_genpass(self):
"Generate a random password value"
return self.execute_command('ACL GENPASS')
def acl_getuser(self, username):
"""
Get the ACL details for the specified ``username``.
If ``username`` does not exist, return None
"""
return self.execute_command('ACL GETUSER', username)
def acl_list(self):
"Return a list of all ACLs on the server"
return self.execute_command('ACL LIST')
def acl_log(self, count=None):
"""
Get ACL logs as a list.
:param int count: Get logs[0:count].
:rtype: List.
"""
args = []
if count is not None:
if not isinstance(count, int):
raise DataError('ACL LOG count must be an '
'integer')
args.append(count)
return self.execute_command('ACL LOG', *args)
def acl_log_reset(self):
"""
Reset ACL logs.
:rtype: Boolean.
"""
args = [b'RESET']
return self.execute_command('ACL LOG', *args)
def acl_load(self):
"""
Load ACL rules from the configured ``aclfile``.
Note that the server must be configured with the ``aclfile``
directive to be able to load ACL rules from an aclfile.
"""
return self.execute_command('ACL LOAD')
def acl_save(self):
"""
Save ACL rules to the configured ``aclfile``.
Note that the server must be configured with the ``aclfile``
directive to be able to save ACL rules to an aclfile.
"""
return self.execute_command('ACL SAVE')
def acl_setuser(self, username, enabled=False, nopass=False,
passwords=None, hashed_passwords=None, categories=None,
commands=None, keys=None, reset=False, reset_keys=False,
reset_passwords=False):
"""
Create or update an ACL user.
Create or update the ACL for ``username``. If the user already exists,
the existing ACL is completely overwritten and replaced with the
specified values.
``enabled`` is a boolean indicating whether the user should be allowed
to authenticate or not. Defaults to ``False``.
``nopass`` is a boolean indicating whether the user can authenticate
without a password. This cannot be True if ``passwords`` are also specified.
``passwords`` if specified is a list of plain text passwords
to add to or remove from the user. Each password must be prefixed with
a '+' to add or a '-' to remove. For convenience, the value of
``passwords`` can be a simple prefixed string when adding or
removing a single password.
``hashed_passwords`` if specified is a list of SHA-256 hashed passwords
to add to or remove from the user. Each hashed password must be
prefixed with a '+' to add or a '-' to remove. For convenience,
the value of ``hashed_passwords`` can be a simple prefixed string when
adding or removing a single password.
``categories`` if specified is a list of strings representing category
permissions. Each string must be prefixed with either a '+' to add the
category permission or a '-' to remove the category permission.
``commands`` if specified is a list of strings representing command
permissions. Each string must be prefixed with either a '+' to add the
command permission or a '-' to remove the command permission.
``keys`` if specified is a list of key patterns to grant the user
access to. Keys patterns allow '*' to support wildcard matching. For
example, '*' grants access to all keys while 'cache:*' grants access
to all keys that are prefixed with 'cache:'. ``keys`` should not be
prefixed with a '~'.
``reset`` is a boolean indicating whether the user should be fully
reset prior to applying the new ACL. Setting this to True will
remove all existing passwords, flags and privileges from the user and
then apply the specified rules. If this is False, the user's existing
passwords, flags and privileges will be kept and any new specified
rules will be applied on top.
``reset_keys`` is a boolean indicating whether the user's key
permissions should be reset prior to applying any new key permissions
specified in ``keys``. If this is False, the user's existing
key permissions will be kept and any new specified key permissions
will be applied on top.
``reset_passwords`` is a boolean indicating whether to remove all
existing passwords and the 'nopass' flag from the user prior to
applying any new passwords specified in 'passwords' or
'hashed_passwords'. If this is False, the user's existing passwords
and 'nopass' status will be kept and any new specified passwords
or hashed_passwords will be applied on top.
"""
encoder = self.connection_pool.get_encoder()
pieces = [username]
if reset:
pieces.append(b'reset')
if reset_keys:
pieces.append(b'resetkeys')
if reset_passwords:
pieces.append(b'resetpass')
if enabled:
pieces.append(b'on')
else:
pieces.append(b'off')
if (passwords or hashed_passwords) and nopass:
raise DataError('Cannot set \'nopass\' and supply '
'\'passwords\' or \'hashed_passwords\'')
if passwords:
# as most users will have only one password, allow remove_passwords
# to be specified as a simple string or a list
passwords = list_or_args(passwords, [])
for i, password in enumerate(passwords):
password = encoder.encode(password)
if password.startswith(b'+'):
pieces.append(b'>%s' % password[1:])
elif password.startswith(b'-'):
pieces.append(b'<%s' % password[1:])
else:
raise DataError('Password %d must be prefixed with a '
'"+" to add or a "-" to remove' % i)
if hashed_passwords:
# as most users will have only one password, allow remove_passwords
# to be specified as a simple string or a list
hashed_passwords = list_or_args(hashed_passwords, [])
for i, hashed_password in enumerate(hashed_passwords):
hashed_password = encoder.encode(hashed_password)
if hashed_password.startswith(b'+'):
pieces.append(b'#%s' % hashed_password[1:])
elif hashed_password.startswith(b'-'):
pieces.append(b'!%s' % hashed_password[1:])
else:
raise DataError('Hashed password %d must be prefixed '
'with a "+" to add or a "-" to remove' % i)
if nopass:
pieces.append(b'nopass')
if categories:
for category in categories:
category = encoder.encode(category)
# categories can be prefixed with one of (+@, +, -@, -)
if category.startswith(b'+@'):
pieces.append(category)
elif category.startswith(b'+'):
pieces.append(b'+@%s' % category[1:])
elif category.startswith(b'-@'):
pieces.append(category)
elif category.startswith(b'-'):
pieces.append(b'-@%s' % category[1:])
else:
raise DataError('Category "%s" must be prefixed with '
'"+" or "-"'
% encoder.decode(category, force=True))
if commands:
for cmd in commands:
cmd = encoder.encode(cmd)
if not cmd.startswith(b'+') and not cmd.startswith(b'-'):
raise DataError('Command "%s" must be prefixed with '
'"+" or "-"'
% encoder.decode(cmd, force=True))
pieces.append(cmd)
if keys:
for key in keys:
key = encoder.encode(key)
pieces.append(b'~%s' % key)
return self.execute_command('ACL SETUSER', *pieces)
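# Editor's note (not part of the original file): a hedged sketch of
# acl_setuser() using the '+'/'-' and '@' prefixes described above.  The
# client ``r``, username, password and key pattern are illustrative only.
#
#     r.acl_setuser('reporting', enabled=True,
#                   passwords='+secret-password',
#                   categories=['+@read'],
#                   commands=['+info'],
#                   keys=['report:*'])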
def acl_users(self):
"Returns a list of all registered users on the server."
return self.execute_command('ACL USERS')
def acl_whoami(self):
"Get the username for the current connection"
return self.execute_command('ACL WHOAMI')
def bgrewriteaof(self):
"Tell the Redis server to rewrite the AOF file from data in memory."
return self.execute_command('BGREWRITEAOF')
def bgsave(self):
"""
Tell the Redis server to save its data to disk. Unlike save(),
this method is asynchronous and returns immediately.
"""
return self.execute_command('BGSAVE')
def client_kill(self, address):
"Disconnects the client at ``address`` (ip:port)"
return self.execute_command('CLIENT KILL', address)
def client_kill_filter(self, _id=None, _type=None, addr=None, skipme=None):
"""
Disconnects client(s) using a variety of filter options
:param id: Kills a client by its unique ID field
:param type: Kills a client by type where type is one of 'normal',
'master', 'slave' or 'pubsub'
:param addr: Kills a client by its 'address:port'
:param skipme: If True, then the client calling the command
will not get killed even if it is identified by one of the filter
options. If skipme is not provided, the server defaults to skipme=True
"""
args = []
if _type is not None:
client_types = ('normal', 'master', 'slave', 'pubsub')
if str(_type).lower() not in client_types:
raise DataError("CLIENT KILL type must be one of %r" % (
client_types,))
args.extend((b'TYPE', _type))
if skipme is not None:
if not isinstance(skipme, bool):
raise DataError("CLIENT KILL skipme must be a bool")
if skipme:
args.extend((b'SKIPME', b'YES'))
else:
args.extend((b'SKIPME', b'NO'))
if _id is not None:
args.extend((b'ID', _id))
if addr is not None:
args.extend((b'ADDR', addr))
if not args:
raise DataError("CLIENT KILL <filter> <value> ... ... <filter> "
"<value> must specify at least one filter")
return self.execute_command('CLIENT KILL', *args)
def client_list(self, _type=None):
"""
Returns a list of currently connected clients.
If type of client specified, only that type will be returned.
:param _type: optional. one of the client types (normal, master,
replica, pubsub)
"""
"Returns a list of currently connected clients"
if _type is not None:
client_types = ('normal', 'master', 'replica', 'pubsub')
if str(_type).lower() not in client_types:
raise DataError("CLIENT LIST _type must be one of %r" % (
client_types,))
return self.execute_command('CLIENT LIST', b'TYPE', _type)
return self.execute_command('CLIENT LIST')
def client_getname(self):
"Returns the current connection name"
return self.execute_command('CLIENT GETNAME')
def client_id(self):
"Returns the current connection id"
return self.execute_command('CLIENT ID')
def client_setname(self, name):
"Sets the current connection name"
return self.execute_command('CLIENT SETNAME', name)
def client_unblock(self, client_id, error=False):
"""
Unblocks a connection by its client id.
If ``error`` is True, unblocks the client with a special error message.
If ``error`` is False (default), the client is unblocked using the
regular timeout mechanism.
"""
args = ['CLIENT UNBLOCK', int(client_id)]
if error:
args.append(b'ERROR')
return self.execute_command(*args)
def client_pause(self, timeout):
"""
Suspend all the Redis clients for the specified amount of time
:param timeout: milliseconds to pause clients
"""
if not isinstance(timeout, int):
raise DataError("CLIENT PAUSE timeout must be an integer")
return self.execute_command('CLIENT PAUSE', str(timeout))
def readwrite(self):
"Disables read queries for a connection to a Redis Cluster slave node"
return self.execute_command('READWRITE')
def readonly(self):
"Enables read queries for a connection to a Redis Cluster replica node"
return self.execute_command('READONLY')
def config_get(self, pattern="*"):
"Return a dictionary of configuration based on the ``pattern``"
return self.execute_command('CONFIG GET', pattern)
def config_set(self, name, value):
"Set config item ``name`` with ``value``"
return self.execute_command('CONFIG SET', name, value)
def config_resetstat(self):
"Reset runtime statistics"
return self.execute_command('CONFIG RESETSTAT')
def config_rewrite(self):
"Rewrite config file with the minimal change to reflect running config"
return self.execute_command('CONFIG REWRITE')
def dbsize(self):
"Returns the number of keys in the current database"
return self.execute_command('DBSIZE')
def debug_object(self, key):
"Returns version specific meta information about a given key"
return self.execute_command('DEBUG OBJECT', key)
def echo(self, value):
"Echo the string back from the server"
return self.execute_command('ECHO', value)
def flushall(self, asynchronous=False):
"""
Delete all keys in all databases on the current host.
``asynchronous`` indicates whether the operation is
executed asynchronously by the server.
"""
args = []
if asynchronous:
args.append(b'ASYNC')
return self.execute_command('FLUSHALL', *args)
def flushdb(self, asynchronous=False):
"""
Delete all keys in the current database.
``asynchronous`` indicates whether the operation is
executed asynchronously by the server.
"""
args = []
if asynchronous:
args.append(b'ASYNC')
return self.execute_command('FLUSHDB', *args)
def swapdb(self, first, second):
"Swap two databases"
return self.execute_command('SWAPDB', first, second)
def info(self, section=None):
"""
Returns a dictionary containing information about the Redis server
The ``section`` option can be used to select a specific section
of information
The section option is not supported by older versions of Redis Server,
and will generate a ResponseError
"""
if section is None:
return self.execute_command('INFO')
else:
return self.execute_command('INFO', section)
def lastsave(self):
"""
Return a Python datetime object representing the last time the
Redis database was saved to disk
"""
return self.execute_command('LASTSAVE')
def migrate(self, host, port, keys, destination_db, timeout,
copy=False, replace=False, auth=None):
"""
Migrate 1 or more keys from the current Redis server to a different
server specified by the ``host``, ``port`` and ``destination_db``.
The ``timeout``, specified in milliseconds, indicates the maximum
time the connection between the two servers can be idle before the
command is interrupted.
If ``copy`` is True, the specified ``keys`` are NOT deleted from
the source server.
If ``replace`` is True, this operation will overwrite the keys
on the destination server if they exist.
If ``auth`` is specified, authenticate to the destination server with
the password provided.
"""
keys = list_or_args(keys, [])
if not keys:
raise DataError('MIGRATE requires at least one key')
pieces = []
if copy:
pieces.append(b'COPY')
if replace:
pieces.append(b'REPLACE')
if auth:
pieces.append(b'AUTH')
pieces.append(auth)
pieces.append(b'KEYS')
pieces.extend(keys)
return self.execute_command('MIGRATE', host, port, '', destination_db,
timeout, *pieces)
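# Illustrative usage sketch (not executed as part of this module; assumes
# ``r`` is a connected instance of this client and the target host, port
# and key names below are hypothetical):
#   r.migrate('192.0.2.10', 6379, ['cache:1', 'cache:2'], 0, 5000,
#             copy=True, replace=True)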
def object(self, infotype, key):
"Return the encoding, idletime, or refcount about the key"
return self.execute_command('OBJECT', infotype, key, infotype=infotype)
def memory_stats(self):
"Return a dictionary of memory stats"
return self.execute_command('MEMORY STATS')
def memory_usage(self, key, samples=None):
"""
Return the total memory usage for key, its value and associated
administrative overheads.
For nested data structures, ``samples`` is the number of elements to
sample. If left unspecified, the server's default is 5. Use 0 to sample
all elements.
"""
args = []
if isinstance(samples, int):
args.extend([b'SAMPLES', samples])
return self.execute_command('MEMORY USAGE', key, *args)
def memory_purge(self):
"Attempts to purge dirty pages for reclamation by allocator"
return self.execute_command('MEMORY PURGE')
def ping(self):
"Ping the Redis server"
return self.execute_command('PING')
def save(self):
"""
Tell the Redis server to save its data to disk,
blocking until the save is complete
"""
return self.execute_command('SAVE')
def sentinel(self, *args):
"Redis Sentinel's SENTINEL command."
warnings.warn(
DeprecationWarning('Use the individual sentinel_* methods'))
def sentinel_get_master_addr_by_name(self, service_name):
"Returns a (host, port) pair for the given ``service_name``"
return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
service_name)
def sentinel_master(self, service_name):
"Returns a dictionary containing the specified masters state."
return self.execute_command('SENTINEL MASTER', service_name)
def sentinel_masters(self):
"Returns a list of dictionaries containing each master's state."
return self.execute_command('SENTINEL MASTERS')
def sentinel_monitor(self, name, ip, port, quorum):
"Add a new master to Sentinel to be monitored"
return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
def sentinel_remove(self, name):
"Remove a master from Sentinel's monitoring"
return self.execute_command('SENTINEL REMOVE', name)
def sentinel_sentinels(self, service_name):
"Returns a list of sentinels for ``service_name``"
return self.execute_command('SENTINEL SENTINELS', service_name)
def sentinel_set(self, name, option, value):
"Set Sentinel monitoring parameters for a given master"
return self.execute_command('SENTINEL SET', name, option, value)
def sentinel_slaves(self, service_name):
"Returns a list of slaves for ``service_name``"
return self.execute_command('SENTINEL SLAVES', service_name)
def shutdown(self, save=False, nosave=False):
"""Shutdown the Redis server. If Redis has persistence configured,
data will be flushed before shutdown. If the "save" option is set,
a data flush will be attempted even if there is no persistence
configured. If the "nosave" option is set, no data flush will be
attempted. The "save" and "nosave" options cannot both be set.
"""
if save and nosave:
raise DataError('SHUTDOWN save and nosave cannot both be set')
args = ['SHUTDOWN']
if save:
args.append('SAVE')
if nosave:
args.append('NOSAVE')
try:
self.execute_command(*args)
except ConnectionError:
# a ConnectionError here is expected
return
raise RedisError("SHUTDOWN seems to have failed.")
def slaveof(self, host=None, port=None):
"""
Set the server to be a replicated slave of the instance identified
by the ``host`` and ``port``. If called without arguments, the
instance is promoted to a master instead.
"""
if host is None and port is None:
return self.execute_command('SLAVEOF', b'NO', b'ONE')
return self.execute_command('SLAVEOF', host, port)
def slowlog_get(self, num=None):
"""
Get the entries from the slowlog. If ``num`` is specified, get the
most recent ``num`` items.
"""
args = ['SLOWLOG GET']
if num is not None:
args.append(num)
decode_responses = self.connection_pool.connection_kwargs.get(
'decode_responses', False)
return self.execute_command(*args, decode_responses=decode_responses)
def slowlog_len(self):
"Get the number of items in the slowlog"
return self.execute_command('SLOWLOG LEN')
def slowlog_reset(self):
"Remove all items in the slowlog"
return self.execute_command('SLOWLOG RESET')
def time(self):
"""
Returns the server time as a 2-item tuple of ints:
(seconds since epoch, microseconds into this second).
"""
return self.execute_command('TIME')
def wait(self, num_replicas, timeout):
"""
Redis synchronous replication.
Blocks until at least ``num_replicas`` have acknowledged all write
commands issued before this call, or until ``timeout`` milliseconds
have elapsed, and returns the number of replicas that acknowledged.
"""
return self.execute_command('WAIT', num_replicas, timeout)
# BASIC KEY COMMANDS
def append(self, key, value):
"""
Appends the string ``value`` to the value at ``key``. If ``key``
doesn't already exist, create it with a value of ``value``.
Returns the new length of the value at ``key``.
"""
return self.execute_command('APPEND', key, value)
def bitcount(self, key, start=None, end=None):
"""
Returns the count of set bits in the value of ``key``. Optional
``start`` and ``end`` parameters indicate which bytes to consider
"""
params = [key]
if start is not None and end is not None:
params.append(start)
params.append(end)
elif (start is not None and end is None) or \
(end is not None and start is None):
raise DataError("Both start and end must be specified")
return self.execute_command('BITCOUNT', *params)
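# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client; the key name is hypothetical):
#   r.setbit('logins:2020-01-01', 7, 1)       # mark user id 7 as seen
#   r.bitcount('logins:2020-01-01')           # count over the whole value
#   r.bitcount('logins:2020-01-01', 0, 0)     # count within the first byte only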
def bitfield(self, key, default_overflow=None):
"""
Return a BitFieldOperation instance to conveniently construct one or
more bitfield operations on ``key``.
"""
return BitFieldOperation(self, key, default_overflow=default_overflow)
def bitop(self, operation, dest, *keys):
"""
Perform a bitwise operation using ``operation`` between ``keys`` and
store the result in ``dest``.
"""
return self.execute_command('BITOP', operation, dest, *keys)
def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise DataError('bit must be 0 or 1')
params = [key, bit]
if start is not None: params.append(start)
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
raise DataError("start argument is not set, "
"when end is specified")
return self.execute_command('BITPOS', *params)
def copy(self, source, destination, destination_db=None, replace=False):
"""
Copy the value stored in the ``source`` key to the ``destination`` key.
``destination_db`` an alternative destination database. By default,
the ``destination`` key is created in the source Redis database.
``replace`` whether the ``destination`` key should be removed before
copying the value to it. By default, the value is not copied if
the ``destination`` key already exists.
"""
params = [source, destination]
if destination_db is not None:
params.extend(["DB", destination_db])
if replace:
params.append("REPLACE")
return self.execute_command('COPY', *params)
def decr(self, name, amount=1):
"""
Decrements the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as 0 - ``amount``
"""
# ``decr`` is effectively an alias for ``decrby``, since the
# underlying Redis command is DECRBY.
return self.decrby(name, amount)
def decrby(self, name, amount=1):
"""
Decrements the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as 0 - ``amount``
"""
return self.execute_command('DECRBY', name, amount)
def delete(self, *names):
"Delete one or more keys specified by ``names``"
return self.execute_command('DEL', *names)
def __delitem__(self, name):
self.delete(name)
def dump(self, name):
"""
Return a serialized version of the value stored at the specified key.
If key does not exist a nil bulk reply is returned.
"""
return self.execute_command('DUMP', name)
def exists(self, *names):
"Returns the number of ``names`` that exist"
return self.execute_command('EXISTS', *names)
__contains__ = exists
def expire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` seconds. ``time``
can be represented by an integer or a Python timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds())
return self.execute_command('EXPIRE', name, time)
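# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client; the key name is hypothetical):
#   import datetime
#   r.set('session:42', 'payload')
#   r.expire('session:42', 900)                              # 900 seconds
#   r.expire('session:42', datetime.timedelta(minutes=15))   # same TTL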
def expireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer indicating unix time or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
when = int(mod_time.mktime(when.timetuple()))
return self.execute_command('EXPIREAT', name, when)
def get(self, name):
"""
Return the value at key ``name``, or None if the key doesn't exist
"""
return self.execute_command('GET', name)
def __getitem__(self, name):
"""
Return the value at key ``name``, raises a KeyError if the key
doesn't exist.
"""
value = self.get(name)
if value is not None:
return value
raise KeyError(name)
def getbit(self, name, offset):
"Returns a boolean indicating the value of ``offset`` in ``name``"
return self.execute_command('GETBIT', name, offset)
def getrange(self, key, start, end):
"""
Returns the substring of the string value stored at ``key``,
determined by the offsets ``start`` and ``end`` (both are inclusive)
"""
return self.execute_command('GETRANGE', key, start, end)
def getset(self, name, value):
"""
Sets the value at key ``name`` to ``value``
and returns the old value at key ``name`` atomically.
"""
return self.execute_command('GETSET', name, value)
def incr(self, name, amount=1):
"""
Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
return self.incrby(name, amount)
def incrby(self, name, amount=1):
"""
Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
# ``incr`` is effectively an alias for this method, since the
# underlying Redis command is INCRBY.
return self.execute_command('INCRBY', name, amount)
def incrbyfloat(self, name, amount=1.0):
"""
Increments the value at key ``name`` by floating ``amount``.
If no key exists, the value will be initialized as ``amount``
"""
return self.execute_command('INCRBYFLOAT', name, amount)
def keys(self, pattern='*'):
"Returns a list of keys matching ``pattern``"
return self.execute_command('KEYS', pattern)
def mget(self, keys, *args):
"""
Returns a list of values ordered identically to ``keys``
"""
args = list_or_args(keys, args)
options = {}
if not args:
options[EMPTY_RESPONSE] = []
return self.execute_command('MGET', *args, **options)
def mset(self, mapping):
"""
Sets key/values based on a mapping. Mapping is a dictionary of
key/value pairs. Both keys and values should be strings or types that
can be cast to a string via str().
"""
items = []
for pair in mapping.items():
items.extend(pair)
return self.execute_command('MSET', *items)
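# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client; the key names are hypothetical):
#   r.mset({'config:a': '1', 'config:b': '2'})
#   r.mget(['config:a', 'config:b', 'config:missing'])  # missing keys -> None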
def msetnx(self, mapping):
"""
Sets key/values based on a mapping if none of the keys are already set.
Mapping is a dictionary of key/value pairs. Both keys and values
should be strings or types that can be cast to a string via str().
Returns a boolean indicating if the operation was successful.
"""
items = []
for pair in mapping.items():
items.extend(pair)
return self.execute_command('MSETNX', *items)
def move(self, name, db):
"Moves the key ``name`` to a different Redis database ``db``"
return self.execute_command('MOVE', name, db)
def persist(self, name):
"Removes an expiration on ``name``"
return self.execute_command('PERSIST', name)
def pexpire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` milliseconds.
``time`` can be represented by an integer or a Python timedelta
object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds() * 1000)
return self.execute_command('PEXPIRE', name, time)
def pexpireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer representing unix time in milliseconds (unix time * 1000)
or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
ms = int(when.microsecond / 1000)
when = int(mod_time.mktime(when.timetuple())) * 1000 + ms
return self.execute_command('PEXPIREAT', name, when)
def psetex(self, name, time_ms, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object
"""
if isinstance(time_ms, datetime.timedelta):
time_ms = int(time_ms.total_seconds() * 1000)
return self.execute_command('PSETEX', name, time_ms, value)
def pttl(self, name):
"Returns the number of milliseconds until the key ``name`` will expire"
return self.execute_command('PTTL', name)
def randomkey(self):
"Returns the name of a random key"
return self.execute_command('RANDOMKEY')
def rename(self, src, dst):
"""
Rename key ``src`` to ``dst``
"""
return self.execute_command('RENAME', src, dst)
def renamenx(self, src, dst):
"Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
return self.execute_command('RENAMENX', src, dst)
def restore(self, name, ttl, value, replace=False, absttl=False):
"""
Create a key using the provided serialized value, previously obtained
using DUMP.
``replace`` allows an existing key on ``name`` to be overridden. If
it's not specified an error is raised on collision.
``absttl`` if True, specified ``ttl`` should represent an absolute Unix
timestamp in milliseconds in which the key will expire. (Redis 5.0 or
greater).
"""
params = [name, ttl, value]
if replace:
params.append('REPLACE')
if absttl:
params.append('ABSTTL')
return self.execute_command('RESTORE', *params)
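# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client): a DUMP/RESTORE round trip that clones a key with no TTL.
#   payload = r.dump('source-key')
#   r.restore('source-key:copy', 0, payload, replace=True)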
def set(self, name, value,
ex=None, px=None, nx=False, xx=False, keepttl=False):
"""
Set the value at key ``name`` to ``value``
``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.
``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.
``nx`` if set to True, set the value at key ``name`` to ``value`` only
if it does not exist.
``xx`` if set to True, set the value at key ``name`` to ``value`` only
if it already exists.
``keepttl`` if True, retain the time to live associated with the key.
(Available since Redis 6.0)
"""
pieces = [name, value]
if ex is not None:
pieces.append('EX')
if isinstance(ex, datetime.timedelta):
ex = int(ex.total_seconds())
pieces.append(ex)
if px is not None:
pieces.append('PX')
if isinstance(px, datetime.timedelta):
px = int(px.total_seconds() * 1000)
pieces.append(px)
if nx:
pieces.append('NX')
if xx:
pieces.append('XX')
if keepttl:
pieces.append('KEEPTTL')
return self.execute_command('SET', *pieces)
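# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client): a simple lock-style pattern built on the NX/XX and expiry options.
#   r.set('lock:report', 'worker-1', nx=True, ex=30)         # acquire for 30s
#   r.set('lock:report', 'worker-1', xx=True, keepttl=True)  # refresh value,
#                                                            # keep remaining TTL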
def __setitem__(self, name, value):
self.set(name, value)
def setbit(self, name, offset, value):
"""
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.
"""
value = 1 if value else 0
return self.execute_command('SETBIT', name, offset, value)
def setex(self, name, time, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds())
return self.execute_command('SETEX', name, time, value)
def setnx(self, name, value):
"Set the value of key ``name`` to ``value`` if key doesn't exist"
return self.execute_command('SETNX', name, value)
def setrange(self, name, offset, value):
"""
Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
"""
return self.execute_command('SETRANGE', name, offset, value)
def strlen(self, name):
"Return the number of bytes stored in the value of ``name``"
return self.execute_command('STRLEN', name)
def substr(self, name, start, end=-1):
"""
Return a substring of the string at key ``name``. ``start`` and ``end``
are 0-based integers specifying the portion of the string to return.
"""
return self.execute_command('SUBSTR', name, start, end)
def touch(self, *args):
"""
Alters the last access time of a key(s) ``*args``. A key is ignored
if it does not exist.
"""
return self.execute_command('TOUCH', *args)
def ttl(self, name):
"Returns the number of seconds until the key ``name`` will expire"
return self.execute_command('TTL', name)
def type(self, name):
"Returns the type of key ``name``"
return self.execute_command('TYPE', name)
def watch(self, *names):
"""
Watches the values at keys ``names``.
"""
warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))
def unwatch(self):
"""
Unwatches all previously watched keys for a transaction.
"""
warnings.warn(
DeprecationWarning('Call UNWATCH from a Pipeline object'))
def unlink(self, *names):
"Unlink one or more keys specified by ``names``"
return self.execute_command('UNLINK', *names)
# LIST COMMANDS
def blpop(self, keys, timeout=0):
"""
LPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to LPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command('BLPOP', *keys)
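# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client): a minimal producer/consumer queue.
#   r.rpush('jobs', 'job-1')
#   r.blpop(['jobs'], timeout=5)   # -> ('jobs', 'job-1') pair, or None on timeout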
def brpop(self, keys, timeout=0):
"""
RPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to RPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command('BRPOP', *keys)
def brpoplpush(self, src, dst, timeout=0):
"""
Pop a value off the tail of ``src``, push it on the head of ``dst``
and then return it.
This command blocks until a value is in ``src`` or until ``timeout``
seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
forever.
"""
if timeout is None:
timeout = 0
return self.execute_command('BRPOPLPUSH', src, dst, timeout)
def lindex(self, name, index):
"""
Return the item from list ``name`` at position ``index``
Negative indexes are supported and will return an item at the
end of the list
"""
return self.execute_command('LINDEX', name, index)
def linsert(self, name, where, refvalue, value):
"""
Insert ``value`` in list ``name`` either immediately before or after
[``where``] ``refvalue``
Returns the new length of the list on success or -1 if ``refvalue``
is not in the list.
"""
return self.execute_command('LINSERT', name, where, refvalue, value)
def llen(self, name):
"Return the length of the list ``name``"
return self.execute_command('LLEN', name)
def lpop(self, name):
"Remove and return the first item of the list ``name``"
return self.execute_command('LPOP', name)
def lpush(self, name, *values):
"Push ``values`` onto the head of the list ``name``"
return self.execute_command('LPUSH', name, *values)
def lpushx(self, name, value):
"Push ``value`` onto the head of the list ``name`` if ``name`` exists"
return self.execute_command('LPUSHX', name, value)
def lrange(self, name, start, end):
"""
Return a slice of the list ``name`` between
position ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LRANGE', name, start, end)
def lrem(self, name, count, value):
"""
Remove the first ``count`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The count argument influences the operation in the following ways:
count > 0: Remove elements equal to value moving from head to tail.
count < 0: Remove elements equal to value moving from tail to head.
count = 0: Remove all elements equal to value.
"""
return self.execute_command('LREM', name, count, value)
def lset(self, name, index, value):
"Set ``position`` of list ``name`` to ``value``"
return self.execute_command('LSET', name, index, value)
def ltrim(self, name, start, end):
"""
Trim the list ``name``, removing all values not within the slice
between ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LTRIM', name, start, end)
def rpop(self, name):
"Remove and return the last item of the list ``name``"
return self.execute_command('RPOP', name)
def rpoplpush(self, src, dst):
"""
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
"""
return self.execute_command('RPOPLPUSH', src, dst)
def rpush(self, name, *values):
"Push ``values`` onto the tail of the list ``name``"
return self.execute_command('RPUSH', name, *values)
def rpushx(self, name, value):
"Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
return self.execute_command('RPUSHX', name, value)
def lpos(self, name, value, rank=None, count=None, maxlen=None):
"""
Get position of ``value`` within the list ``name``
If specified, ``rank`` indicates the "rank" of the first element to
return in case there are multiple copies of ``value`` in the list.
By default, LPOS returns the position of the first occurrence of
``value`` in the list. When ``rank`` is 2, LPOS returns the position of
the second ``value`` in the list. If ``rank`` is negative, LPOS
searches the list in reverse. For example, -1 would return the
position of the last occurrence of ``value`` and -2 would return the
position of the next to last occurrence of ``value``.
If specified, ``count`` indicates that LPOS should return a list of
up to ``count`` positions. A ``count`` of 2 would return a list of
up to 2 positions. A ``count`` of 0 returns a list of all positions
matching ``value``. When ``count`` is specified but ``value``
does not exist in the list, an empty list is returned.
If specified, ``maxlen`` indicates the maximum number of list
elements to scan. A ``maxlen`` of 1000 will only return the
position(s) of items within the first 1000 entries in the list.
A ``maxlen`` of 0 (the default) will scan the entire list.
"""
pieces = [name, value]
if rank is not None:
pieces.extend(['RANK', rank])
if count is not None:
pieces.extend(['COUNT', count])
if maxlen is not None:
pieces.extend(['MAXLEN', maxlen])
return self.execute_command('LPOS', *pieces)
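# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client and a server recent enough to support LPOS, i.e. Redis 6.0.6+):
#   r.rpush('letters', 'a', 'b', 'c', 'b')
#   r.lpos('letters', 'b')             # -> 1, first occurrence
#   r.lpos('letters', 'b', rank=-1)    # -> 3, searching from the tail
#   r.lpos('letters', 'b', count=0)    # -> [1, 3], all positions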
def sort(self, name, start=None, num=None, by=None, get=None,
desc=False, alpha=False, store=None, groups=False):
"""
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = [name]
if by is not None:
pieces.append(b'BY')
pieces.append(by)
if start is not None and num is not None:
pieces.append(b'LIMIT')
pieces.append(start)
pieces.append(num)
if get is not None:
# If get is a string assume we want to get a single value.
# Otherwise assume it's an iterable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, (bytes, str)):
pieces.append(b'GET')
pieces.append(get)
else:
for g in get:
pieces.append(b'GET')
pieces.append(g)
if desc:
pieces.append(b'DESC')
if alpha:
pieces.append(b'ALPHA')
if store is not None:
pieces.append(b'STORE')
pieces.append(store)
if groups:
if not get or isinstance(get, (bytes, str)) or len(get) < 2:
raise DataError('when using "groups" the "get" argument '
'must be specified and contain at least '
'two keys')
options = {'groups': len(get) if groups else None}
return self.execute_command('SORT', *pieces, **options)
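# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client; the hash/key layout is hypothetical):
#   r.rpush('user_ids', 3, 1, 2)
#   r.sort('user_ids')                                   # numeric ascending order
#   r.sort('user_ids', by='user:*->age',
#          get=['user:*->name', '#'], groups=True)       # -> [(name, id), ...]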
# SCAN COMMANDS
def scan(self, cursor=0, match=None, count=None, _type=None):
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of keys to
return per batch.
``_type`` filters the returned values by a particular Redis type.
Stock Redis instances allow for the following types:
HASH, LIST, SET, STREAM, STRING, ZSET
Additionally, Redis modules can expose other types as well.
"""
pieces = [cursor]
if match is not None:
pieces.extend([b'MATCH', match])
if count is not None:
pieces.extend([b'COUNT', count])
if _type is not None:
pieces.extend([b'TYPE', _type])
return self.execute_command('SCAN', *pieces)
def scan_iter(self, match=None, count=None, _type=None):
"""
Make an iterator using the SCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of keys to
return per batch.
``_type`` filters the returned values by a particular Redis type.
Stock Redis instances allow for the following types:
HASH, LIST, SET, STREAM, STRING, ZSET
Additionally, Redis modules can expose other types as well.
"""
cursor = '0'
while cursor != 0:
cursor, data = self.scan(cursor=cursor, match=match,
count=count, _type=_type)
yield from data
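# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client): cursor handling is hidden by the iterator.
#   for key in r.scan_iter(match='session:*', count=500):
#       process(key)   # ``process`` is a placeholder for caller code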
def sscan(self, name, cursor=0, match=None, count=None):
"""
Incrementally return lists of elements in a set. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of elements to
return per batch
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([b'MATCH', match])
if count is not None:
pieces.extend([b'COUNT', count])
return self.execute_command('SSCAN', *pieces)
def sscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the SSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of elements to
return per batch
"""
cursor = '0'
while cursor != 0:
cursor, data = self.sscan(name, cursor=cursor,
match=match, count=count)
yield from data
def hscan(self, name, cursor=0, match=None, count=None):
"""
Incrementally return key/value slices in a hash. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of elements to
return per batch
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([b'MATCH', match])
if count is not None:
pieces.extend([b'COUNT', count])
return self.execute_command('HSCAN', *pieces)
def hscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the HSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of elements to
return per batch
"""
cursor = '0'
while cursor != 0:
cursor, data = self.hscan(name, cursor=cursor,
match=match, count=count)
yield from data.items()
def zscan(self, name, cursor=0, match=None, count=None,
score_cast_func=float):
"""
Incrementally return lists of elements in a sorted set. Also return a
cursor indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of elements to
return per batch
``score_cast_func`` a callable used to cast the score return value
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([b'MATCH', match])
if count is not None:
pieces.extend([b'COUNT', count])
options = {'score_cast_func': score_cast_func}
return self.execute_command('ZSCAN', *pieces, **options)
def zscan_iter(self, name, match=None, count=None,
score_cast_func=float):
"""
Make an iterator using the ZSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` provides a hint to Redis about the number of elements to
return per batch
``score_cast_func`` a callable used to cast the score return value
"""
cursor = '0'
while cursor != 0:
cursor, data = self.zscan(name, cursor=cursor, match=match,
count=count,
score_cast_func=score_cast_func)
yield from data
# SET COMMANDS
def sadd(self, name, *values):
"Add ``value(s)`` to set ``name``"
return self.execute_command('SADD', name, *values)
def scard(self, name):
"Return the number of elements in set ``name``"
return self.execute_command('SCARD', name)
def sdiff(self, keys, *args):
"Return the difference of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SDIFF', *args)
def sdiffstore(self, dest, keys, *args):
"""
Store the difference of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SDIFFSTORE', dest, *args)
def sinter(self, keys, *args):
"Return the intersection of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SINTER', *args)
def sinterstore(self, dest, keys, *args):
"""
Store the intersection of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SINTERSTORE', dest, *args)
def sismember(self, name, value):
"Return a boolean indicating if ``value`` is a member of set ``name``"
return self.execute_command('SISMEMBER', name, value)
def smembers(self, name):
"Return all members of the set ``name``"
return self.execute_command('SMEMBERS', name)
def smove(self, src, dst, value):
"Move ``value`` from set ``src`` to set ``dst`` atomically"
return self.execute_command('SMOVE', src, dst, value)
def spop(self, name, count=None):
"Remove and return a random member of set ``name``"
args = [count] if count is not None else []
return self.execute_command('SPOP', name, *args)
def srandmember(self, name, number=None):
"""
If ``number`` is None, returns a random member of set ``name``.
If ``number`` is supplied, returns a list of ``number`` random
members of set ``name``. Note this is only available when running
Redis 2.6+.
"""
args = [number] if number is not None else []
return self.execute_command('SRANDMEMBER', name, *args)
def srem(self, name, *values):
"Remove ``values`` from set ``name``"
return self.execute_command('SREM', name, *values)
def sunion(self, keys, *args):
"Return the union of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SUNION', *args)
def sunionstore(self, dest, keys, *args):
"""
Store the union of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SUNIONSTORE', dest, *args)
# STREAMS COMMANDS
def xack(self, name, groupname, *ids):
"""
Acknowledges the successful processing of one or more messages.
name: name of the stream.
groupname: name of the consumer group.
*ids: message ids to acknowledge.
"""
return self.execute_command('XACK', name, groupname, *ids)
def xadd(self, name, fields, id='*', maxlen=None, approximate=True):
"""
Add to a stream.
name: name of the stream
fields: dict of field/value pairs to insert into the stream
id: Location to insert this record. By default it is appended.
maxlen: truncate old stream members beyond this size
approximate: actual stream length may be slightly more than maxlen
"""
pieces = []
if maxlen is not None:
if not isinstance(maxlen, int) or maxlen < 1:
raise DataError('XADD maxlen must be a positive integer')
pieces.append(b'MAXLEN')
if approximate:
pieces.append(b'~')
pieces.append(str(maxlen))
pieces.append(id)
if not isinstance(fields, dict) or len(fields) == 0:
raise DataError('XADD fields must be a non-empty dict')
for pair in fields.items():
pieces.extend(pair)
return self.execute_command('XADD', name, *pieces)
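# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client; the stream name and fields are hypothetical):
#   r.xadd('events', {'type': 'click', 'page': '/home'}, maxlen=10000)
#   r.xread({'events': '0'}, count=10)       # read from the beginning
#   r.xread({'events': '$'}, block=5000)     # wait up to 5s for new entries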
def xclaim(self, name, groupname, consumername, min_idle_time, message_ids,
idle=None, time=None, retrycount=None, force=False,
justid=False):
"""
Changes the ownership of a pending message.
name: name of the stream.
groupname: name of the consumer group.
consumername: name of a consumer that claims the message.
min_idle_time: filter messages that were idle less than this amount of
milliseconds
message_ids: non-empty list or tuple of message IDs to claim
idle: optional. Set the idle time (last time it was delivered) of the
message in ms
time: optional integer. This is the same as idle but instead of a
relative amount of milliseconds, it sets the idle time to a specific
Unix time (in milliseconds).
retrycount: optional integer. set the retry counter to the specified
value. This counter is incremented every time a message is delivered
again.
force: optional boolean, false by default. Creates the pending message
entry in the PEL even if certain specified IDs are not already in the
PEL assigned to a different client.
justid: optional boolean, false by default. Return just an array of IDs
of messages successfully claimed, without returning the actual message
"""
if not isinstance(min_idle_time, int) or min_idle_time < 0:
raise DataError("XCLAIM min_idle_time must be a non negative "
"integer")
if not isinstance(message_ids, (list, tuple)) or not message_ids:
raise DataError("XCLAIM message_ids must be a non empty list or "
"tuple of message IDs to claim")
kwargs = {}
pieces = [name, groupname, consumername, str(min_idle_time)]
pieces.extend(list(message_ids))
if idle is not None:
if not isinstance(idle, int):
raise DataError("XCLAIM idle must be an integer")
pieces.extend((b'IDLE', str(idle)))
if time is not None:
if not isinstance(time, int):
raise DataError("XCLAIM time must be an integer")
pieces.extend((b'TIME', str(time)))
if retrycount is not None:
if not isinstance(retrycount, int):
raise DataError("XCLAIM retrycount must be an integer")
pieces.extend((b'RETRYCOUNT', str(retrycount)))
if force:
if not isinstance(force, bool):
raise DataError("XCLAIM force must be a boolean")
pieces.append(b'FORCE')
if justid:
if not isinstance(justid, bool):
raise DataError("XCLAIM justid must be a boolean")
pieces.append(b'JUSTID')
kwargs['parse_justid'] = True
return self.execute_command('XCLAIM', *pieces, **kwargs)
def xdel(self, name, *ids):
"""
Deletes one or more messages from a stream.
name: name of the stream.
*ids: message ids to delete.
"""
return self.execute_command('XDEL', name, *ids)
def xgroup_create(self, name, groupname, id='$', mkstream=False):
"""
Create a new consumer group associated with a stream.
name: name of the stream.
groupname: name of the consumer group.
id: ID of the last item in the stream to consider already delivered.
mkstream: a boolean indicating whether to create the stream if it
does not already exist.
"""
pieces = ['XGROUP CREATE', name, groupname, id]
if mkstream:
pieces.append(b'MKSTREAM')
return self.execute_command(*pieces)
def xgroup_delconsumer(self, name, groupname, consumername):
"""
Remove a specific consumer from a consumer group.
Returns the number of pending messages that the consumer had before it
was deleted.
name: name of the stream.
groupname: name of the consumer group.
consumername: name of consumer to delete
"""
return self.execute_command('XGROUP DELCONSUMER', name, groupname,
consumername)
def xgroup_destroy(self, name, groupname):
"""
Destroy a consumer group.
name: name of the stream.
groupname: name of the consumer group.
"""
return self.execute_command('XGROUP DESTROY', name, groupname)
def xgroup_setid(self, name, groupname, id):
"""
Set the consumer group last delivered ID to something else.
name: name of the stream.
groupname: name of the consumer group.
id: ID of the last item in the stream to consider already delivered.
"""
return self.execute_command('XGROUP SETID', name, groupname, id)
def xinfo_consumers(self, name, groupname):
"""
Returns general information about the consumers in the group.
name: name of the stream.
groupname: name of the consumer group.
"""
return self.execute_command('XINFO CONSUMERS', name, groupname)
def xinfo_groups(self, name):
"""
Returns general information about the consumer groups of the stream.
name: name of the stream.
"""
return self.execute_command('XINFO GROUPS', name)
def xinfo_stream(self, name):
"""
Returns general information about the stream.
name: name of the stream.
"""
return self.execute_command('XINFO STREAM', name)
def xlen(self, name):
"""
Returns the number of elements in a given stream.
"""
return self.execute_command('XLEN', name)
def xpending(self, name, groupname):
"""
Returns information about pending messages of a group.
name: name of the stream.
groupname: name of the consumer group.
"""
return self.execute_command('XPENDING', name, groupname)
def xpending_range(self, name, groupname, min, max, count,
consumername=None):
"""
Returns information about pending messages, in a range.
name: name of the stream.
groupname: name of the consumer group.
min: minimum stream ID.
max: maximum stream ID.
count: number of messages to return
consumername: name of a consumer to filter by (optional).
"""
pieces = [name, groupname]
if min is not None or max is not None or count is not None:
if min is None or max is None or count is None:
raise DataError("XPENDING must be provided with min, max "
"and count parameters, or none of them. ")
if not isinstance(count, int) or count < -1:
raise DataError("XPENDING count must be a integer >= -1")
pieces.extend((min, max, str(count)))
if consumername is not None:
if min is None or max is None or count is None:
raise DataError("if XPENDING is provided with consumername,"
" it must be provided with min, max and"
" count parameters")
pieces.append(consumername)
return self.execute_command('XPENDING', *pieces, parse_detail=True)
def xrange(self, name, min='-', max='+', count=None):
"""
Read stream values within an interval.
name: name of the stream.
min: first stream ID. defaults to '-',
meaning the earliest available.
max: last stream ID. defaults to '+',
meaning the latest available.
count: if set, only return this many items, beginning with the
earliest available.
"""
pieces = [min, max]
if count is not None:
if not isinstance(count, int) or count < 1:
raise DataError('XRANGE count must be a positive integer')
pieces.append(b'COUNT')
pieces.append(str(count))
return self.execute_command('XRANGE', name, *pieces)
def xread(self, streams, count=None, block=None):
"""
Block and monitor multiple streams for new data.
streams: a dict of stream names to stream IDs, where
IDs indicate the last ID already seen.
count: if set, only return this many items, beginning with the
earliest available.
block: number of milliseconds to wait, if nothing already present.
"""
pieces = []
if block is not None:
if not isinstance(block, int) or block < 0:
raise DataError('XREAD block must be a non-negative integer')
pieces.append(b'BLOCK')
pieces.append(str(block))
if count is not None:
if not isinstance(count, int) or count < 1:
raise DataError('XREAD count must be a positive integer')
pieces.append(b'COUNT')
pieces.append(str(count))
if not isinstance(streams, dict) or len(streams) == 0:
raise DataError('XREAD streams must be a non empty dict')
pieces.append(b'STREAMS')
keys, values = zip(*streams.items())
pieces.extend(keys)
pieces.extend(values)
return self.execute_command('XREAD', *pieces)
def xreadgroup(self, groupname, consumername, streams, count=None,
block=None, noack=False):
"""
Read from a stream via a consumer group.
groupname: name of the consumer group.
consumername: name of the requesting consumer.
streams: a dict of stream names to stream IDs, where
IDs indicate the last ID already seen.
count: if set, only return this many items, beginning with the
earliest available.
block: number of milliseconds to wait, if nothing already present.
noack: do not add messages to the PEL
"""
pieces = [b'GROUP', groupname, consumername]
if count is not None:
if not isinstance(count, int) or count < 1:
raise DataError("XREADGROUP count must be a positive integer")
pieces.append(b'COUNT')
pieces.append(str(count))
if block is not None:
if not isinstance(block, int) or block < 0:
raise DataError("XREADGROUP block must be a non-negative "
"integer")
pieces.append(b'BLOCK')
pieces.append(str(block))
if noack:
pieces.append(b'NOACK')
if not isinstance(streams, dict) or len(streams) == 0:
raise DataError('XREADGROUP streams must be a non empty dict')
pieces.append(b'STREAMS')
pieces.extend(streams.keys())
pieces.extend(streams.values())
return self.execute_command('XREADGROUP', *pieces)
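# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client): a basic consumer-group read/ack flow; the message ID passed to
# XACK is hypothetical.
#   r.xgroup_create('events', 'workers', id='$', mkstream=True)
#   entries = r.xreadgroup('workers', 'consumer-1', {'events': '>'},
#                          count=10, block=2000)
#   r.xack('events', 'workers', '1526569495631-0')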
def xrevrange(self, name, max='+', min='-', count=None):
"""
Read stream values within an interval, in reverse order.
name: name of the stream
max: first stream ID. defaults to '+',
meaning the latest available.
min: last stream ID. defaults to '-',
meaning the earliest available.
count: if set, only return this many items, beginning with the
latest available.
"""
pieces = [max, min]
if count is not None:
if not isinstance(count, int) or count < 1:
raise DataError('XREVRANGE count must be a positive integer')
pieces.append(b'COUNT')
pieces.append(str(count))
return self.execute_command('XREVRANGE', name, *pieces)
def xtrim(self, name, maxlen, approximate=True):
"""
Trims old messages from a stream.
name: name of the stream.
maxlen: truncate old stream messages beyond this size
approximate: actual stream length may be slightly more than maxlen
"""
pieces = [b'MAXLEN']
if approximate:
pieces.append(b'~')
pieces.append(maxlen)
return self.execute_command('XTRIM', name, *pieces)
# SORTED SET COMMANDS
def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False):
"""
Set any number of element-name, score pairs to the key ``name``. Pairs
are specified as a dict of element-names keys to score values.
``nx`` forces ZADD to only create new elements and not to update
scores for elements that already exist.
``xx`` forces ZADD to only update scores of elements that already
exist. New elements will not be added.
``ch`` modifies the return value to be the numbers of elements changed.
Changed elements include new elements that were added and elements
whose scores changed.
``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a
single element/score pair can be specified and the score is the amount
the existing score will be incremented by. When using this mode the
return value of ZADD will be the new score of the element.
The return value of ZADD varies based on the mode specified. With no
options, ZADD returns the number of new elements added to the sorted
set.
"""
if not mapping:
raise DataError("ZADD requires at least one element/score pair")
if nx and xx:
raise DataError("ZADD allows either 'nx' or 'xx', not both")
if incr and len(mapping) != 1:
raise DataError("ZADD option 'incr' only works when passing a "
"single element/score pair")
pieces = []
options = {}
if nx:
pieces.append(b'NX')
if xx:
pieces.append(b'XX')
if ch:
pieces.append(b'CH')
if incr:
pieces.append(b'INCR')
options['as_score'] = True
for pair in mapping.items():
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces, **options)
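# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client): a small leaderboard.
#   r.zadd('leaderboard', {'alice': 100, 'bob': 85})
#   r.zadd('leaderboard', {'alice': 5}, incr=True)      # -> alice's new score
#   r.zrevrange('leaderboard', 0, 2, withscores=True)   # top three with scores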
def zcard(self, name):
"Return the number of elements in the sorted set ``name``"
return self.execute_command('ZCARD', name)
def zcount(self, name, min, max):
"""
Returns the number of elements in the sorted set at key ``name`` with
a score between ``min`` and ``max``.
"""
return self.execute_command('ZCOUNT', name, min, max)
def zincrby(self, name, amount, value):
"Increment the score of ``value`` in sorted set ``name`` by ``amount``"
return self.execute_command('ZINCRBY', name, amount, value)
def zinterstore(self, dest, keys, aggregate=None):
"""
Intersect multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
def zlexcount(self, name, min, max):
"""
Return the number of items in the sorted set ``name`` between the
lexicographical range ``min`` and ``max``.
"""
return self.execute_command('ZLEXCOUNT', name, min, max)
def zpopmax(self, name, count=None):
"""
Remove and return up to ``count`` members with the highest scores
from the sorted set ``name``.
"""
args = [count] if count is not None else []
options = {
'withscores': True
}
return self.execute_command('ZPOPMAX', name, *args, **options)
def zpopmin(self, name, count=None):
"""
Remove and return up to ``count`` members with the lowest scores
from the sorted set ``name``.
"""
args = [count] if count is not None else []
options = {
'withscores': True
}
return self.execute_command('ZPOPMIN', name, *args, **options)
def bzpopmax(self, keys, timeout=0):
"""
ZPOPMAX a value off of the first non-empty sorted set
named in the ``keys`` list.
If none of the sorted sets in ``keys`` has a value to ZPOPMAX,
then block for ``timeout`` seconds, or until a member gets added
to one of the sorted sets.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command('BZPOPMAX', *keys)
def bzpopmin(self, keys, timeout=0):
"""
ZPOPMIN a value off of the first non-empty sorted set
named in the ``keys`` list.
If none of the sorted sets in ``keys`` has a value to ZPOPMIN,
then block for ``timeout`` seconds, or until a member gets added
to one of the sorted sets.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
keys = list_or_args(keys, None)
keys.append(timeout)
return self.execute_command('BZPOPMIN', *keys)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``desc`` a boolean indicating whether to sort the results in descending order
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if desc:
return self.zrevrange(name, start, end, withscores,
score_cast_func)
pieces = ['ZRANGE', name, start, end]
if withscores:
pieces.append(b'WITHSCORES')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrangebylex(self, name, min, max, start=None, num=None):
"""
Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYLEX', name, min, max]
if start is not None and num is not None:
pieces.extend([b'LIMIT', start, num])
return self.execute_command(*pieces)
def zrevrangebylex(self, name, max, min, start=None, num=None):
"""
Return the reversed lexicographical range of values from sorted set
``name`` between ``max`` and ``min``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYLEX', name, max, min]
if start is not None and num is not None:
pieces.extend([b'LIMIT', start, num])
return self.execute_command(*pieces)
def zrangebyscore(self, name, min, max, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYSCORE', name, min, max]
if start is not None and num is not None:
pieces.extend([b'LIMIT', start, num])
if withscores:
pieces.append(b'WITHSCORES')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
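# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client): min/max accept the usual Redis score syntax, including exclusive
# bounds and infinities.
#   r.zrangebyscore('leaderboard', 80, 100, withscores=True)
#   r.zrangebyscore('leaderboard', '(80', '+inf', start=0, num=10)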
def zrank(self, name, value):
"""
Returns a 0-based value indicating the rank of ``value`` in sorted set
``name``
"""
return self.execute_command('ZRANK', name, value)
def zrem(self, name, *values):
"Remove member ``values`` from sorted set ``name``"
return self.execute_command('ZREM', name, *values)
def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYLEX', name, min, max)
def zremrangebyrank(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with ranks between
``min`` and ``max``. Values are 0-based, ordered from smallest score
to largest. Values can be negative indicating the highest scores.
Returns the number of elements removed
"""
return self.execute_command('ZREMRANGEBYRANK', name, min, max)
def zremrangebyscore(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with scores
between ``min`` and ``max``. Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in descending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``withscores`` indicates to return the scores along with the values
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
pieces = ['ZREVRANGE', name, start, end]
if withscores:
pieces.append(b'WITHSCORES')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise DataError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYSCORE', name, max, min]
if start is not None and num is not None:
pieces.extend([b'LIMIT', start, num])
if withscores:
pieces.append(b'WITHSCORES')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrank(self, name, value):
"""
Returns a 0-based value indicating the descending rank of
``value`` in sorted set ``name``
"""
return self.execute_command('ZREVRANK', name, value)
def zscore(self, name, value):
"Return the score of element ``value`` in sorted set ``name``"
return self.execute_command('ZSCORE', name, value)
def zunionstore(self, dest, keys, aggregate=None):
"""
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
def _zaggregate(self, command, dest, keys, aggregate=None):
pieces = [command, dest, len(keys)]
if isinstance(keys, dict):
keys, weights = keys.keys(), keys.values()
else:
weights = None
pieces.extend(keys)
if weights:
pieces.append(b'WEIGHTS')
pieces.extend(weights)
if aggregate:
pieces.append(b'AGGREGATE')
pieces.append(aggregate)
return self.execute_command(*pieces)
# HYPERLOGLOG COMMANDS
def pfadd(self, name, *values):
"Adds the specified elements to the specified HyperLogLog."
return self.execute_command('PFADD', name, *values)
def pfcount(self, *sources):
"""
Return the approximated cardinality of
the set observed by the HyperLogLog at key(s).
"""
return self.execute_command('PFCOUNT', *sources)
def pfmerge(self, dest, *sources):
"Merge N different HyperLogLogs into a single one."
return self.execute_command('PFMERGE', dest, *sources)
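# Illustrative usage sketch (assumes ``r`` is a connected instance of this
# client): approximate unique-visitor counting with HyperLogLogs.
#   r.pfadd('visitors:2020-01-01', 'u1', 'u2', 'u3')
#   r.pfadd('visitors:2020-01-02', 'u2', 'u4')
#   r.pfcount('visitors:2020-01-01', 'visitors:2020-01-02')  # approx. union size
#   r.pfmerge('visitors:total', 'visitors:2020-01-01', 'visitors:2020-01-02')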
# HASH COMMANDS
def hdel(self, name, *keys):
"Delete ``keys`` from hash ``name``"
return self.execute_command('HDEL', name, *keys)
def hexists(self, name, key):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
return self.execute_command('HEXISTS', name, key)
def hget(self, name, key):
"Return the value of ``key`` within the hash ``name``"
return self.execute_command('HGET', name, key)
def hgetall(self, name):
"Return a Python dict of the hash's name/value pairs"
return self.execute_command('HGETALL', name)
def hincrby(self, name, key, amount=1):
"Increment the value of ``key`` in hash ``name`` by ``amount``"
return self.execute_command('HINCRBY', name, key, amount)
def hincrbyfloat(self, name, key, amount=1.0):
"""
Increment the value of ``key`` in hash ``name`` by floating ``amount``
"""
return self.execute_command('HINCRBYFLOAT', name, key, amount)
def hkeys(self, name):
"Return the list of keys within hash ``name``"
return self.execute_command('HKEYS', name)
def hlen(self, name):
"Return the number of elements in hash ``name``"
return self.execute_command('HLEN', name)
def hset(self, name, key=None, value=None, mapping=None):
"""
Set ``key`` to ``value`` within hash ``name``,
``mapping`` accepts a dict of key/value pairs that will be
added to hash ``name``.
Returns the number of fields that were added.
"""
if key is None and not mapping:
raise DataError("'hset' with no key value pairs")
items = []
if key is not None:
items.extend((key, value))
if mapping:
for pair in mapping.items():
items.extend(pair)
return self.execute_command('HSET', name, *items)
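    # Illustrative call (hypothetical key and fields, ``r`` being a client
    # instance): both calling styles can be combined, e.g.
    #   r.hset('user:1000', 'name', 'Ada', mapping={'lang': 'py', 'level': '9'})
    # sends HSET user:1000 name Ada lang py level 9 and returns the number of
    # newly created fields.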
def hsetnx(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
return self.execute_command('HSETNX', name, key, value)
def hmset(self, name, mapping):
"""
Set key to value within hash ``name`` for each corresponding
key and value from the ``mapping`` dict.
"""
warnings.warn(
'%s.hmset() is deprecated. Use %s.hset() instead.'
% (self.__class__.__name__, self.__class__.__name__),
DeprecationWarning,
stacklevel=2,
)
if not mapping:
raise DataError("'hmset' with 'mapping' of length 0")
items = []
for pair in mapping.items():
items.extend(pair)
return self.execute_command('HMSET', name, *items)
def hmget(self, name, keys, *args):
"Returns a list of values ordered identically to ``keys``"
args = list_or_args(keys, args)
return self.execute_command('HMGET', name, *args)
def hvals(self, name):
"Return the list of values within hash ``name``"
return self.execute_command('HVALS', name)
def hstrlen(self, name, key):
"""
Return the number of bytes stored in the value of ``key``
within hash ``name``
"""
return self.execute_command('HSTRLEN', name, key)
def publish(self, channel, message):
"""
Publish ``message`` on ``channel``.
Returns the number of subscribers the message was delivered to.
"""
return self.execute_command('PUBLISH', channel, message)
def pubsub_channels(self, pattern='*'):
"""
Return a list of channels that have at least one subscriber
"""
return self.execute_command('PUBSUB CHANNELS', pattern)
def pubsub_numpat(self):
"""
Returns the number of subscriptions to patterns
"""
return self.execute_command('PUBSUB NUMPAT')
def pubsub_numsub(self, *args):
"""
Return a list of (channel, number of subscribers) tuples
for each channel given in ``*args``
"""
return self.execute_command('PUBSUB NUMSUB', *args)
def cluster(self, cluster_arg, *args):
return self.execute_command('CLUSTER %s' % cluster_arg.upper(), *args)
def eval(self, script, numkeys, *keys_and_args):
"""
Execute the Lua ``script``, specifying the ``numkeys`` the script
will touch and the key names and argument values in ``keys_and_args``.
Returns the result of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVAL', script, numkeys, *keys_and_args)
def evalsha(self, sha, numkeys, *keys_and_args):
"""
Use the ``sha`` to execute a Lua script already registered via EVAL
or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
key names and argument values in ``keys_and_args``. Returns the result
of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args)
def script_exists(self, *args):
"""
Check if a script exists in the script cache by specifying the SHAs of
        each script as ``args``. Returns a list of boolean values indicating
        whether each script already exists in the cache.
"""
return self.execute_command('SCRIPT EXISTS', *args)
def script_flush(self):
"Flush all scripts from the script cache"
return self.execute_command('SCRIPT FLUSH')
def script_kill(self):
"Kill the currently executing Lua script"
return self.execute_command('SCRIPT KILL')
def script_load(self, script):
"Load a Lua ``script`` into the script cache. Returns the SHA."
return self.execute_command('SCRIPT LOAD', script)
def register_script(self, script):
"""
Register a Lua ``script`` specifying the ``keys`` it will touch.
Returns a Script object that is callable and hides the complexity of
        dealing with scripts, keys, and shas. This is the preferred way to work
with Lua scripts.
"""
return Script(self, script)
# GEO COMMANDS
def geoadd(self, name, *values):
"""
Add the specified geospatial items to the specified key identified
by the ``name`` argument. The Geospatial items are given as ordered
members of the ``values`` argument, each item or place is formed by
the triad longitude, latitude and name.
"""
if len(values) % 3 != 0:
raise DataError("GEOADD requires places with lon, lat and name"
" values")
return self.execute_command('GEOADD', name, *values)
def geodist(self, name, place1, place2, unit=None):
"""
Return the distance between ``place1`` and ``place2`` members of the
``name`` key.
        The units must be one of the following: m, km, mi, ft. By default
meters are used.
"""
pieces = [name, place1, place2]
if unit and unit not in ('m', 'km', 'mi', 'ft'):
raise DataError("GEODIST invalid unit")
elif unit:
pieces.append(unit)
return self.execute_command('GEODIST', *pieces)
def geohash(self, name, *values):
"""
Return the geo hash string for each item of ``values`` members of
the specified key identified by the ``name`` argument.
"""
return self.execute_command('GEOHASH', name, *values)
def geopos(self, name, *values):
"""
Return the positions of each item of ``values`` as members of
the specified key identified by the ``name`` argument. Each position
is represented by the pairs lon and lat.
"""
return self.execute_command('GEOPOS', name, *values)
def georadius(self, name, longitude, latitude, radius, unit=None,
withdist=False, withcoord=False, withhash=False, count=None,
sort=None, store=None, store_dist=None):
"""
Return the members of the specified key identified by the
``name`` argument which are within the borders of the area specified
with the ``latitude`` and ``longitude`` location and the maximum
distance from the center specified by the ``radius`` value.
        The units must be one of the following: m, km, mi, ft. By default
        meters are used.
``withdist`` indicates to return the distances of each place.
``withcoord`` indicates to return the latitude and longitude of
each place.
``withhash`` indicates to return the geohash string of each place.
``count`` indicates to return the number of elements up to N.
``sort`` indicates to return the places in a sorted way, ASC for
        nearest to farthest and DESC for farthest to nearest.
``store`` indicates to save the places names in a sorted set named
with a specific key, each element of the destination sorted set is
populated with the score got from the original geo sorted set.
``store_dist`` indicates to save the places names in a sorted set
named with a specific key, instead of ``store`` the sorted set
destination score is set with the distance.
"""
return self._georadiusgeneric('GEORADIUS',
name, longitude, latitude, radius,
unit=unit, withdist=withdist,
withcoord=withcoord, withhash=withhash,
count=count, sort=sort, store=store,
store_dist=store_dist)
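    # Illustrative call (hypothetical key and coordinates, ``r`` being a client
    # instance): places within 200 km of a point, nearest first and with
    # distances, e.g.
    #   r.georadius('Sicily', 15.0, 37.0, 200, unit='km',
    #               withdist=True, sort='ASC')
    # maps to GEORADIUS Sicily 15.0 37.0 200 km WITHDIST ASC.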
def georadiusbymember(self, name, member, radius, unit=None,
withdist=False, withcoord=False, withhash=False,
count=None, sort=None, store=None, store_dist=None):
"""
This command is exactly like ``georadius`` with the sole difference
that instead of taking, as the center of the area to query, a longitude
and latitude value, it takes the name of a member already existing
inside the geospatial index represented by the sorted set.
"""
return self._georadiusgeneric('GEORADIUSBYMEMBER',
name, member, radius, unit=unit,
withdist=withdist, withcoord=withcoord,
withhash=withhash, count=count,
sort=sort, store=store,
store_dist=store_dist)
def _georadiusgeneric(self, command, *args, **kwargs):
pieces = list(args)
if kwargs['unit'] and kwargs['unit'] not in ('m', 'km', 'mi', 'ft'):
raise DataError("GEORADIUS invalid unit")
elif kwargs['unit']:
pieces.append(kwargs['unit'])
else:
pieces.append('m',)
for arg_name, byte_repr in (
('withdist', b'WITHDIST'),
('withcoord', b'WITHCOORD'),
('withhash', b'WITHHASH')):
if kwargs[arg_name]:
pieces.append(byte_repr)
if kwargs['count']:
pieces.extend([b'COUNT', kwargs['count']])
if kwargs['sort']:
if kwargs['sort'] == 'ASC':
pieces.append(b'ASC')
elif kwargs['sort'] == 'DESC':
pieces.append(b'DESC')
else:
raise DataError("GEORADIUS invalid sort")
if kwargs['store'] and kwargs['store_dist']:
            raise DataError("GEORADIUS store and store_dist can't be set"
" together")
if kwargs['store']:
pieces.extend([b'STORE', kwargs['store']])
if kwargs['store_dist']:
pieces.extend([b'STOREDIST', kwargs['store_dist']])
return self.execute_command(command, *pieces, **kwargs)
# MODULE COMMANDS
def module_load(self, path):
"""
Loads the module from ``path``.
Raises ``ModuleError`` if a module is not found at ``path``.
"""
return self.execute_command('MODULE LOAD', path)
def module_unload(self, name):
"""
Unloads the module ``name``.
Raises ``ModuleError`` if ``name`` is not in loaded modules.
"""
return self.execute_command('MODULE UNLOAD', name)
def module_list(self):
"""
Returns a list of dictionaries containing the name and version of
all loaded modules.
"""
return self.execute_command('MODULE LIST')
StrictRedis = Redis
class Monitor:
"""
Monitor is useful for handling the MONITOR command to the redis server.
next_command() method returns one command from monitor
listen() method yields commands from monitor.
"""
monitor_re = re.compile(r'\[(\d+) (.*)\] (.*)')
command_re = re.compile(r'"(.*?)(?<!\\)"')
def __init__(self, connection_pool):
self.connection_pool = connection_pool
self.connection = self.connection_pool.get_connection('MONITOR')
def __enter__(self):
self.connection.send_command('MONITOR')
# check that monitor returns 'OK', but don't return it to user
response = self.connection.read_response()
if not bool_ok(response):
raise RedisError('MONITOR failed: %s' % response)
return self
def __exit__(self, *args):
self.connection.disconnect()
self.connection_pool.release(self.connection)
def next_command(self):
"Parse the response from a monitor command"
response = self.connection.read_response()
if isinstance(response, bytes):
response = self.connection.encoder.decode(response, force=True)
command_time, command_data = response.split(' ', 1)
m = self.monitor_re.match(command_data)
db_id, client_info, command = m.groups()
command = ' '.join(self.command_re.findall(command))
# Redis escapes double quotes because each piece of the command
# string is surrounded by double quotes. We don't have that
# requirement so remove the escaping and leave the quote.
command = command.replace('\\"', '"')
if client_info == 'lua':
client_address = 'lua'
client_port = ''
client_type = 'lua'
elif client_info.startswith('unix'):
client_address = 'unix'
client_port = client_info[5:]
client_type = 'unix'
else:
# use rsplit as ipv6 addresses contain colons
client_address, client_port = client_info.rsplit(':', 1)
client_type = 'tcp'
return {
'time': float(command_time),
'db': int(db_id),
'client_address': client_address,
'client_port': client_port,
'client_type': client_type,
'command': command
}
def listen(self):
"Listen for commands coming to the server."
while True:
yield self.next_command()
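# A minimal usage sketch of the Monitor helper above, assuming a Redis server
# is reachable with the default connection settings; the function name and the
# ``limit`` parameter are illustrative only.
def example_monitor_dump(limit=10):
    "Print the next ``limit`` commands observed by the server via MONITOR."
    client = Redis()
    with Monitor(client.connection_pool) as monitor:
        for i, command in enumerate(monitor.listen()):
            print(command['time'], command['client_address'], command['command'])
            if i + 1 >= limit:
                break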
class PubSub:
"""
PubSub provides publish, subscribe and listen support to Redis channels.
After subscribing to one or more channels, the listen() method will block
until a message arrives on one of the subscribed channels. That message
will be returned and it's safe to start listening again.
"""
PUBLISH_MESSAGE_TYPES = ('message', 'pmessage')
UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe')
HEALTH_CHECK_MESSAGE = 'redis-py-health-check'
def __init__(self, connection_pool, shard_hint=None,
ignore_subscribe_messages=False):
self.connection_pool = connection_pool
self.shard_hint = shard_hint
self.ignore_subscribe_messages = ignore_subscribe_messages
self.connection = None
# we need to know the encoding options for this connection in order
# to lookup channel and pattern names for callback handlers.
self.encoder = self.connection_pool.get_encoder()
if self.encoder.decode_responses:
self.health_check_response = ['pong', self.HEALTH_CHECK_MESSAGE]
else:
self.health_check_response = [
b'pong',
self.encoder.encode(self.HEALTH_CHECK_MESSAGE)
]
self.reset()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.reset()
def __del__(self):
try:
# if this object went out of scope prior to shutting down
# subscriptions, close the connection manually before
# returning it to the connection pool
self.reset()
except Exception:
pass
def reset(self):
if self.connection:
self.connection.disconnect()
self.connection.clear_connect_callbacks()
self.connection_pool.release(self.connection)
self.connection = None
self.channels = {}
self.pending_unsubscribe_channels = set()
self.patterns = {}
self.pending_unsubscribe_patterns = set()
def close(self):
self.reset()
def on_connect(self, connection):
"Re-subscribe to any channels and patterns previously subscribed to"
# NOTE: for python3, we can't pass bytestrings as keyword arguments
# so we need to decode channel/pattern names back to unicode strings
# before passing them to [p]subscribe.
self.pending_unsubscribe_channels.clear()
self.pending_unsubscribe_patterns.clear()
if self.channels:
channels = {}
for k, v in self.channels.items():
channels[self.encoder.decode(k, force=True)] = v
self.subscribe(**channels)
if self.patterns:
patterns = {}
for k, v in self.patterns.items():
patterns[self.encoder.decode(k, force=True)] = v
self.psubscribe(**patterns)
@property
def subscribed(self):
"Indicates if there are subscriptions to any channels or patterns"
return bool(self.channels or self.patterns)
def execute_command(self, *args):
"Execute a publish/subscribe command"
# NOTE: don't parse the response in this function -- it could pull a
# legitimate message off the stack if the connection is already
# subscribed to one or more channels
if self.connection is None:
self.connection = self.connection_pool.get_connection(
'pubsub',
self.shard_hint
)
# register a callback that re-subscribes to any channels we
# were listening to when we were disconnected
self.connection.register_connect_callback(self.on_connect)
connection = self.connection
kwargs = {'check_health': not self.subscribed}
self._execute(connection, connection.send_command, *args, **kwargs)
def _execute(self, connection, command, *args, **kwargs):
try:
return command(*args, **kwargs)
except (ConnectionError, TimeoutError) as e:
connection.disconnect()
if not (connection.retry_on_timeout and
isinstance(e, TimeoutError)):
raise
# Connect manually here. If the Redis server is down, this will
# fail and raise a ConnectionError as desired.
connection.connect()
        # the ``on_connect`` callback should have been called by the
# connection to resubscribe us to any channels and patterns we were
# previously listening to
return command(*args, **kwargs)
def parse_response(self, block=True, timeout=0):
"Parse the response from a publish/subscribe command"
conn = self.connection
if conn is None:
raise RuntimeError(
'pubsub connection not set: '
'did you forget to call subscribe() or psubscribe()?')
self.check_health()
if not block and not conn.can_read(timeout=timeout):
return None
response = self._execute(conn, conn.read_response)
if conn.health_check_interval and \
response == self.health_check_response:
# ignore the health check message as user might not expect it
return None
return response
def check_health(self):
conn = self.connection
if conn is None:
raise RuntimeError(
'pubsub connection not set: '
'did you forget to call subscribe() or psubscribe()?')
if conn.health_check_interval and time.time() > conn.next_health_check:
conn.send_command('PING', self.HEALTH_CHECK_MESSAGE,
check_health=False)
def _normalize_keys(self, data):
"""
normalize channel/pattern names to be either bytes or strings
based on whether responses are automatically decoded. this saves us
from coercing the value for each message coming in.
"""
encode = self.encoder.encode
decode = self.encoder.decode
return {decode(encode(k)): v for k, v in data.items()}
def psubscribe(self, *args, **kwargs):
"""
Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_patterns = dict.fromkeys(args)
new_patterns.update(kwargs)
ret_val = self.execute_command('PSUBSCRIBE', *new_patterns.keys())
# update the patterns dict AFTER we send the command. we don't want to
# subscribe twice to these patterns, once for the command and again
# for the reconnection.
new_patterns = self._normalize_keys(new_patterns)
self.patterns.update(new_patterns)
self.pending_unsubscribe_patterns.difference_update(new_patterns)
return ret_val
def punsubscribe(self, *args):
"""
Unsubscribe from the supplied patterns. If empty, unsubscribe from
all patterns.
"""
if args:
args = list_or_args(args[0], args[1:])
patterns = self._normalize_keys(dict.fromkeys(args))
else:
patterns = self.patterns
self.pending_unsubscribe_patterns.update(patterns)
return self.execute_command('PUNSUBSCRIBE', *args)
def subscribe(self, *args, **kwargs):
"""
Subscribe to channels. Channels supplied as keyword arguments expect
a channel name as the key and a callable as the value. A channel's
callable will be invoked automatically when a message is received on
that channel rather than producing a message via ``listen()`` or
``get_message()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_channels = dict.fromkeys(args)
new_channels.update(kwargs)
ret_val = self.execute_command('SUBSCRIBE', *new_channels.keys())
# update the channels dict AFTER we send the command. we don't want to
# subscribe twice to these channels, once for the command and again
# for the reconnection.
new_channels = self._normalize_keys(new_channels)
self.channels.update(new_channels)
self.pending_unsubscribe_channels.difference_update(new_channels)
return ret_val
def unsubscribe(self, *args):
"""
Unsubscribe from the supplied channels. If empty, unsubscribe from
all channels
"""
if args:
args = list_or_args(args[0], args[1:])
channels = self._normalize_keys(dict.fromkeys(args))
else:
channels = self.channels
self.pending_unsubscribe_channels.update(channels)
return self.execute_command('UNSUBSCRIBE', *args)
def listen(self):
"Listen for messages on channels this client has been subscribed to"
while self.subscribed:
response = self.handle_message(self.parse_response(block=True))
if response is not None:
yield response
def get_message(self, ignore_subscribe_messages=False, timeout=0):
"""
Get the next message if one is available, otherwise None.
If timeout is specified, the system will wait for `timeout` seconds
before returning. Timeout should be specified as a floating point
number.
"""
response = self.parse_response(block=False, timeout=timeout)
if response:
return self.handle_message(response, ignore_subscribe_messages)
return None
def ping(self, message=None):
"""
Ping the Redis server
"""
message = '' if message is None else message
return self.execute_command('PING', message)
def handle_message(self, response, ignore_subscribe_messages=False):
"""
Parses a pub/sub message. If the channel or pattern was subscribed to
with a message handler, the handler is invoked instead of a parsed
message being returned.
"""
message_type = str_if_bytes(response[0])
if message_type == 'pmessage':
message = {
'type': message_type,
'pattern': response[1],
'channel': response[2],
'data': response[3]
}
elif message_type == 'pong':
message = {
'type': message_type,
'pattern': None,
'channel': None,
'data': response[1]
}
else:
message = {
'type': message_type,
'pattern': None,
'channel': response[1],
'data': response[2]
}
# if this is an unsubscribe message, remove it from memory
if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
if message_type == 'punsubscribe':
pattern = response[1]
if pattern in self.pending_unsubscribe_patterns:
self.pending_unsubscribe_patterns.remove(pattern)
self.patterns.pop(pattern, None)
else:
channel = response[1]
if channel in self.pending_unsubscribe_channels:
self.pending_unsubscribe_channels.remove(channel)
self.channels.pop(channel, None)
if message_type in self.PUBLISH_MESSAGE_TYPES:
# if there's a message handler, invoke it
if message_type == 'pmessage':
handler = self.patterns.get(message['pattern'], None)
else:
handler = self.channels.get(message['channel'], None)
if handler:
handler(message)
return None
elif message_type != 'pong':
# this is a subscribe/unsubscribe message. ignore if we don't
# want them
if ignore_subscribe_messages or self.ignore_subscribe_messages:
return None
return message
def run_in_thread(self, sleep_time=0, daemon=False,
exception_handler=None):
for channel, handler in self.channels.items():
if handler is None:
raise PubSubError("Channel: '%s' has no handler registered" %
channel)
for pattern, handler in self.patterns.items():
if handler is None:
raise PubSubError("Pattern: '%s' has no handler registered" %
pattern)
thread = PubSubWorkerThread(
self,
sleep_time,
daemon=daemon,
exception_handler=exception_handler
)
thread.start()
return thread
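# A minimal usage sketch of PubSub with a per-channel message handler, assuming
# a Redis server with default connection settings; the channel name 'news' and
# the function names are illustrative only.
def example_pubsub_handler():
    client = Redis()
    pubsub = PubSub(client.connection_pool, ignore_subscribe_messages=True)
    def on_news(message):
        # invoked by the worker thread for every published message
        print('news:', message['data'])
    pubsub.subscribe(news=on_news)
    worker = pubsub.run_in_thread(sleep_time=0.1)
    client.publish('news', 'hello')
    # ... once done, stop the worker; its run loop closes the pubsub connection
    worker.stop()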
class PubSubWorkerThread(threading.Thread):
def __init__(self, pubsub, sleep_time, daemon=False,
exception_handler=None):
super().__init__()
self.daemon = daemon
self.pubsub = pubsub
self.sleep_time = sleep_time
self.exception_handler = exception_handler
self._running = threading.Event()
def run(self):
if self._running.is_set():
return
self._running.set()
pubsub = self.pubsub
sleep_time = self.sleep_time
while self._running.is_set():
try:
pubsub.get_message(ignore_subscribe_messages=True,
timeout=sleep_time)
except BaseException as e:
if self.exception_handler is None:
raise
self.exception_handler(e, pubsub, self)
pubsub.close()
def stop(self):
# trip the flag so the run loop exits. the run loop will
# close the pubsub connection, which disconnects the socket
# and returns the connection to the pool.
self._running.clear()
class Pipeline(Redis):
"""
Pipelines provide a way to transmit multiple commands to the Redis server
in one transmission. This is convenient for batch processing, such as
saving all the values in a list to Redis.
All commands executed within a pipeline are wrapped with MULTI and EXEC
calls. This guarantees all commands executed in the pipeline will be
executed atomically.
Any command raising an exception does *not* halt the execution of
subsequent commands in the pipeline. Instead, the exception is caught
and its instance is placed into the response list returned by execute().
Code iterating over the response list should be able to deal with an
instance of an exception as a potential value. In general, these will be
ResponseError exceptions, such as those raised when issuing a command
on a key of a different datatype.
"""
UNWATCH_COMMANDS = {'DISCARD', 'EXEC', 'UNWATCH'}
def __init__(self, connection_pool, response_callbacks, transaction,
shard_hint):
self.connection_pool = connection_pool
self.connection = None
self.response_callbacks = response_callbacks
self.transaction = transaction
self.shard_hint = shard_hint
self.watching = False
self.reset()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.reset()
def __del__(self):
try:
self.reset()
except Exception:
pass
def __len__(self):
return len(self.command_stack)
def __bool__(self):
"Pipeline instances should always evaluate to True"
return True
def reset(self):
self.command_stack = []
self.scripts = set()
# make sure to reset the connection state in the event that we were
# watching something
if self.watching and self.connection:
try:
# call this manually since our unwatch or
# immediate_execute_command methods can call reset()
self.connection.send_command('UNWATCH')
self.connection.read_response()
except ConnectionError:
# disconnect will also remove any previous WATCHes
self.connection.disconnect()
# clean up the other instance attributes
self.watching = False
self.explicit_transaction = False
# we can safely return the connection to the pool here since we're
# sure we're no longer WATCHing anything
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def multi(self):
"""
Start a transactional block of the pipeline after WATCH commands
are issued. End the transactional block with `execute`.
"""
if self.explicit_transaction:
raise RedisError('Cannot issue nested calls to MULTI')
if self.command_stack:
raise RedisError('Commands without an initial WATCH have already '
'been issued')
self.explicit_transaction = True
def execute_command(self, *args, **kwargs):
if (self.watching or args[0] == 'WATCH') and \
not self.explicit_transaction:
return self.immediate_execute_command(*args, **kwargs)
return self.pipeline_execute_command(*args, **kwargs)
def immediate_execute_command(self, *args, **options):
"""
Execute a command immediately, but don't auto-retry on a
ConnectionError if we're already WATCHing a variable. Used when
issuing WATCH or subsequent commands retrieving their values but before
MULTI is called.
"""
command_name = args[0]
conn = self.connection
# if this is the first call, we need a connection
if not conn:
conn = self.connection_pool.get_connection(command_name,
self.shard_hint)
self.connection = conn
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
# if we were already watching a variable, the watch is no longer
# valid since this connection has died. raise a WatchError, which
# indicates the user should retry this transaction.
if self.watching:
self.reset()
                raise WatchError("A ConnectionError occurred while "
"watching one or more keys")
# if retry_on_timeout is not set, or the error is not
# a TimeoutError, raise it
if not (conn.retry_on_timeout and isinstance(e, TimeoutError)):
self.reset()
raise
# retry_on_timeout is set, this is a TimeoutError and we are not
# already WATCHing any variables. retry the command.
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except (ConnectionError, TimeoutError):
# a subsequent failure should simply be raised
self.reset()
raise
def pipeline_execute_command(self, *args, **options):
"""
Stage a command to be executed when execute() is next called
Returns the current Pipeline object back so commands can be
chained together, such as:
pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
At some other point, you can then run: pipe.execute(),
which will execute all commands queued in the pipe.
"""
self.command_stack.append((args, options))
return self
def _execute_transaction(self, connection, commands, raise_on_error):
cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})])
all_cmds = connection.pack_commands([args for args, options in cmds
if EMPTY_RESPONSE not in options])
connection.send_packed_command(all_cmds)
errors = []
# parse off the response for MULTI
# NOTE: we need to handle ResponseErrors here and continue
# so that we read all the additional command messages from
# the socket
try:
self.parse_response(connection, '_')
except ResponseError as e:
errors.append((0, e))
# and all the other commands
for i, command in enumerate(commands):
if EMPTY_RESPONSE in command[1]:
errors.append((i, command[1][EMPTY_RESPONSE]))
else:
try:
self.parse_response(connection, '_')
except ResponseError as e:
self.annotate_exception(e, i + 1, command[0])
errors.append((i, e))
# parse the EXEC.
try:
response = self.parse_response(connection, '_')
except ExecAbortError:
if errors:
raise errors[0][1]
raise
# EXEC clears any watched keys
self.watching = False
if response is None:
raise WatchError("Watched variable changed.")
# put any parse errors into the response
for i, e in errors:
response.insert(i, e)
if len(response) != len(commands):
self.connection.disconnect()
raise ResponseError("Wrong number of response items from "
"pipeline execution")
# find any errors in the response and raise if necessary
if raise_on_error:
self.raise_first_error(commands, response)
# We have to run response callbacks manually
data = []
for r, cmd in zip(response, commands):
if not isinstance(r, Exception):
args, options = cmd
command_name = args[0]
if command_name in self.response_callbacks:
r = self.response_callbacks[command_name](r, **options)
data.append(r)
return data
def _execute_pipeline(self, connection, commands, raise_on_error):
# build up all commands into a single request to increase network perf
all_cmds = connection.pack_commands([args for args, _ in commands])
connection.send_packed_command(all_cmds)
response = []
for args, options in commands:
try:
response.append(
self.parse_response(connection, args[0], **options))
except ResponseError as e:
response.append(e)
if raise_on_error:
self.raise_first_error(commands, response)
return response
def raise_first_error(self, commands, response):
for i, r in enumerate(response):
if isinstance(r, ResponseError):
self.annotate_exception(r, i + 1, commands[i][0])
raise r
def annotate_exception(self, exception, number, command):
cmd = ' '.join(map(safe_str, command))
msg = 'Command # %d (%s) of pipeline caused error: %s' % (
number, cmd, exception.args[0])
exception.args = (msg,) + exception.args[1:]
def parse_response(self, connection, command_name, **options):
result = Redis.parse_response(
self, connection, command_name, **options)
if command_name in self.UNWATCH_COMMANDS:
self.watching = False
elif command_name == 'WATCH':
self.watching = True
return result
def load_scripts(self):
# make sure all scripts that are about to be run on this pipeline exist
scripts = list(self.scripts)
immediate = self.immediate_execute_command
shas = [s.sha for s in scripts]
# we can't use the normal script_* methods because they would just
# get buffered in the pipeline.
exists = immediate('SCRIPT EXISTS', *shas)
if not all(exists):
for s, exist in zip(scripts, exists):
if not exist:
s.sha = immediate('SCRIPT LOAD', s.script)
def execute(self, raise_on_error=True):
"Execute all the commands in the current pipeline"
stack = self.command_stack
if not stack and not self.watching:
return []
if self.scripts:
self.load_scripts()
if self.transaction or self.explicit_transaction:
execute = self._execute_transaction
else:
execute = self._execute_pipeline
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection('MULTI',
self.shard_hint)
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
try:
return execute(conn, stack, raise_on_error)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
# if we were watching a variable, the watch is no longer valid
# since this connection has died. raise a WatchError, which
# indicates the user should retry this transaction.
if self.watching:
                raise WatchError("A ConnectionError occurred while "
"watching one or more keys")
# if retry_on_timeout is not set, or the error is not
# a TimeoutError, raise it
if not (conn.retry_on_timeout and isinstance(e, TimeoutError)):
raise
# retry a TimeoutError when retry_on_timeout is set
return execute(conn, stack, raise_on_error)
finally:
self.reset()
def watch(self, *names):
"Watches the values at keys ``names``"
if self.explicit_transaction:
raise RedisError('Cannot issue a WATCH after a MULTI')
return self.execute_command('WATCH', *names)
def unwatch(self):
"Unwatches all previously specified keys"
return self.watching and self.execute_command('UNWATCH') or True
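# A minimal sketch of the WATCH/MULTI/EXEC pattern implemented above, assuming
# a Redis server with default connection settings and the ``pipeline()`` helper
# defined earlier in this module; the key name 'counter' is illustrative only.
def example_atomic_increment(key='counter'):
    client = Redis()
    with client.pipeline() as pipe:
        while True:
            try:
                pipe.watch(key)                    # immediate command, arms WATCH
                current = int(pipe.get(key) or 0)  # read outside the transaction
                pipe.multi()                       # start buffering commands
                pipe.set(key, current + 1)
                pipe.execute()                     # EXEC; raises WatchError on conflict
                return current + 1
            except WatchError:
                # another client touched ``key``; retry the whole transaction
                continue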
class Script:
"An executable Lua script object returned by ``register_script``"
def __init__(self, registered_client, script):
self.registered_client = registered_client
self.script = script
# Precalculate and store the SHA1 hex digest of the script.
if isinstance(script, str):
# We need the encoding from the client in order to generate an
# accurate byte representation of the script
encoder = registered_client.connection_pool.get_encoder()
script = encoder.encode(script)
self.sha = hashlib.sha1(script).hexdigest()
def __call__(self, keys=[], args=[], client=None):
"Execute the script, passing any required ``args``"
if client is None:
client = self.registered_client
args = tuple(keys) + tuple(args)
# make sure the Redis server knows about the script
if isinstance(client, Pipeline):
# Make sure the pipeline can register the script before executing.
client.scripts.add(self)
try:
return client.evalsha(self.sha, len(keys), *args)
except NoScriptError:
            # Maybe the client is pointed to a different server than the client
# that created this instance?
# Overwrite the sha just in case there was a discrepancy.
self.sha = client.script_load(self.script)
return client.evalsha(self.sha, len(keys), *args)
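# A minimal sketch of register_script()/Script usage, assuming a Redis server
# with default connection settings; the key name and the Lua snippet are
# illustrative only.
def example_lua_multiply():
    client = Redis()
    multiply = client.register_script(
        "return redis.call('GET', KEYS[1]) * ARGV[1]")
    client.set('price', 10)
    # EVALSHA is tried first; on NoScriptError the script is loaded and retried
    return multiply(keys=['price'], args=[5])  # -> 50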
class BitFieldOperation:
"""
Command builder for BITFIELD commands.
"""
def __init__(self, client, key, default_overflow=None):
self.client = client
self.key = key
self._default_overflow = default_overflow
self.reset()
def reset(self):
"""
Reset the state of the instance to when it was constructed
"""
self.operations = []
self._last_overflow = 'WRAP'
self.overflow(self._default_overflow or self._last_overflow)
def overflow(self, overflow):
"""
Update the overflow algorithm of successive INCRBY operations
:param overflow: Overflow algorithm, one of WRAP, SAT, FAIL. See the
            Redis docs for descriptions of these algorithms.
:returns: a :py:class:`BitFieldOperation` instance.
"""
overflow = overflow.upper()
if overflow != self._last_overflow:
self._last_overflow = overflow
self.operations.append(('OVERFLOW', overflow))
return self
def incrby(self, fmt, offset, increment, overflow=None):
"""
Increment a bitfield by a given amount.
:param fmt: format-string for the bitfield being updated, e.g. 'u8'
for an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:param int increment: value to increment the bitfield by.
:param str overflow: overflow algorithm. Defaults to WRAP, but other
acceptable values are SAT and FAIL. See the Redis docs for
descriptions of these algorithms.
:returns: a :py:class:`BitFieldOperation` instance.
"""
if overflow is not None:
self.overflow(overflow)
self.operations.append(('INCRBY', fmt, offset, increment))
return self
def get(self, fmt, offset):
"""
Get the value of a given bitfield.
:param fmt: format-string for the bitfield being read, e.g. 'u8' for
an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:returns: a :py:class:`BitFieldOperation` instance.
"""
self.operations.append(('GET', fmt, offset))
return self
def set(self, fmt, offset, value):
"""
Set the value of a given bitfield.
:param fmt: format-string for the bitfield being read, e.g. 'u8' for
an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:param int value: value to set at the given position.
:returns: a :py:class:`BitFieldOperation` instance.
"""
self.operations.append(('SET', fmt, offset, value))
return self
@property
def command(self):
cmd = ['BITFIELD', self.key]
for ops in self.operations:
cmd.extend(ops)
return cmd
def execute(self):
"""
Execute the operation(s) in a single BITFIELD command. The return value
is a list of values corresponding to each operation. If the client
used to create this instance was a pipeline, the list of values
will be present within the pipeline's execute.
"""
command = self.command
self.reset()
return self.client.execute_command(*command)
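# A minimal sketch of the BITFIELD builder above, assuming a Redis server with
# default connection settings; the key name 'stats' is illustrative only.
# Operations are chained and sent as one BITFIELD command, and execute()
# returns one result per queued operation.
def example_bitfield_counters(key='stats'):
    client = Redis()
    bf = BitFieldOperation(client, key, default_overflow='SAT')
    return (bf
            .incrby('u8', '#0', 1)   # bump the first unsigned 8-bit counter
            .incrby('u8', '#1', 5)   # bump the second, saturating on overflow
            .get('u8', '#0')         # read the first counter back
            .execute())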
| 38.136993
| 79
| 0.598807
|
2c4210251ffd4f4ccc77067905e9ed648f40329c
| 2,570
|
py
|
Python
|
main/arm-none-eabi/lib/thumb/v6-m/libstdc++.a-gdb.py
|
aliosthings/gcc-arm-none-eabi-linux
|
94f74be42c74052ff500070b1d41d15fce76967a
|
[
"MIT"
] | 4
|
2019-11-15T17:27:46.000Z
|
2020-09-28T21:03:15.000Z
|
main/arm-none-eabi/lib/thumb/v6-m/libstdc++.a-gdb.py
|
aliosthings/gcc-arm-none-eabi-linux
|
94f74be42c74052ff500070b1d41d15fce76967a
|
[
"MIT"
] | 1
|
2020-06-22T19:20:36.000Z
|
2020-07-15T03:55:41.000Z
|
main/arm-none-eabi/lib/thumb/v6-m/libstdc++.a-gdb.py
|
aliosthings/gcc-arm-none-eabi-linux
|
94f74be42c74052ff500070b1d41d15fce76967a
|
[
"MIT"
] | 3
|
2020-06-11T22:32:45.000Z
|
2022-03-21T15:18:19.000Z
|
# -*- python -*-
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/tmp/jenkins/jenkins-GCC-7-build_toolchain_docker-775_20180622_1529687456/install-native/share/gcc-arm-none-eabi'
libdir = '/tmp/jenkins/jenkins-GCC-7-build_toolchain_docker-775_20180622_1529687456/install-native/arm-none-eabi/lib/thumb/v6-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function, as a plain import would not execute the body of the
# included file on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
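# Worked example of the relocation logic above, using the paths baked into this
# file: the common prefix of libdir and pythondir ends at '.../install-native/',
# so after stripping it libdir is 'arm-none-eabi/lib/thumb/v6-m' (four path
# components) and pythondir is 'share/gcc-arm-none-eabi'.  dotdots therefore
# becomes '../../../../', and for an objfile living next to this script the
# printers are imported from <objfile dir>/../../../../share/gcc-arm-none-eabi,
# i.e. <prefix>/share/gcc-arm-none-eabi.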
| 41.451613
| 128
| 0.732685
|
fc549891e8c93ae154122f4fdd7716669de5cd92
| 9,228
|
py
|
Python
|
src/data/make_dataset.py
|
amuta/ambev-data-challenge
|
d5817b80be4dc419d76cdd40bafeee515fbf6f38
|
[
"MIT"
] | null | null | null |
src/data/make_dataset.py
|
amuta/ambev-data-challenge
|
d5817b80be4dc419d76cdd40bafeee515fbf6f38
|
[
"MIT"
] | 2
|
2021-04-30T20:37:11.000Z
|
2021-06-01T22:51:15.000Z
|
src/data/make_dataset.py
|
amuta/ambev-data-challenge
|
d5817b80be4dc419d76cdd40bafeee515fbf6f38
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# import click
import logging
import os
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import numpy as np
import re
from datetime import datetime
from sklearn.preprocessing import LabelEncoder
# @click.command()
# @click.argument('data_step', type=click.Path())
def main(force=False, out=False):
''' Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
'''
global logger
logger = logging.getLogger(__name__)
logger.info('start making data...')
AMBEV_FILE = 'ambev-final-dataset.csv'
RAW_PATH = 'data/raw/'
INTE_PATH = 'data/interim/'
PROC_PATH = 'data/processed/'
processed_file = PROC_PATH + AMBEV_FILE.replace('.csv', '.pkl')
interim_file = INTE_PATH + AMBEV_FILE.replace('.csv', '.pkl')
raw_file = RAW_PATH + AMBEV_FILE
# Create interim file if not exists
if os.path.isfile(interim_file) and not force:
logger.info('using found interim file {}'.format(interim_file))
df = pd.read_pickle(interim_file)
else:
logger.info('creating interim file {}'.format(interim_file))
df = pd.read_csv(raw_file, encoding='utf-8', low_memory=False)
df = make_interim(df)
df.to_csv(interim_file, index=False)
df.to_pickle(interim_file)
# Create processed file if not exists
if os.path.isfile(processed_file) and not force:
logger.info('using found processed file {}'.format(processed_file))
df = pd.read_pickle(processed_file)
else:
logger.info('creating processed file {}'.format(processed_file))
df = pd.read_pickle(interim_file)
df = make_processed(df)
df.to_csv(processed_file, index=False)
df.to_pickle(processed_file)
# Finishing up
    logger.info('make data ran successfully\n interim file is located at {}'
'\n processed file is located at {}'.format(interim_file,
processed_file))
# Returning if called outside make
if out:
return df
######################
# INTERIM DATA PIPELINE
######################
def make_interim(df):
''' pipeline of interim df'''
logger.info('fixing column names')
df = fix_colnames(df)
logger.info('fixing column alignment')
df = fix_cols(df)
logger.info('correcting dtypes')
df = correct_dtypes(df)
logger.info('cleaning values')
df = clean_values(df)
logger.info('creating new features')
df = new_features(df)
return df
def fix_colnames(df):
''' Fix problematic column names
'''
col_names = [
'mes', 'pais', 'mundo', 'area_regional', 'unidade',
'grupo_cargo', 'cargo', 'grade', 'banda', 'area', 'id_funcionario',
'id_gestor', 'id_kpi', 'diretoria', 'area_diretoria',
'funcao', 'tipo_meta', 'categoria_kpi', 'nome_kpi', 'peso_kpi',
'prazo', 'regra_alcance_parcial', 'meta_projeto', 'ating_mes',
'pts_mes', 'acum_mes', 'ating_acum', 'pts_acumulado',
'acum_acumulado', 'ating_fim_exer', 'pts_fim_exer',
'acum_fim_exer', 'status_meta', 'c33', 'c34', 'c35', 'c36', 'c37']
df.columns = col_names
df['ult_col'] = np.nan
return df
def fix_cols(df):
''' Fix unaligned columns and column names
'''
bad_cols = df.columns.values[21:]
for i in range(6):
# meta_projeto should always be s/n/nan
idxs = df[~df.meta_projeto.isin([
'Sim', 'Não', np.nan, np.NAN])].index
df.loc[idxs, 'regra_alcance_parcial'] = (
df.loc[idxs, 'regra_alcance_parcial'].fillna('').astype(str) +
' ' + df.loc[idxs, 'meta_projeto'].fillna('').astype(str))
for i in range(1, len(bad_cols) - 1):
df.loc[idxs, bad_cols[i]] = df.loc[idxs, bad_cols[i + 1]].values
# dropping now empty columns
df.drop(['c33', 'c34', 'c35', 'c36', 'c37', 'ult_col'],
axis=1, inplace=True)
return df
def correct_dtypes(df):
''' Change the dtypes of columns to something more relatable
'''
df[df.columns[:23]] = df[df.columns[:23]].astype('str')
df.status_meta = df['status_meta'].astype('str')
df[df.columns[23:32]] = df[df.columns[23:32]].astype(np.number)
return df
def clean_values(df):
''' Do some data cleaning on problematic columns
'''
# Format errors
df.mes = df.mes.astype(str).str.extract('(^.*)(?=.{4}$)').astype(int)
# Encoding errors
df.loc[df.pais == 'PanamÁ¡', 'pais'] = 'Panama'
# NLP transforms
cols = [
'cargo', 'grupo_cargo', 'area', 'diretoria', 'mundo',
'area_diretoria', 'funcao', 'banda', 'tipo_meta', 'categoria_kpi',
'regra_alcance_parcial']
for col in cols:
df[col] = df[col].str.lower().str.replace(r'[^\w\s]+', '')
df[col] = df[col].str.normalize('NFKD').str.encode(
'ascii', errors='ignore').str.decode('utf-8')
# Month cleanup
df.prazo = df.prazo.astype('str').apply(extract_month)
return df
def extract_month(text):
''' Extract the month of some unformated date value using regex
'''
if re.match(r'(^[0-3]?[0-9].[0-1]?[0-9].?[0-9]{4})', text):
val = re.search(r'^[0-3]?[0-9].([0-1]?[0-9])', text).groups()[0]
elif re.match(r'(^[0-1]?[0-9].[0-9]?[0-9].?[0-9]{4})', text):
val = re.search(r'^([0-1]?[0-9])', text).groups()[0]
elif re.match(r'(^[0-9]?[0-9](?:[.]|[\/])[0-1]?[0-9].?[0-9]{2})', text):
val = re.search(r'^[0-3]?[0-9].([0-1]?[0-9])', text).groups()[0]
elif re.match(r'monthly', text, re.IGNORECASE): # not really a date
val = 0
elif re.match(r'^([0-9]{5})', text): # excel date format
val = datetime.fromordinal(
datetime(1900, 1, 1).toordinal() + int(
re.search(r'^([0-9]{5})', text).groups()[0]) - 2).month
else:
val = 0
return val
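# A few illustrative inputs for extract_month (hypothetical values):
#   extract_month('01/12/2019') -> '12'  (day-first date; month captured as text)
#   extract_month('Monthly')    -> 0     (recurring goals carry no deadline month)
#   extract_month('43466')      -> 1     (Excel serial date for 2019-01-01)
#   extract_month('n/a')        -> 0     (anything unparseable falls through)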
def new_features(df):
''' Create new features
'''
df['abrev_cargo'] = df.cargo.str[:3]
df['abrev_grupo_cargo'] = df.grupo_cargo.str[:3]
df['nivel_cargo'] = df.cargo.str.extract(r'([iv]{1,3}$)')
df['regra_n1'] = df.regra_alcance_parcial.str.extract(r'(\d{2})')
df['regra_n2'] = df.regra_alcance_parcial.str.extract(r'(?:\d{2})(?:.*)(\d{2})')
df['regra_n3'] = df.regra_alcance_parcial.str.extract(r'(?:\d{2})(?:.*)(?:\d{2})(?:.*)(\d{2})')
df['regra_n4'] = df.regra_alcance_parcial.str.extract(r'(?:\d{2})(?:.*)(?:\d{2})(?:.*)(?:\d{2})(?:.*)(\d{2})')
df['regra_real'] = df.regra_alcance_parcial.str.contains('real')
df['regra_lacuna'] = df.regra_alcance_parcial.str.contains('lacuna')
df['regra_pontosl'] = df.regra_alcance_parcial.str.contains('pontos')
return df
######################
# PROCESSED DATA PIPELINE
######################
def make_processed(df):
''' Final cleanup of processed data for model input
'''
logger.info('removing bad data')
df = remove_bad_data(df)
logger.info('making target')
df = make_target(df)
logger.info('removing ids and using first row')
df = remove_ids_months(df)
logger.info('encoding categorical columns')
df = encode_categoricals(df)
return df
def remove_bad_data(df):
''' Remove non usable columns and data
'''
# correlated/leaky columns
col_names = ['ating_acum', 'ating_fim_exer', 'pts_mes', 'pts_acumulado',
'pts_fim_exer', 'acum_mes', 'acum_acumulado', 'acum_fim_exer']
df = df[df.status_meta == 'Monitoramento Aprovado']
df = df.fillna(0)
df = df[df.columns[df.nunique() > 1]]
df = df.drop(col_names, axis=1)
return df
def make_target(df):
''' Make the target variable
'''
# df['ating_mes'] = df.ating_mes.astype(np.number)
df['target'] = df.groupby(
['id_funcionario']).ating_mes.transform('mean') / 100
df = df.drop([
'mes', 'ating_mes', 'nome_kpi', 'regra_alcance_parcial',
'id_kpi', 'categoria_kpi', 'tipo_meta', 'regra_n1', 'meta_projeto',
'regra_n2', 'regra_n3', 'regra_n4', 'regra_real', 'regra_lacuna'],
axis=1)
return df
def remove_ids_months(df):
''' Remove unique identifiers and use only first month data
'''
df = df.groupby(['id_funcionario']).agg('first').reset_index()
df = df.drop(['id_funcionario', 'id_gestor'], axis=1) # remove ids
return df
def encode_categoricals(df):
''' Encode categorical data
'''
le = LabelEncoder()
    # columns to encode (non-numeric)
col_names = df.drop('target', axis=1).columns
for col in col_names:
# logger.info('column {}'.format(col))
df[col] = le.fit_transform(df[col].astype('str'))
return df
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main(force=True)
| 34.561798
| 114
| 0.607174
|
bbc52f49bde2eb0a292aaa6544bf1d50bd38659e
| 2,369
|
py
|
Python
|
logbook/phone/gps/__init__.py
|
PREDICT-DPACC/logbook
|
2eb3ecf4bdac16bcd79fcb25ff40c5bfe580617a
|
[
"BSD-3-Clause"
] | null | null | null |
logbook/phone/gps/__init__.py
|
PREDICT-DPACC/logbook
|
2eb3ecf4bdac16bcd79fcb25ff40c5bfe580617a
|
[
"BSD-3-Clause"
] | 2
|
2021-03-31T13:28:48.000Z
|
2021-03-31T14:43:27.000Z
|
logbook/phone/gps/__init__.py
|
PREDICT-DPACC/logbook
|
2eb3ecf4bdac16bcd79fcb25ff40c5bfe580617a
|
[
"BSD-3-Clause"
] | 1
|
2021-03-29T21:20:40.000Z
|
2021-03-29T21:20:40.000Z
|
import os
import re
import gzip
import pandas as pd
import logging
from datetime import datetime
from dateutil import tz
logger = logging.getLogger(__name__)
FILE_REGEX = re.compile(r'(?P<year>[0-9]{4})-(?P<month>[0-9]{2})-(?P<day>[0-9]{2})\s(?P<hour>[0-9]{2})_(?P<minute>[0-9]{2})_(?P<second>[0-9]{2})(?P<extension>\..*)')
CHUNKSIZE = 10 ** 6
def process(study, subject, read_dir, date_from, output_tz, input_tz):
# Instantiate an empty dataframe
df = pd.DataFrame.from_records([])
for root_dir, dirs, files in os.walk(read_dir):
files[:] = [ f for f in files if not f[0] == '.' ]
dirs[:] = [ d for d in dirs if not d[0] == '.' ]
for file_name in sorted(files):
file_name, extension = verify(file_name)
if file_name is not None:
file_path = os.path.join(root_dir, file_name)
data_list = parse(date_from, output_tz,
input_tz, file_path, file_name)
df = df.append(data_list, ignore_index=True, sort=False)
return df
def process_seconds(df):
df.index.name = None
dfe = df.groupby(['day', 'weekday', 'timeofday', 'UTC_offset'])
df = dfe.size().reset_index(name='hours')
# Format numbers for the visual
df['hours'] = df['hours'].astype(int)
df['weekday'] = df['weekday'].astype(int)
df['day'] = df['day'].astype(int)
df.columns.name = None
return df
# Verify the file based on its filename
def verify(file_name):
match = FILE_REGEX.match(file_name)
if match and match.group('extension') in ['.csv.lock', '.csv']:
return file_name, match.group('extension')
else:
return None, None
# Return timestamp in the timezone
def process_datetime(row_timestamp, input_tz, output_tz):
match = FILE_REGEX.match(row_timestamp).groupdict()
timestamp = pd.Timestamp(year = int(match['year']),
month = int(match['month']),
day = int(match['day']),
hour = int(match['hour']),
minute = int(match['minute']),
second = int(match['second']),
nanosecond = 0,
tz = input_tz)
return timestamp.tz_convert(output_tz)
# Parse and process data
def parse(date_from, output_tz, input_tz, file_path, filename):
df = {}
df['$date_to'] = process_datetime(filename, input_tz, output_tz)
df['counts'] = 1
return df
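# Example of the filename convention handled above (hypothetical file): a file
# named '2021-03-29 14_05_09.csv' passes verify(), and
#   process_datetime('2021-03-29 14_05_09.csv', 'UTC', 'America/New_York')
# yields a timezone-aware Timestamp for 2021-03-29 14:05:09 UTC, rendered as
# 10:05:09 in US Eastern time.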
| 32.013514
| 165
| 0.622203
|
0836990d068baef290bd8c5c12bfee5d862e7d17
| 4,001
|
py
|
Python
|
autoflow/tests/base.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | 49
|
2020-04-16T11:17:28.000Z
|
2020-05-06T01:32:44.000Z
|
autoflow/tests/base.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | null | null | null |
autoflow/tests/base.py
|
auto-flow/autoflow
|
f5903424ad8694d57741a0bd6dfeaba320ea6517
|
[
"BSD-3-Clause"
] | 3
|
2020-04-17T00:53:24.000Z
|
2020-04-23T03:04:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : qichun tang
# @Contact : tqichun@gmail.com
import os
import re
import shutil
import unittest
from pathlib import Path
from typing import Iterator, Tuple
import joblib
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, StandardScaler, LabelEncoder
from autoflow.datasets import load_task
from autoflow.tests.mock import get_mock_resource_manager
class LocalResourceTestCase(unittest.TestCase):
def setUp(self) -> None:
super(LocalResourceTestCase, self).setUp()
self.mock_resource_manager = get_mock_resource_manager()
def tearDown(self) -> None:
shutil.rmtree(self.mock_resource_manager.store_path)
class LogTestCase(LocalResourceTestCase):
visible_levels = None
log_name = None
def setUp(self) -> None:
super(LogTestCase, self).setUp()
self.log_file = os.getcwd() + "/" + self.log_name
self.pattern = re.compile("\[(" + "|".join(self.visible_levels) + ")\]\s\[.*:(.*)\](.*)$", re.MULTILINE)
if os.path.exists(self.log_file):
os.remove(self.log_file)
def update_log_path(self, pipe):
pipe.resource_manager.init_experiment_table()
experiment = pipe.resource_manager.ExperimentModel
log_path = experiment.select().where(experiment.experiment_id == pipe.experiment_id)[0].log_path
self.log_file = log_path
def iter_log_items(self) -> Iterator[Tuple[str, str, str]]:
'''
iterate log items
Returns
-------
result:Iterator[Tuple[str,str,str]]
(level, logger, msg)
like: "INFO", "peewee", "SELECT * FROM table;"
'''
log_content = Path(self.log_file).read_text()
for item in self.pattern.finditer(log_content):
level = item.group(1)
logger = item.group(2)
msg = item.group(3)
msg = msg.strip()
yield (level, logger, msg)
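    # Illustrative log line (hypothetical format) for the pattern above: with
    # visible_levels = ['INFO'] a line such as
    #   [INFO] [autoflow.resource_manager:peewee] SELECT * FROM experiment;
    # is yielded as ('INFO', 'peewee', 'SELECT * FROM experiment;').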
class EstimatorTestCase(unittest.TestCase):
current_file = None
def setUp(self) -> None:
cur_dir = Path(self.current_file).parent
if (cur_dir / "126025.bz2").exists():
X_train, y_train, X_test, y_test, cat = joblib.load(cur_dir / "126025.bz2")
else:
X_train, y_train, X_test, y_test, cat = load_task(126025)
joblib.dump(
[X_train, y_train, X_test, y_test, cat],
cur_dir / "126025.bz2"
)
nan_cnt = np.count_nonzero(pd.isna(pd.concat([X_train, X_test])), axis=0)
cat = np.array(cat)
cat_na_mask = (nan_cnt > 0) & cat
num_na_mask = (nan_cnt > 0) & (~cat)
cat_imputer = SimpleImputer(strategy="constant", fill_value="NA").fit(X_train.loc[:, cat_na_mask])
# num_imputer = BaseImputer(strategy="median").fit(X_train.loc[:, num_na_mask])
X_train.loc[:, cat_na_mask] = cat_imputer.transform(X_train.loc[:, cat_na_mask])
X_test.loc[:, cat_na_mask] = cat_imputer.transform(X_test.loc[:, cat_na_mask])
# X_train.loc[:, num_na_mask] = num_imputer.transform(X_train.loc[:, num_na_mask])
# X_test.loc[:, num_na_mask] = num_imputer.transform(X_test.loc[:, num_na_mask])
ordinal_encoder = OrdinalEncoder(dtype="int").fit(X_train.loc[:, cat])
transformer = StandardScaler().fit(X_train.loc[:, ~cat])
X_train.loc[:, cat] = ordinal_encoder.transform(X_train.loc[:, cat])
X_train.loc[:, ~cat] = transformer.transform(X_train.loc[:, ~cat])
X_test.loc[:, cat] = ordinal_encoder.transform(X_test.loc[:, cat])
X_test.loc[:, ~cat] = transformer.transform(X_test.loc[:, ~cat])
self.cat_indexes = np.arange(len(cat))[cat]
label_encoder = LabelEncoder().fit(y_train)
self.y_train = label_encoder.transform(y_train)
self.y_test = label_encoder.transform(y_test)
self.X_train = X_train
self.X_test = X_test
| 39.22549
| 112
| 0.642839
|
284c150909e4f70a1b0ca3601f6fa53614266ea9
| 600
|
py
|
Python
|
docs/#11_numeric/#2_fractions.py
|
gymcoding/learning-python
|
5c211e10a0e97e0fab51ee49b03d74b417ee76ff
|
[
"MIT"
] | 1
|
2020-07-29T23:59:28.000Z
|
2020-07-29T23:59:28.000Z
|
docs/#11_numeric/#2_fractions.py
|
sodragon/learning-python
|
5c211e10a0e97e0fab51ee49b03d74b417ee76ff
|
[
"MIT"
] | null | null | null |
docs/#11_numeric/#2_fractions.py
|
sodragon/learning-python
|
5c211e10a0e97e0fab51ee49b03d74b417ee76ff
|
[
"MIT"
] | null | null | null |
#-- The Fraction class
# ● The fractions module handles rational-number arithmetic efficiently
# ● Constructors of the Fraction class
#   ○ fraction Fraction(numerator=0, denominator=1)
#   ○ fraction Fraction(Fraction object)
#   ○ fraction Fraction(string)
import fractions
fraction_obj1 = fractions.Fraction(4, 16)
print(fraction_obj1)
fraction_obj2 = fractions.Fraction(3)
print(fraction_obj2)
fraction_obj3 = fractions.Fraction('3.14')
print(fraction_obj3)
#-- Supported methods
# ● Basic arithmetic as well as floor, ceil and round are available, and a class method that returns the greatest common divisor also exists
f = fractions.Fraction.from_float(3.14)
print(f.__floor__())
import math
print(math.floor(f))
print(math.ceil(f))
print(round(f))
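# An additional illustrative example (not in the original notes): basic
# arithmetic keeps exact rational results.
frac_sum = fractions.Fraction(1, 3) + fractions.Fraction(1, 6)
print(frac_sum)         # 1/2
print(float(frac_sum))  # 0.5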
| 27.272727
| 66
| 0.735
|
56ee841ab644aa0a9b89cb5bc3ebaa80e95ffca2
| 82
|
py
|
Python
|
py_tdlib/constructors/update_user.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/update_user.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/update_user.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Type
class updateUser(Type):
user = None # type: "user"
| 13.666667
| 28
| 0.682927
|
452885063057c221cf93fda8f88039b4fba914f0
| 1,791
|
py
|
Python
|
resolve/plot_lat.py
|
bojieli/ipc-bench
|
76c03e880477666b1e2275607dd62a74f02b6daf
|
[
"MIT"
] | 56
|
2015-02-24T15:44:58.000Z
|
2022-01-25T04:09:10.000Z
|
resolve/plot_lat.py
|
shekkbuilder/ipc-bench
|
76c03e880477666b1e2275607dd62a74f02b6daf
|
[
"MIT"
] | 1
|
2019-12-29T19:05:37.000Z
|
2020-01-09T12:05:48.000Z
|
resolve/plot_lat.py
|
shekkbuilder/ipc-bench
|
76c03e880477666b1e2275607dd62a74f02b6daf
|
[
"MIT"
] | 24
|
2015-04-02T16:57:58.000Z
|
2020-07-05T07:11:38.000Z
|
import sys, os
import numpy as np
import numpy.random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pylab
def get_data(filename):
data = np.loadtxt(filename)
x_tmp = []
y_tmp = []
v_tmp = []
retdata = {}
for i in range(0, len(data)):
for j in range(0, len(data[i])):
x_tmp.append(i)
y_tmp.append(j)
v_tmp.append(data[i][j])
retdata = [x_tmp, y_tmp, v_tmp]
print len(retdata)
return retdata
# ---------------------------
# Handle command line args
if len(sys.argv) < 3:
print "usage: python plot_lat.py <input file> <title> [fix-scale] [colourmap]"
sys.exit(0)
input_file = sys.argv[1]
fix_scale = 0
if len(sys.argv) > 3:
fix_scale = int(sys.argv[3])
if len(sys.argv) > 4:
colourmap = sys.argv[4]
else:
colourmap = "Greys"
raw_data = np.loadtxt(input_file)
data = get_data(input_file)
fig = plt.figure(figsize=(4,3))
#f = pylab.Figure(figsize=(2,1.5))
# print ds
#heatmap, xedges, yedges = np.histogram2d(data[0], data[1], bins=48, weights=data[2])
#extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
#plt.clf()
#plt.hexbin(data[0], data[1], C=data[2], gridsize=40, linewidths=1, cmap=cm.jet, bins=None)
#plt.clf()
#plt.imshow(heatmap, extent=extent)
if fix_scale != 0:
plt.matshow(raw_data, vmax=0.000006, vmin=0.000001, fignum=0, cmap=colourmap)
else:
plt.matshow(raw_data, fignum=0, cmap=colourmap)
# add axis labels, title and colour bar
plt.ylabel('Core ID')
plt.ylim(0, 48)
plt.xlabel('Core ID')
plt.xlim(0, 48)
plt.title(sys.argv[2])
cb = plt.colorbar(shrink=1.0, format='%.3e')
cb.set_label('Latency in microseconds')
plt.savefig("lat_" + sys.argv[1] + ".pdf", format="pdf", bbox_inches='tight')
plt.savefig("lat_" + sys.argv[1] + ".png", format="png", bbox_inches='tight')
| 22.3875
| 91
| 0.658291
|
12a74aec086d763ab97c3f33c3d50453bee16c71
| 11,101
|
py
|
Python
|
test.py
|
columbia-robovision/dsr
|
f34d60f885cd01e6b562e799d7c81eafda3ae765
|
[
"MIT"
] | 20
|
2020-11-11T21:18:17.000Z
|
2021-03-28T09:09:52.000Z
|
test.py
|
columbia-robovision/dsr
|
f34d60f885cd01e6b562e799d7c81eafda3ae765
|
[
"MIT"
] | null | null | null |
test.py
|
columbia-robovision/dsr
|
f34d60f885cd01e6b562e799d7c81eafda3ae765
|
[
"MIT"
] | 5
|
2020-11-24T08:49:52.000Z
|
2021-03-29T14:20:41.000Z
|
import numpy as np
import torch
import argparse
from tqdm import tqdm
import os.path as osp
from data import Data
from torch.utils.data import DataLoader
from model import ModelDSR
import itertools
parser = argparse.ArgumentParser()
parser.add_argument('--resume', type=str, help='path to model')
parser.add_argument('--data_path', type=str, help='path to data')
parser.add_argument('--test_type', type=str, choices=['motion_visible', 'motion_full', 'mask_ordered', 'mask_unordered'])
parser.add_argument('--gpu', type=int, default=0, help='gpu id (single gpu)')
parser.add_argument('--object_num', type=int, default=5, help='number of objects')
parser.add_argument('--seq_len', type=int, default=10, help='sequence length')
parser.add_argument('--batch', type=int, default=12, help='batch size')
parser.add_argument('--workers', type=int, default=2, help='number of workers in data loader')
parser.add_argument('--model_type', type=str, default='dsr', choices=['dsr', 'single', 'nowarp', 'gtwarp', '3dflow'])
parser.add_argument('--transform_type', type=str, default='se3euler', choices=['affine', 'se3euler', 'se3aa', 'se3spquat', 'se3quat'])
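# (Hedged example invocation; the checkpoint and data paths below are
#  placeholders, not taken from the original repository.)
#   python test.py --resume checkpoints/dsr.pth --data_path data/test_seq \
#                  --test_type mask_ordered --gpu 0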
def main():
args = parser.parse_args()
torch.cuda.set_device(args.gpu)
data, loaders = {}, {}
for split in ['test']:
data[split] = Data(data_path=args.data_path, split=split, seq_len=args.seq_len)
loaders[split] = DataLoader(dataset=data[split], batch_size=args.batch, num_workers=args.workers)
print('==> dataset loaded: [size] = {0}'.format(len(data['test'])))
model = ModelDSR(
object_num=args.object_num,
transform_type=args.transform_type,
motion_type='se3' if args.model_type != '3dflow' else 'conv',
)
model.cuda()
checkpoint = torch.load(args.resume, map_location=torch.device(f'cuda:{args.gpu}'))
model.load_state_dict(checkpoint['state_dict'])
print('==> resume: ' + args.resume)
with torch.no_grad():
if args.test_type == 'motion_visible':
evaluation_motion_visible(args, model, loaders['test'])
if args.test_type == 'motion_full':
evaluation_motion_full(args, model, loaders['test'])
if args.test_type == 'mask_ordered':
evaluation_mask_ordered(args, model, loaders['test'])
if args.test_type == 'mask_unordered':
evaluation_mask_unordered(args, model, loaders['test'])
def evaluation_mask_unordered(args, model, loader):
print(f'==> evaluation_mask (unordered)')
iou_dict = [[] for _ in range(args.seq_len)]
for batch in tqdm(loader):
batch_size = batch['0-action'].size(0)
last_s = model.get_init_repr(batch_size).cuda()
logit_pred_list, mask_gt_list = [], []
for step_id in range(args.seq_len):
output = model(
input_volume=batch['%d-tsdf' % step_id].cuda().unsqueeze(1),
last_s=last_s,
input_action=batch['%d-action' % step_id].cuda(),
input_motion=batch['%d-scene_flow_3d' % step_id].cuda() if args.model_type=='gtwarp' else None,
no_warp=args.model_type=='nowarp'
)
if not args.model_type == 'single':
last_s = output['s'].data
logit_pred = output['init_logit']
mask_gt = batch['%d-mask_3d' % step_id].cuda()
iou_unordered = calc_iou_unordered(logit_pred, mask_gt)
iou_dict[step_id].append(iou_unordered)
print('mask_unordered (IoU) = ', np.mean([np.mean(np.concatenate(iou_dict[i])) for i in range(args.seq_len)]))
def calc_iou_unordered(logit_pred, mask_gt_argmax):
# logit_pred: [B, K, S1, S2, S3], softmax, the last channel is empty
# mask_gt_argmax: [B, S1, S2, S3], 0 represents empty
B, K, S1, S2, S3 = logit_pred.size()
logit_pred_argmax = torch.argmax(logit_pred, dim=1, keepdim=True)
mask_gt_argmax = torch.unsqueeze(mask_gt_argmax, 1)
mask_pred_onehot = torch.zeros_like(logit_pred).scatter(1, logit_pred_argmax, 1)[:, :-1]
mask_gt_onehot = torch.zeros_like(logit_pred).scatter(1, mask_gt_argmax, 1)[:, 1:]
K -= 1
info_dict = {'I': np.zeros([B, K, K]), 'U': np.zeros([B, K, K])}
for b in range(B):
for i in range(K):
for j in range(K):
mask_gt = mask_gt_onehot[b, i]
mask_pred = mask_pred_onehot[b, j]
I = torch.sum(mask_gt * mask_pred).item()
U = torch.sum(mask_gt + mask_pred).item() - I
info_dict['I'][b, i, j] = I
info_dict['U'][b, i, j] = U
batch_ious = []
for b in range(B):
best_iou, best_p = 0, None
for p in list(itertools.permutations(range(K))):
cur_I = [info_dict['I'][b, i, p[i]] for i in range(K)]
cur_U = [info_dict['U'][b, i, p[i]] for i in range(K)]
cur_iou = np.mean(np.array(cur_I) / np.maximum(np.array(cur_U), 1))
if cur_iou > best_iou:
best_iou = cur_iou
batch_ious.append(best_iou)
return np.array(batch_ious)
def evaluation_mask_ordered(args, model, loader):
print(f'==> evaluation_mask (ordered)')
iou_dict = []
for batch in tqdm(loader):
batch_size = batch['0-action'].size(0)
last_s = model.get_init_repr(batch_size).cuda()
logit_pred_list, mask_gt_list = [], []
for step_id in range(args.seq_len):
output = model(
input_volume=batch['%d-tsdf' % step_id].cuda().unsqueeze(1),
last_s=last_s,
input_action=batch['%d-action' % step_id].cuda(),
input_motion=batch['%d-scene_flow_3d' % step_id].cuda() if args.model_type=='gtwarp' else None,
no_warp=args.model_type=='nowarp'
)
if not args.model_type == 'single':
last_s = output['s'].data
logit_pred = output['init_logit']
mask_gt = batch['%d-mask_3d' % step_id].cuda()
logit_pred_list.append(logit_pred)
mask_gt_list.append(mask_gt)
iou_ordered = calc_iou_ordered(logit_pred_list, mask_gt_list)
iou_dict.append(iou_ordered)
print('mask_ordered (IoU) = ', np.mean(np.concatenate(iou_dict)))
def calc_iou_ordered(logit_pred_list, mask_gt_argmax_list):
# logit_pred_list: [L, B, K, S1, S2, S3], softmax, the last channel is empty
# mask_gt_argmax_list: [L, B, S1, S2, S3], 0 represents empty
L = len(logit_pred_list)
B, K, S1, S2, S3 = logit_pred_list[0].size()
K -= 1
info_dict = {'I': np.zeros([L, B, K, K]), 'U': np.zeros([L, B, K, K])}
for l in range(L):
logit_pred = logit_pred_list[l]
mask_gt_argmax = mask_gt_argmax_list[l]
logit_pred_argmax = torch.argmax(logit_pred, dim=1, keepdim=True)
mask_gt_argmax = torch.unsqueeze(mask_gt_argmax, 1)
mask_pred_onehot = torch.zeros_like(logit_pred).scatter(1, logit_pred_argmax, 1)[:, :-1]
mask_gt_onehot = torch.zeros_like(logit_pred).scatter(1, mask_gt_argmax, 1)[:, 1:]
for b in range(B):
for i in range(K):
for j in range(K):
mask_gt = mask_gt_onehot[b, i]
mask_pred = mask_pred_onehot[b, j]
I = torch.sum(mask_gt * mask_pred).item()
U = torch.sum(mask_gt + mask_pred).item() - I
info_dict['I'][l, b, i, j] = I
info_dict['U'][l, b, i, j] = U
batch_ious = []
for b in range(B):
best_iou, best_p = 0, None
for p in list(itertools.permutations(range(K))):
cur_I = [info_dict['I'][l, b, i, p[i]] for l in range(L) for i in range(K)]
cur_U = [info_dict['U'][l, b, i, p[i]] for l in range(L) for i in range(K)]
cur_iou = np.mean(np.array(cur_I) / np.maximum(np.array(cur_U), 1))
if cur_iou > best_iou:
best_iou = cur_iou
batch_ious.append(best_iou)
return np.array(batch_ious)
def evaluation_motion_visible(args, model, loader):
print('==> evaluation_motion (visible surface)')
mse_dict = [0 for _ in range(args.seq_len)]
data_num = 0
for batch in tqdm(loader):
batch_size = batch['0-action'].size(0)
data_num += batch_size
last_s = model.get_init_repr(batch_size).cuda()
for step_id in range(args.seq_len):
output = model(
input_volume=batch['%d-tsdf' % step_id].cuda().unsqueeze(1),
last_s=last_s,
input_action=batch['%d-action' % step_id].cuda(),
input_motion=batch['%d-scene_flow_3d' % step_id].cuda() if args.model_type=='gtwarp' else None,
no_warp=args.model_type=='nowarp'
)
if not args.model_type in ['single', '3dflow'] :
last_s = output['s'].data
tsdf = batch['%d-tsdf' % step_id].cuda().unsqueeze(1)
mask = batch['%d-mask_3d' % step_id].cuda().unsqueeze(1)
surface_mask = ((tsdf > -0.99).float()) * ((tsdf < 0).float()) * ((mask > 0).float())
surface_mask[..., 0] = 0
target = batch['%d-scene_flow_3d' % step_id].cuda()
pred = output['motion']
mse = torch.sum((target - pred) ** 2 * surface_mask, dim=[1, 2, 3, 4]) / torch.sum(surface_mask, dim=[1, 2, 3, 4])
mse_dict[step_id] += torch.sum(mse).item() * 0.16
            # 0.16 (= 0.4^2) is the scale that converts the unit from "voxel" to "cm".
            # The voxel size is 0.4 cm. Here we use squared error.
print('motion_visible (MSE in cm) = ', np.mean([np.mean(mse_dict[i]) / data_num for i in range(args.seq_len)]))
def evaluation_motion_full(args, model, loader):
print('==> evaluation_motion (full volume)')
mse_dict = [0 for _ in range(args.seq_len)]
data_num = 0
for batch in tqdm(loader):
batch_size = batch['0-action'].size(0)
data_num += batch_size
last_s = model.get_init_repr(batch_size).cuda()
for step_id in range(args.seq_len):
output = model(
input_volume=batch['%d-tsdf' % step_id].cuda().unsqueeze(1),
last_s=last_s,
input_action=batch['%d-action' % step_id].cuda(),
input_motion=batch['%d-scene_flow_3d' % step_id].cuda() if args.model_type=='gtwarp' else None,
no_warp=args.model_type=='nowarp'
)
if not args.model_type in ['single', '3dflow'] :
last_s = output['s'].data
target = batch['%d-scene_flow_3d' % step_id].cuda()
pred = output['motion']
mse = torch.mean((target - pred) ** 2, dim=[1, 2, 3, 4])
mse_dict[step_id] += torch.sum(mse).item() * 0.16
            # 0.16 (= 0.4^2) is the scale that converts the unit from "voxel" to "cm".
            # The voxel size is 0.4 cm. Here we use squared error.
print('motion_full (MSE in cm) = ', np.mean([np.mean(mse_dict[i]) / data_num for i in range(args.seq_len)]))
if __name__ == '__main__':
main()
| 44.762097
| 134
| 0.597874
|
620cd740fd2575d89776a769e4b92024ea1d4a38
| 1,104
|
py
|
Python
|
PacoteDownload/ex084.py
|
NataliaNasu/cursoemvideo-python3
|
7c206a5ff347416cd83c7a665a7acaffe088f7e7
|
[
"MIT"
] | null | null | null |
PacoteDownload/ex084.py
|
NataliaNasu/cursoemvideo-python3
|
7c206a5ff347416cd83c7a665a7acaffe088f7e7
|
[
"MIT"
] | null | null | null |
PacoteDownload/ex084.py
|
NataliaNasu/cursoemvideo-python3
|
7c206a5ff347416cd83c7a665a7acaffe088f7e7
|
[
"MIT"
] | null | null | null |
# read the name and weight of several people. show how many were registered.
# build one listing of the heaviest and one of the lightest.
pessoas = list()
temporario = list()
maior = menor = 0
print(' \033[31mCADASTRO:\033[m ')
while True:
temporario.append(str(input('Nome: ')))
temporario.append(float(input('Peso: ')))
if len(pessoas) == 0:
maior = menor = temporario[1]
else:
if temporario[1] > maior:
maior = temporario[1]
if temporario[1] < menor:
menor = temporario[1]
pessoas.append(temporario[:])
temporario.clear()
opcao = str(input('Deseja continuar? [S/N] ')).upper()
while opcao not in 'SN':
opcao = str(input('Deseja continuar? [S/N] ')).upper()
if opcao == 'N':
break
print(f'Ao todo, foram cadastradas {len(pessoas)}.')
print(f'O maior peso foi {maior:.1f}kg. Peso de: ', end='')
for p in pessoas:
if p[1] == maior:
print(f'"{p[0]}', end='" ')
print()
print(f'O menor peso foi {menor:.1f}kg. Peso de: ', end='')
for p in pessoas:
if p[1] == menor:
print(f'"{p[0]}', end='" ')
| 31.542857
| 68
| 0.588768
|
30c4e9d091ea95a64e1ffed5eb661c60003a021b
| 56,478
|
py
|
Python
|
trac/versioncontrol/web_ui/changeset.py
|
trac-ja/trac-ja
|
8defc74c222e3dbe154dfb5eb34e8c1a1f663558
|
[
"BSD-3-Clause"
] | 1
|
2017-08-03T07:04:40.000Z
|
2017-08-03T07:04:40.000Z
|
trac/versioncontrol/web_ui/changeset.py
|
trac-ja/trac-ja
|
8defc74c222e3dbe154dfb5eb34e8c1a1f663558
|
[
"BSD-3-Clause"
] | null | null | null |
trac/versioncontrol/web_ui/changeset.py
|
trac-ja/trac-ja
|
8defc74c222e3dbe154dfb5eb34e8c1a1f663558
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
# Christian Boos <cboos@edgewall.org>
from __future__ import with_statement
from itertools import groupby
import os
import posixpath
import re
from StringIO import StringIO
from genshi.builder import tag
from trac.config import Option, BoolOption, IntOption
from trac.core import *
from trac.mimeview.api import Mimeview
from trac.perm import IPermissionRequestor
from trac.resource import Resource, ResourceNotFound
from trac.search import ISearchSource, search_to_sql, shorten_result
from trac.timeline.api import ITimelineEventProvider
from trac.util import as_bool, content_disposition, embedded_numbers, pathjoin
from trac.util.datefmt import from_utimestamp, pretty_timedelta
from trac.util.text import exception_to_unicode, to_unicode, \
unicode_urlencode, shorten_line, CRLF
from trac.util.translation import _, ngettext
from trac.versioncontrol.api import RepositoryManager, Changeset, Node, \
NoSuchChangeset
from trac.versioncontrol.diff import get_diff_options, diff_blocks, \
unified_diff
from trac.versioncontrol.web_ui.browser import BrowserModule
from trac.web import IRequestHandler, RequestDone
from trac.web.chrome import (Chrome, INavigationContributor, add_ctxtnav,
add_link, add_script, add_stylesheet,
prevnext_nav, web_context)
from trac.wiki import IWikiSyntaxProvider, WikiParser
from trac.wiki.formatter import format_to
class IPropertyDiffRenderer(Interface):
"""Render node properties in TracBrowser and TracChangeset views."""
def match_property_diff(name):
"""Indicate whether this renderer can treat the given property diffs
Returns a quality number, ranging from 0 (unsupported) to 9
(''perfect'' match).
"""
def render_property_diff(name, old_context, old_props,
new_context, new_props, options):
"""Render the given diff of property to HTML.
`name` is the property name as given to `match_property_diff()`,
        `old_context` corresponds to the old node being rendered
(useful when the rendering depends on the node kind)
and `old_props` is the corresponding collection of all properties.
Same for `new_node` and `new_props`.
`options` are the current diffs options.
The rendered result can be one of the following:
- `None`: the property change will be shown the normal way
(''changed from `old` to `new`'')
- an `unicode` value: the change will be shown as textual content
        - `Markup` or other Genshi content: the change will be shown as block
markup
"""
class DefaultPropertyDiffRenderer(Component):
"""Default version control property difference renderer."""
implements(IPropertyDiffRenderer)
def match_property_diff(self, name):
return 1
def render_property_diff(self, name, old_context, old_props,
new_context, new_props, options):
old, new = old_props[name], new_props[name]
# Render as diff only if multiline (see #3002)
if '\n' not in old and '\n' not in new:
return None
unidiff = '--- \n+++ \n' + \
'\n'.join(unified_diff(old.splitlines(), new.splitlines(),
options.get('contextlines', 3)))
return tag.li('Property ', tag.strong(name),
Mimeview(self.env).render(old_context, 'text/x-diff',
unidiff))
class ChangesetModule(Component):
"""Renderer providing flexible functionality for showing sets of
differences.
If the differences shown are coming from a specific changeset,
then that changeset information can be shown too.
In addition, it is possible to show only a subset of the changeset:
Only the changes affecting a given path will be shown. This is called
the ''restricted'' changeset.
But the differences can also be computed in a more general way,
between two arbitrary paths and/or between two arbitrary revisions.
In that case, there's no changeset information displayed.
"""
implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
ITimelineEventProvider, IWikiSyntaxProvider, ISearchSource)
property_diff_renderers = ExtensionPoint(IPropertyDiffRenderer)
timeline_show_files = Option('timeline', 'changeset_show_files', '0',
"""Number of files to show (`-1` for unlimited, `0` to disable).
This can also be `location`, for showing the common prefix for the
changed files. (since 0.11).
""")
timeline_long_messages = BoolOption('timeline', 'changeset_long_messages',
'false',
"""Whether wiki-formatted changeset messages should be multiline or
not.
If this option is not specified or is false and `wiki_format_messages`
is set to true, changeset messages will be single line only, losing
some formatting (bullet points, etc).""")
timeline_collapse = BoolOption('timeline', 'changeset_collapse_events',
'false',
"""Whether consecutive changesets from the same author having
exactly the same message should be presented as one event.
That event will link to the range of changesets in the log view.
(''since 0.11'')""")
max_diff_files = IntOption('changeset', 'max_diff_files', 0,
"""Maximum number of modified files for which the changeset view will
attempt to show the diffs inlined (''since 0.10'').""")
max_diff_bytes = IntOption('changeset', 'max_diff_bytes', 10000000,
"""Maximum total size in bytes of the modified files (their old size
plus their new size) for which the changeset view will attempt to show
the diffs inlined (''since 0.10'').""")
wiki_format_messages = BoolOption('changeset', 'wiki_format_messages',
'true',
"""Whether wiki formatting should be applied to changeset messages.
If this option is disabled, changeset messages will be rendered as
pre-formatted text.""")
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'browser'
def get_navigation_items(self, req):
return []
# IPermissionRequestor methods
def get_permission_actions(self):
return ['CHANGESET_VIEW']
# IRequestHandler methods
_request_re = re.compile(r"/changeset(?:/([^/]+)(/.*)?)?$")
def match_request(self, req):
match = re.match(self._request_re, req.path_info)
if match:
new, new_path = match.groups()
if new:
req.args['new'] = new
if new_path:
req.args['new_path'] = new_path
return True
def process_request(self, req):
"""The appropriate mode of operation is inferred from the request
parameters:
* If `new_path` and `old_path` are equal (or `old_path` is omitted)
and `new` and `old` are equal (or `old` is omitted),
then we're about to view a revision Changeset: `chgset` is True.
Furthermore, if the path is not the root, the changeset is
''restricted'' to that path (only the changes affecting that path,
its children or its ancestor directories will be shown).
* In any other case, the set of changes corresponds to arbitrary
differences between path@rev pairs. If `new_path` and `old_path`
are equal, the ''restricted'' flag will also be set, meaning in this
case that the differences between two revisions are restricted to
those occurring on that path.
In any case, either path@rev pairs must exist.
"""
req.perm.require('CHANGESET_VIEW')
# -- retrieve arguments
full_new_path = new_path = req.args.get('new_path')
new = req.args.get('new')
full_old_path = old_path = req.args.get('old_path')
old = req.args.get('old')
reponame = req.args.get('reponame')
xhr = req.get_header('X-Requested-With') == 'XMLHttpRequest'
# -- support for the revision log ''View changes'' form,
# where we need to give the path and revision at the same time
if old and '@' in old:
old, old_path = old.split('@', 1)
if new and '@' in new:
new, new_path = new.split('@', 1)
rm = RepositoryManager(self.env)
if reponame:
repos = rm.get_repository(reponame)
else:
reponame, repos, new_path = rm.get_repository_by_path(new_path)
if old_path:
old_reponame, old_repos, old_path = \
rm.get_repository_by_path(old_path)
if old_repos != repos:
raise TracError(_("Can't compare across different "
"repositories: %(old)s vs. %(new)s",
old=old_reponame, new=reponame))
if not repos:
if reponame or (new_path and new_path != '/'):
raise TracError(_("Repository '%(repo)s' not found",
repo=reponame or new_path.strip('/')))
else:
raise TracError(_("No repository specified and no default "
"repository configured."))
# -- normalize and check for special case
try:
new_path = repos.normalize_path(new_path)
new = repos.normalize_rev(new)
full_new_path = '/' + pathjoin(repos.reponame, new_path)
old_path = repos.normalize_path(old_path or new_path)
old = repos.normalize_rev(old or new)
full_old_path = '/' + pathjoin(repos.reponame, old_path)
except NoSuchChangeset, e:
raise ResourceNotFound(e.message, _('Invalid Changeset Number'))
if old_path == new_path and old == new: # revert to Changeset
old_path = old = None
style, options, diff_data = get_diff_options(req)
diff_opts = diff_data['options']
# -- setup the `chgset` and `restricted` flags, see docstring above.
chgset = not old and not old_path
if chgset:
restricted = new_path not in ('', '/') # (subset or not)
else:
restricted = old_path == new_path # (same path or not)
# -- redirect if changing the diff options or alias requested
if 'update' in req.args or reponame != repos.reponame:
contextall = diff_opts['contextall'] or None
reponame = repos.reponame or None
if chgset:
if restricted:
req.redirect(req.href.changeset(new, reponame, new_path,
contextall=contextall))
else:
req.redirect(req.href.changeset(new, reponame,
contextall=contextall))
else:
req.redirect(req.href.changeset(new, reponame,
new_path, old=old,
old_path=full_old_path,
contextall=contextall))
# -- preparing the data
if chgset:
prev = repos.get_node(new_path, new).get_previous()
if prev:
prev_path, prev_rev = prev[:2]
else:
prev_path, prev_rev = new_path, repos.previous_rev(new)
data = {'old_path': prev_path, 'old_rev': prev_rev,
'new_path': new_path, 'new_rev': new}
else:
if not new:
new = repos.youngest_rev
elif not old:
old = repos.youngest_rev
if not old_path:
old_path = new_path
data = {'old_path': old_path, 'old_rev': old,
'new_path': new_path, 'new_rev': new}
data.update({'repos': repos, 'reponame': repos.reponame or None,
'diff': diff_data,
'wiki_format_messages': self.wiki_format_messages})
if chgset:
chgset = repos.get_changeset(new)
req.perm(chgset.resource).require('CHANGESET_VIEW')
# TODO: find a cheaper way to reimplement r2636
req.check_modified(chgset.date, [
style, ''.join(options), repos.name,
diff_opts['contextlines'], diff_opts['contextall'],
repos.rev_older_than(new, repos.youngest_rev),
chgset.message, xhr,
pretty_timedelta(chgset.date, None, 3600)])
format = req.args.get('format')
if format in ['diff', 'zip']:
# choosing an appropriate filename
rpath = new_path.replace('/','_')
if chgset:
if restricted:
filename = 'changeset_%s_%s' % (rpath, new)
else:
filename = 'changeset_%s' % new
else:
if restricted:
filename = 'diff-%s-from-%s-to-%s' \
% (rpath, old, new)
elif old_path == '/': # special case for download (#238)
filename = '%s-%s' % (rpath, old)
else:
filename = 'diff-from-%s-%s-to-%s-%s' \
% (old_path.replace('/','_'), old, rpath, new)
if format == 'diff':
self._render_diff(req, filename, repos, data)
elif format == 'zip':
self._render_zip(req, filename, repos, data)
# -- HTML format
self._render_html(req, repos, chgset, restricted, xhr, data)
if chgset:
diff_params = 'new=%s' % new
else:
diff_params = unicode_urlencode({
'new_path': full_new_path, 'new': new,
'old_path': full_old_path, 'old': old})
add_link(req, 'alternate', '?format=diff&' + diff_params,
_('Unified Diff'), 'text/plain', 'diff')
add_link(req, 'alternate', '?format=zip&' + diff_params,
_('Zip Archive'), 'application/zip', 'zip')
add_script(req, 'common/js/diff.js')
add_stylesheet(req, 'common/css/changeset.css')
add_stylesheet(req, 'common/css/diff.css')
add_stylesheet(req, 'common/css/code.css')
if chgset:
if restricted:
prevnext_nav(req, _('Previous Change'), _('Next Change'))
else:
prevnext_nav(req, _('Previous Changeset'), _('Next Changeset'))
else:
rev_href = req.href.changeset(old, full_old_path,
old=new, old_path=full_new_path)
add_ctxtnav(req, _('Reverse Diff'), href=rev_href)
return 'changeset.html', data, None
# Internal methods
def _render_html(self, req, repos, chgset, restricted, xhr, data):
"""HTML version"""
data['restricted'] = restricted
display_rev = repos.display_rev
data['display_rev'] = display_rev
browser = BrowserModule(self.env)
reponame = repos.reponame or None
if chgset: # Changeset Mode (possibly restricted on a path)
path, rev = data['new_path'], data['new_rev']
# -- getting the change summary from the Changeset.get_changes
def get_changes():
for npath, kind, change, opath, orev in chgset.get_changes():
old_node = new_node = None
if (restricted and
not (npath == path or # same path
npath.startswith(path + '/') or # npath is below
path.startswith(npath + '/'))): # npath is above
continue
if change != Changeset.ADD:
old_node = repos.get_node(opath, orev)
if change != Changeset.DELETE:
new_node = repos.get_node(npath, rev)
else:
# support showing paths deleted below a copy target
old_node.path = npath
yield old_node, new_node, kind, change
def _changeset_title(rev):
rev = display_rev(rev)
if restricted:
return _('Changeset %(id)s for %(path)s', id=rev,
path=path)
else:
return _('Changeset %(id)s', id=rev)
data['changeset'] = chgset
title = _changeset_title(rev)
# Support for revision properties (#2545)
context = web_context(req, 'changeset', chgset.rev,
parent=repos.resource)
data['context'] = context
revprops = chgset.get_properties()
data['properties'] = browser.render_properties('revprop', context,
revprops)
oldest_rev = repos.oldest_rev
if chgset.rev != oldest_rev:
if restricted:
prev = repos.get_node(path, rev).get_previous()
if prev:
prev_path, prev_rev = prev[:2]
if prev_rev:
prev_href = req.href.changeset(prev_rev, reponame,
prev_path)
else:
prev_path = prev_rev = None
else:
add_link(req, 'first',
req.href.changeset(oldest_rev, reponame),
_('Changeset %(id)s', id=display_rev(oldest_rev)))
prev_path = data['old_path']
prev_rev = repos.previous_rev(chgset.rev)
if prev_rev:
prev_href = req.href.changeset(prev_rev, reponame)
if prev_rev:
add_link(req, 'prev', prev_href,
_changeset_title(prev_rev))
youngest_rev = repos.youngest_rev
if str(chgset.rev) != str(youngest_rev):
if restricted:
next_rev = repos.next_rev(chgset.rev, path)
if next_rev:
if repos.has_node(path, next_rev):
next_href = req.href.changeset(next_rev, reponame,
path)
else: # must be a 'D'elete or 'R'ename, show full cset
next_href = req.href.changeset(next_rev, reponame)
else:
add_link(req, 'last',
req.href.changeset(youngest_rev, reponame),
_('Changeset %(id)s',
id=display_rev(youngest_rev)))
next_rev = repos.next_rev(chgset.rev)
if next_rev:
next_href = req.href.changeset(next_rev, reponame)
if next_rev:
add_link(req, 'next', next_href,
_changeset_title(next_rev))
else: # Diff Mode
# -- getting the change summary from the Repository.get_changes
def get_changes():
for d in repos.get_changes(
new_path=data['new_path'], new_rev=data['new_rev'],
old_path=data['old_path'], old_rev=data['old_rev']):
yield d
title = self.title_for_diff(data)
data['changeset'] = False
data['title'] = title
if 'BROWSER_VIEW' not in req.perm:
return
def node_info(node, annotated):
href = req.href.browser(
reponame, node.created_path, rev=node.created_rev,
annotate='blame' if annotated else None)
title = _('Show revision %(rev)s of this file in browser',
rev=display_rev(node.rev))
return {'path': node.path, 'rev': node.rev,
'shortrev': repos.short_rev(node.rev),
'href': href, 'title': title}
# Reminder: node.path may not exist at node.rev
# as long as node.rev==node.created_rev
# ... and data['old_rev'] may have nothing to do
# with _that_ node specific history...
options = data['diff']['options']
def _prop_changes(old_node, new_node):
old_props = old_node.get_properties()
new_props = new_node.get_properties()
old_ctx = web_context(req, old_node.resource)
new_ctx = web_context(req, new_node.resource)
changed_properties = []
if old_props != new_props:
for k, v in sorted(old_props.items()):
new = old = diff = None
if not k in new_props:
old = v # won't be displayed, no need to render it
elif v != new_props[k]:
diff = self.render_property_diff(
k, old_ctx, old_props, new_ctx, new_props, options)
if not diff:
old = browser.render_property(k, 'changeset',
old_ctx, old_props)
new = browser.render_property(k, 'changeset',
new_ctx, new_props)
if new or old or diff:
changed_properties.append({'name': k, 'old': old,
'new': new, 'diff': diff})
for k, v in sorted(new_props.items()):
if not k in old_props:
new = browser.render_property(k, 'changeset',
new_ctx, new_props)
if new is not None:
changed_properties.append({'name': k, 'new': new,
'old': None})
return changed_properties
def _estimate_changes(old_node, new_node):
old_size = old_node.get_content_length()
new_size = new_node.get_content_length()
return old_size + new_size
def _content_changes(old_node, new_node):
"""Returns the list of differences.
The list is empty when no differences between comparable files
are detected, but the return value is None for non-comparable
files.
"""
mview = Mimeview(self.env)
if mview.is_binary(old_node.content_type, old_node.path):
return None
if mview.is_binary(new_node.content_type, new_node.path):
return None
old_content = old_node.get_content().read()
if mview.is_binary(content=old_content):
return None
new_content = new_node.get_content().read()
if mview.is_binary(content=new_content):
return None
old_content = mview.to_unicode(old_content, old_node.content_type)
new_content = mview.to_unicode(new_content, new_node.content_type)
if old_content != new_content:
context = options.get('contextlines', 3)
if context < 0 or options.get('contextall'):
context = None
tabwidth = self.config['diff'].getint('tab_width') or \
self.config['mimeviewer'].getint('tab_width', 8)
ignore_blank_lines = options.get('ignoreblanklines')
ignore_case = options.get('ignorecase')
ignore_space = options.get('ignorewhitespace')
return diff_blocks(old_content.splitlines(),
new_content.splitlines(),
context, tabwidth,
ignore_blank_lines=ignore_blank_lines,
ignore_case=ignore_case,
ignore_space_changes=ignore_space)
else:
return []
diff_bytes = diff_files = 0
if self.max_diff_bytes or self.max_diff_files:
for old_node, new_node, kind, change in get_changes():
if change in Changeset.DIFF_CHANGES and kind == Node.FILE \
and old_node.is_viewable(req.perm) \
and new_node.is_viewable(req.perm):
diff_files += 1
diff_bytes += _estimate_changes(old_node, new_node)
show_diffs = (not self.max_diff_files or \
0 < diff_files <= self.max_diff_files) and \
(not self.max_diff_bytes or \
diff_bytes <= self.max_diff_bytes or \
diff_files == 1)
# XHR is used for blame support: display the changeset view without
# the navigation and with the changes concerning the annotated file
annotated = False
if xhr:
show_diffs = False
annotated = repos.normalize_path(req.args.get('annotate'))
has_diffs = False
filestats = self._prepare_filestats()
changes = []
files = []
for old_node, new_node, kind, change in get_changes():
props = []
diffs = []
show_old = old_node and old_node.is_viewable(req.perm)
show_new = new_node and new_node.is_viewable(req.perm)
show_entry = change != Changeset.EDIT
show_diff = show_diffs or (new_node and new_node.path == annotated)
if change in Changeset.DIFF_CHANGES and show_old and show_new:
assert old_node and new_node
props = _prop_changes(old_node, new_node)
if props:
show_entry = True
if kind == Node.FILE and show_diff:
diffs = _content_changes(old_node, new_node)
if diffs != []:
if diffs:
has_diffs = True
# elif None (means: manually compare to (previous))
show_entry = True
if (show_old or show_new) and (show_entry or not show_diff):
info = {'change': change,
'old': old_node and node_info(old_node, annotated),
'new': new_node and node_info(new_node, annotated),
'props': props,
'diffs': diffs}
files.append(new_node.path if new_node else \
old_node.path if old_node else '')
filestats[change] += 1
if change in Changeset.DIFF_CHANGES:
if chgset:
href = req.href.changeset(new_node.rev, reponame,
new_node.path)
title = _('Show the changeset %(id)s restricted to '
'%(path)s', id=display_rev(new_node.rev),
path=new_node.path)
else:
href = req.href.changeset(
new_node.created_rev, reponame,
new_node.created_path,
old=old_node.created_rev,
old_path=pathjoin(repos.reponame,
old_node.created_path))
title = _('Show the %(range)s differences restricted '
'to %(path)s', range='[%s:%s]' % (
display_rev(old_node.rev),
display_rev(new_node.rev)),
path=new_node.path)
info['href'] = href
info['title'] = old_node and title
if change in Changeset.DIFF_CHANGES and not show_diff:
info['hide_diff'] = True
else:
info = None
changes.append(info) # the sequence should be immutable
data.update({'has_diffs': has_diffs, 'changes': changes, 'xhr': xhr,
'filestats': filestats, 'annotated': annotated,
'files': files,
'location': self._get_parent_location(files),
'longcol': 'Revision', 'shortcol': 'r'})
if xhr: # render and return the content only
stream = Chrome(self.env).render_template(req, 'changeset.html',
data, fragment=True)
content = stream.select('//div[@id="content"]')
str_content = content.render('xhtml', encoding='utf-8')
req.send_header('Content-Length', len(str_content))
req.end_headers()
req.write(str_content)
raise RequestDone
return data
def _render_diff(self, req, filename, repos, data):
"""Raw Unified Diff version"""
req.send_response(200)
req.send_header('Content-Type', 'text/x-patch;charset=utf-8')
req.send_header('Content-Disposition',
content_disposition('attachment', filename + '.diff'))
buf = StringIO()
mimeview = Mimeview(self.env)
for old_node, new_node, kind, change in repos.get_changes(
new_path=data['new_path'], new_rev=data['new_rev'],
old_path=data['old_path'], old_rev=data['old_rev']):
# TODO: Property changes
# Content changes
if kind == Node.DIRECTORY:
continue
new_content = old_content = ''
new_node_info = old_node_info = ('','')
if old_node:
if not old_node.is_viewable(req.perm):
continue
if mimeview.is_binary(old_node.content_type, old_node.path):
continue
old_content = old_node.get_content().read()
if mimeview.is_binary(content=old_content):
continue
old_node_info = (old_node.path, old_node.rev)
old_content = mimeview.to_unicode(old_content,
old_node.content_type)
if new_node:
if not new_node.is_viewable(req.perm):
continue
if mimeview.is_binary(new_node.content_type, new_node.path):
continue
new_content = new_node.get_content().read()
if mimeview.is_binary(content=new_content):
continue
new_node_info = (new_node.path, new_node.rev)
new_path = new_node.path
new_content = mimeview.to_unicode(new_content,
new_node.content_type)
else:
old_node_path = repos.normalize_path(old_node.path)
diff_old_path = repos.normalize_path(data['old_path'])
new_path = pathjoin(data['new_path'],
old_node_path[len(diff_old_path) + 1:])
if old_content != new_content:
options = data['diff']['options']
context = options.get('contextlines', 3)
if context < 0 or options.get('contextall'):
context = 3 # FIXME: unified_diff bugs with context=None
ignore_blank_lines = options.get('ignoreblanklines')
ignore_case = options.get('ignorecase')
ignore_space = options.get('ignorewhitespace')
if not old_node_info[0]:
old_node_info = new_node_info # support for 'A'dd changes
buf.write('Index: ' + new_path + CRLF)
buf.write('=' * 67 + CRLF)
buf.write('--- %s\t(revision %s)' % old_node_info + CRLF)
buf.write('+++ %s\t(revision %s)' % new_node_info + CRLF)
for line in unified_diff(old_content.splitlines(),
new_content.splitlines(), context,
ignore_blank_lines=ignore_blank_lines,
ignore_case=ignore_case,
ignore_space_changes=ignore_space):
buf.write(line + CRLF)
diff_str = buf.getvalue().encode('utf-8')
req.send_header('Content-Length', len(diff_str))
req.end_headers()
req.write(diff_str)
raise RequestDone
def _render_zip(self, req, filename, repos, data):
"""ZIP archive containing all the added and/or modified files."""
req.send_response(200)
req.send_header('Content-Type', 'application/zip')
req.send_header('Content-Disposition',
content_disposition('attachment', filename + '.zip'))
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED as compression
buf = StringIO()
zipfile = ZipFile(buf, 'w', compression)
for old_node, new_node, kind, change in repos.get_changes(
new_path=data['new_path'], new_rev=data['new_rev'],
old_path=data['old_path'], old_rev=data['old_rev']):
if (kind == Node.FILE or kind == Node.DIRECTORY) and \
change != Changeset.DELETE \
and new_node.is_viewable(req.perm):
zipinfo = ZipInfo()
# Note: unicode filenames are not supported by zipfile.
# UTF-8 is not supported by all Zip tools either,
# but as some do, UTF-8 is the best option here.
zipinfo.filename = new_node.path.strip('/').encode('utf-8')
zipinfo.flag_bits |= 0x800 # filename is encoded with utf-8
zipinfo.date_time = \
new_node.last_modified.astimezone(req.tz).timetuple()[:6]
zipinfo.compress_type = compression
# setting zipinfo.external_attr is needed since Python 2.5
if new_node.isfile:
zipinfo.external_attr = 0644 << 16L
content = new_node.get_content().read()
elif new_node.isdir:
zipinfo.filename += '/'
zipinfo.external_attr = 040755 << 16L
content = ''
zipfile.writestr(zipinfo, content)
zipfile.close()
zip_str = buf.getvalue()
req.send_header("Content-Length", len(zip_str))
req.end_headers()
req.write(zip_str)
raise RequestDone
def title_for_diff(self, data):
# TRANSLATOR: 'latest' (revision)
latest = _('latest')
if data['new_path'] == data['old_path']:
# ''diff between 2 revisions'' mode
return _('Diff [%(old_rev)s:%(new_rev)s] for %(path)s',
old_rev=data['old_rev'] or latest,
new_rev=data['new_rev'] or latest,
path=data['new_path'] or '/')
else:
# ''generalized diff'' mode
return _('Diff from %(old_path)s@%(old_rev)s to %(new_path)s@'
'%(new_rev)s',
old_path=data['old_path'] or '/',
old_rev=data['old_rev'] or latest,
new_path=data['new_path'] or '/',
new_rev=data['new_rev'] or latest)
def render_property_diff(self, name, old_node, old_props,
new_node, new_props, options):
"""Renders diffs of a node property to HTML."""
if name in BrowserModule(self.env).hidden_properties:
return
candidates = []
for renderer in self.property_diff_renderers:
quality = renderer.match_property_diff(name)
if quality > 0:
candidates.append((quality, renderer))
candidates.sort(reverse=True)
for (quality, renderer) in candidates:
try:
return renderer.render_property_diff(name, old_node, old_props,
new_node, new_props,
options)
except Exception, e:
self.log.warning('Diff rendering failed for property %s with '
'renderer %s: %s', name,
renderer.__class__.__name__,
exception_to_unicode(e, traceback=True))
def _get_location(self, files):
"""Return the deepest common path for the given files.
If all the files are actually the same, return that location."""
if len(files) == 1:
return files[0]
else:
return '/'.join(os.path.commonprefix([f.split('/')
for f in files]))
def _get_parent_location(self, files):
"""Only get a location when there are different files,
otherwise return the empty string."""
if files:
files.sort()
prev = files[0]
for f in files[1:]:
if f != prev:
return self._get_location(files)
return ''
def _prepare_filestats(self):
filestats = {}
for chg in Changeset.ALL_CHANGES:
filestats[chg] = 0
return filestats
# ITimelineEventProvider methods
def get_timeline_filters(self, req):
if 'CHANGESET_VIEW' in req.perm:
# Non-'hidden' repositories will be listed as additional
# repository filters, unless there is only a single repository.
filters = []
rm = RepositoryManager(self.env)
repositories = rm.get_real_repositories()
if len(repositories) > 1:
filters = [
('repo-' + repos.reponame,
u"\xa0\xa0-\xa0" + (repos.reponame or _('(default)')))
for repos in repositories
if not as_bool(repos.params.get('hidden'))
and repos.is_viewable(req.perm)]
filters.sort()
add_script(req, 'common/js/timeline_multirepos.js')
changeset_label = _('Changesets in all repositories')
else:
changeset_label = _('Repository changesets')
filters.insert(0, ('changeset', changeset_label))
return filters
else:
return []
def get_timeline_events(self, req, start, stop, filters):
all_repos = 'changeset' in filters
repo_filters = set(f for f in filters if f.startswith('repo-'))
if all_repos or repo_filters:
show_files = self.timeline_show_files
show_location = show_files == 'location'
if show_files in ('-1', 'unlimited'):
show_files = -1
elif show_files.isdigit():
show_files = int(show_files)
else:
show_files = 0 # disabled
if self.timeline_collapse:
collapse_changesets = lambda c: (c.author, c.message)
else:
collapse_changesets = lambda c: c.rev
uids_seen = {}
def generate_changesets(repos):
for _, changesets in groupby(repos.get_changesets(start, stop),
key=collapse_changesets):
viewable_changesets = []
for cset in changesets:
cset_resource = Resource('changeset', cset.rev,
parent=repos.resource)
if cset.is_viewable(req.perm):
repos_for_uid = [repos.reponame]
uid = repos.get_changeset_uid(cset.rev)
if uid:
# uid can be seen in multiple repositories
if uid in uids_seen:
uids_seen[uid].append(repos.reponame)
continue # already viewable, simply append
uids_seen[uid] = repos_for_uid
viewable_changesets.append((cset, cset_resource,
repos_for_uid))
if viewable_changesets:
cset = viewable_changesets[-1][0]
yield ('changeset', cset.date, cset.author,
(viewable_changesets,
show_location, show_files))
rm = RepositoryManager(self.env)
for repos in sorted(rm.get_real_repositories(),
key=lambda repos: repos.reponame):
if all_repos or ('repo-' + repos.reponame) in repo_filters:
try:
for event in generate_changesets(repos):
yield event
except TracError, e:
self.log.error("Timeline event provider for repository"
" '%s' failed: %r",
repos.reponame, exception_to_unicode(e))
def render_timeline_event(self, context, field, event):
changesets, show_location, show_files = event[3]
cset, cset_resource, repos_for_uid = changesets[0]
older_cset = changesets[-1][0]
message = cset.message or ''
reponame = cset_resource.parent.id
rev_b, rev_a = cset.rev, older_cset.rev
if field == 'url':
if rev_a == rev_b:
return context.href.changeset(rev_a, reponame or None)
else:
return context.href.log(reponame or None, rev=rev_b,
stop_rev=rev_a)
elif field == 'description':
if self.wiki_format_messages:
markup = ''
if self.timeline_long_messages: # override default flavor
context = context.child()
context.set_hints(wiki_flavor='html',
preserve_newlines=True)
else:
markup = message
message = None
if 'BROWSER_VIEW' in context.perm:
files = []
if show_location:
filestats = self._prepare_filestats()
for c, r, repos_for_c in changesets:
for chg in c.get_changes():
resource = c.resource.parent.child('source',
chg[0] or '/', r.id)
if not 'FILE_VIEW' in context.perm(resource):
continue
filestats[chg[2]] += 1
files.append(chg[0])
stats = [(tag.div(class_=kind),
tag.span(count, ' ',
count > 1 and
(kind == 'copy' and
'copies' or kind + 's') or kind))
for kind in Changeset.ALL_CHANGES
for count in (filestats[kind],) if count]
markup = tag.ul(
tag.li(stats, ' in ',
tag.strong(self._get_location(files) or '/')),
markup, class_="changes")
elif show_files:
unique_files = set()
for c, r, repos_for_c in changesets:
for chg in c.get_changes():
resource = c.resource.parent.child('source',
chg[0] or '/', r.id)
if not 'FILE_VIEW' in context.perm(resource):
continue
if show_files > 0 and len(files) > show_files:
break
unique_files.add((chg[0], chg[2]))
files = [tag.li(tag.div(class_=mod), path or '/')
for path, mod in sorted(unique_files)]
if show_files > 0 and len(files) > show_files:
files = files[:show_files] + [tag.li(u'\u2026')]
markup = tag(tag.ul(files, class_="changes"), markup)
if message:
markup += format_to(self.env, None,
context.child(cset_resource), message)
return markup
single = rev_a == rev_b
if not repos_for_uid[0]:
repos_for_uid[0] = _('(default)')
if reponame or len(repos_for_uid) > 1:
title = ngettext('Changeset in %(repo)s ',
'Changesets in %(repo)s ',
1 if single else 2, repo=', '.join(repos_for_uid))
else:
title = ngettext('Changeset ', 'Changesets ', 1 if single else 2)
drev_a = older_cset.repos.display_rev(rev_a)
if single:
title = tag(title, tag.em('[%s]' % drev_a))
else:
drev_b = cset.repos.display_rev(rev_b)
title = tag(title, tag.em('[%s-%s]' % (drev_a, drev_b)))
if field == 'title':
labels = []
for name, head in cset.get_branches():
if not head and name in ('default', 'master'):
continue
class_ = 'branch'
if head:
class_ += ' head'
labels.append(tag.span(name, class_=class_))
for name in cset.get_tags():
labels.append(tag.span(name, class_='tag'))
return title if not labels else tag(title, labels)
elif field == 'summary':
return _("%(title)s: %(message)s",
title=title, message=shorten_line(message))
# IWikiSyntaxProvider methods
CHANGESET_ID = r"(?:[0-9]+|[a-fA-F0-9]{8,})" # only "long enough" hexa ids
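    # (Hedged examples of the link forms matched by get_wiki_syntax() below;
    #  illustrations only, not upstream documentation:
    #    r1234, [1234], [a1b2c3d4], [1234/trunk/README] -> changeset links
    #    r1:3 is deliberately left to the log link syntax.)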
def get_wiki_syntax(self):
yield (
# [...] form: start with optional intertrac: [T... or [trac ...
r"!?\[(?P<it_changeset>%s\s*)" % WikiParser.INTERTRAC_SCHEME +
# hex digits + optional /path for the restricted changeset
# + optional query and fragment
r"%s(?:/[^\]]*)?(?:\?[^\]]*)?(?:#[^\]]*)?\]|" % self.CHANGESET_ID +
# r... form: allow r1 but not r1:2 (handled by the log syntax)
r"(?:\b|!)r[0-9]+\b(?!:[0-9])(?:/[a-zA-Z0-9_/+-]+)?",
lambda x, y, z:
self._format_changeset_link(x, 'changeset',
y[1:] if y[0] == 'r' else y[1:-1],
y, z))
def get_link_resolvers(self):
yield ('changeset', self._format_changeset_link)
yield ('diff', self._format_diff_link)
def _format_changeset_link(self, formatter, ns, chgset, label,
fullmatch=None):
intertrac = formatter.shorthand_intertrac_helper(ns, chgset, label,
fullmatch)
if intertrac:
return intertrac
# identifying repository
rm = RepositoryManager(self.env)
chgset, params, fragment = formatter.split_link(chgset)
sep = chgset.find('/')
if sep > 0:
rev, path = chgset[:sep], chgset[sep:]
else:
rev, path = chgset, '/'
try:
reponame, repos, path = rm.get_repository_by_path(path)
if not reponame:
reponame = rm.get_default_repository(formatter.context)
if reponame is not None:
repos = rm.get_repository(reponame)
if path == '/':
path = None
# rendering changeset link
if repos:
changeset = repos.get_changeset(rev)
if changeset.is_viewable(formatter.perm):
href = formatter.href.changeset(rev,
repos.reponame or None,
path)
return tag.a(label, class_="changeset",
title=shorten_line(changeset.message),
href=href + params + fragment)
errmsg = _("No permission to view changeset %(rev)s "
"on %(repos)s", rev=rev,
repos=reponame or _('(default)'))
elif reponame:
errmsg = _("Repository '%(repo)s' not found", repo=reponame)
else:
errmsg = _("No default repository defined")
except TracError, e:
errmsg = to_unicode(e)
return tag.a(label, class_="missing changeset", title=errmsg)
def _format_diff_link(self, formatter, ns, target, label):
params, query, fragment = formatter.split_link(target)
def pathrev(path):
if '@' in path:
return path.split('@', 1)
else:
return (path, None)
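        # (Hedged illustration of the two target forms parsed below, not from
        #  the original source: "old_path@old_rev//new_path@new_rev" compares
        #  two path@rev pairs, while "path@old_rev:new_rev" compares two
        #  revisions of the same path.)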
if '//' in params:
p1, p2 = params.split('//', 1)
old, new = pathrev(p1), pathrev(p2)
data = {'old_path': old[0], 'old_rev': old[1],
'new_path': new[0], 'new_rev': new[1]}
else:
old_path, old_rev = pathrev(params)
new_rev = None
if old_rev and ':' in old_rev:
old_rev, new_rev = old_rev.split(':', 1)
data = {'old_path': old_path, 'old_rev': old_rev,
'new_path': old_path, 'new_rev': new_rev}
title = self.title_for_diff(data)
href = None
if any(data.values()):
if query:
query = '&' + query[1:]
href = formatter.href.changeset(new_path=data['new_path'] or None,
new=data['new_rev'],
old_path=data['old_path'] or None,
old=data['old_rev']) + query
return tag.a(label, class_="changeset", title=title, href=href)
# ISearchSource methods
### FIXME: move this specific implementation into cache.py
def get_search_filters(self, req):
if 'CHANGESET_VIEW' in req.perm:
yield ('changeset', _('Changesets'))
def get_search_results(self, req, terms, filters):
if not 'changeset' in filters:
return
rm = RepositoryManager(self.env)
repositories = dict((repos.params['id'], repos)
for repos in rm.get_real_repositories())
with self.env.db_query as db:
sql, args = search_to_sql(db, ['rev', 'message', 'author'], terms)
for id, rev, ts, author, log in db("""
SELECT repos, rev, time, author, message
FROM revision WHERE """ + sql,
args):
try:
rev = int(rev)
except ValueError:
pass
repos = repositories.get(id)
if not repos:
continue # revisions for a no longer active repository
cset = repos.resource.child('changeset', rev)
if 'CHANGESET_VIEW' in req.perm(cset):
yield (req.href.changeset(rev, repos.reponame or None),
'[%s]: %s' % (rev, shorten_line(log)),
from_utimestamp(ts), author,
shorten_result(log, terms))
class AnyDiffModule(Component):
implements(IRequestHandler)
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/diff'
def process_request(self, req):
rm = RepositoryManager(self.env)
if req.get_header('X-Requested-With') == 'XMLHttpRequest':
dirname, prefix = posixpath.split(req.args.get('q'))
prefix = prefix.lower()
reponame, repos, path = rm.get_repository_by_path(dirname)
# an entry is a (isdir, name, path) tuple
def kind_order(entry):
return (not entry[0], embedded_numbers(entry[1]))
entries = []
if repos:
entries.extend((e.isdir, e.name,
'/' + pathjoin(repos.reponame, e.path))
for e in repos.get_node(path).get_entries()
if e.is_viewable(req.perm))
if not reponame:
entries.extend((True, repos.reponame, '/' + repos.reponame)
for repos in rm.get_real_repositories()
if repos.is_viewable(req.perm))
elem = tag.ul(
[tag.li(tag.b(path) if isdir else path)
for (isdir, name, path) in sorted(entries, key=kind_order)
if name.lower().startswith(prefix)])
xhtml = elem.generate().render('xhtml', encoding='utf-8')
req.send_header('Content-Length', len(xhtml))
req.write(xhtml)
return
# -- retrieve arguments
new_path = req.args.get('new_path')
new_rev = req.args.get('new_rev')
old_path = req.args.get('old_path')
old_rev = req.args.get('old_rev')
# -- normalize and prepare rendering
new_reponame, new_repos, new_path = \
rm.get_repository_by_path(new_path)
old_reponame, old_repos, old_path = \
rm.get_repository_by_path(old_path)
data = {}
if new_repos:
data.update(new_path='/' + pathjoin(new_repos.reponame, new_path),
new_rev=new_repos.normalize_rev(new_rev))
else:
data.update(new_path=req.args.get('new_path'), new_rev=new_rev)
if old_repos:
data.update(old_path='/' + pathjoin(old_repos.reponame, old_path),
old_rev=old_repos.normalize_rev(old_rev))
else:
data.update(old_path=req.args.get('old_path'), old_rev=old_rev)
add_script(req, 'common/js/suggest.js')
return 'diff_form.html', data, None
| 45.400322
| 79
| 0.519724
|
7109031a45a0b1008e4a0e0fbbd0864cbbcc6653
| 3,289
|
py
|
Python
|
invenio_records_resources/services/records/schema.py
|
tzubaki/invenio-records-resources
|
82e77d5f6e89abf38c871e912cc97b4bdf289c20
|
[
"MIT"
] | null | null | null |
invenio_records_resources/services/records/schema.py
|
tzubaki/invenio-records-resources
|
82e77d5f6e89abf38c871e912cc97b4bdf289c20
|
[
"MIT"
] | null | null | null |
invenio_records_resources/services/records/schema.py
|
tzubaki/invenio-records-resources
|
82e77d5f6e89abf38c871e912cc97b4bdf289c20
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2022 TU Wien.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Record schema."""
from datetime import timezone
from marshmallow import Schema, ValidationError, fields, pre_load
from marshmallow_utils.fields import Links, TZDateTime
from invenio_records_resources.errors import validation_error_to_list_errors
#
# The default record schema
#
class BaseRecordSchema(Schema):
"""Schema for records v1 in JSON."""
id = fields.Str()
created = TZDateTime(timezone=timezone.utc, format='iso', dump_only=True)
updated = TZDateTime(timezone=timezone.utc, format='iso', dump_only=True)
links = Links(dump_only=True)
revision_id = fields.Integer(dump_only=True)
@pre_load
def clean(self, data, **kwargs):
"""Removes dump_only fields.
        Why: We want to allow the output of a Schema dump to be a valid input
to a Schema load without causing strange issues.
"""
for name, field in self.fields.items():
if field.dump_only:
data.pop(name, None)
return data
class ServiceSchemaWrapper:
"""Schema wrapper that enhances load/dump of wrapped schema.
It:
- allows strict (raises errors) / lax (reports them) loading by schema
- constructs the context for the schema
* injects the field permission check in the context
"""
def __init__(self, service, schema):
"""Constructor."""
self.schema = schema
# TODO: Change constructor to accept a permission_policy_cls directly
self._permission_policy_cls = service.config.permission_policy_cls
def _build_context(self, base_context):
context = {**base_context}
default_identity = context["identity"] # identity required in context
def _permission_check(action, identity=default_identity, **kwargs):
return (
# TODO: See if context is necessary here
self._permission_policy_cls(action, **context, **kwargs)
.allows(identity)
)
context.setdefault('field_permission_check', _permission_check)
return context
def load(self, data, schema_args=None, context=None, raise_errors=True):
"""Load data with dynamic schema_args + context + raise or not."""
schema_args = schema_args or {}
base_context = context or {}
context = self._build_context(base_context)
try:
valid_data = self.schema(context=context, **schema_args).load(data)
errors = []
except ValidationError as e:
if raise_errors:
raise
valid_data = e.valid_data
errors = validation_error_to_list_errors(e)
return valid_data, errors
def dump(self, data, schema_args=None, context=None):
"""Dump data using wrapped schema and dynamic schema_args + context."""
schema_args = schema_args or {}
base_context = context or {}
context = self._build_context(base_context)
return self.schema(context=context, **schema_args).dump(data)
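# (Hedged usage sketch; `my_service`, `MySchema`, `identity` and `payload` are
#  placeholders, not part of this module.)
#
#   wrapper = ServiceSchemaWrapper(my_service, schema=MySchema)
#   # strict load: raises ValidationError on invalid input
#   data, _ = wrapper.load(payload, context={"identity": identity})
#   # lax load: returns the valid part plus a list of error dicts
#   data, errors = wrapper.load(
#       payload, context={"identity": identity}, raise_errors=False)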
| 33.907216
| 79
| 0.658559
|
1b9e47518916743488b6231b346970ed8db70359
| 2,050
|
py
|
Python
|
pytorch_lightning/plugins/precision/ipu_precision.py
|
alessiobonfiglio/pytorch-lightning
|
c453caf57e8ee65aaf82d4d42b26d7634dbf7046
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/precision/ipu_precision.py
|
alessiobonfiglio/pytorch-lightning
|
c453caf57e8ee65aaf82d4d42b26d7634dbf7046
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/precision/ipu_precision.py
|
alessiobonfiglio/pytorch-lightning
|
c453caf57e8ee65aaf82d4d42b26d7634dbf7046
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Union
from torch.nn import Module
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin
from pytorch_lightning.utilities import GradClipAlgorithmType
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.warnings import WarningCache
warning_cache = WarningCache()
class IPUPrecisionPlugin(PrecisionPlugin):
def __init__(self, precision: int) -> None:
super().__init__()
self.precision = precision
def backward(self, model: "pl.LightningModule", *args: Any, **kwargs: Any) -> None:
if is_overridden("backward", model):
warning_cache.warn(
"You have overridden the `LightningModule.backward` hook but it will be ignored since IPUs handle"
" the backward logic internally."
)
def clip_gradients(
self,
optimizer: Optimizer,
clip_val: Union[int, float],
gradient_clip_algorithm: GradClipAlgorithmType = GradClipAlgorithmType.NORM,
model: Optional[Module] = None,
) -> None:
"""Clips the gradients."""
if clip_val is None or float(clip_val) <= 0:
return
raise MisconfigurationException("IPUs currently do not support clipping gradients.")
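# A minimal behavioural sketch (the `optimizer` below is a hypothetical
# torch.optim.Optimizer instance, not defined in this module):
#
#   plugin = IPUPrecisionPlugin(precision=16)
#   plugin.clip_gradients(optimizer, clip_val=0.0)  # no-op: clipping disabled
#   plugin.clip_gradients(optimizer, clip_val=1.0)  # raises MisconfigurationException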
| 38.679245
| 114
| 0.73122
|
3521b6eb31f823907d1d76262e1772f431ef4113
| 15,426
|
py
|
Python
|
molo/forms/tests/test_utils.py
|
praekeltfoundation/molo.forms
|
746abf9d48e6adc73f9544f7211328cfcfef0410
|
[
"BSD-3-Clause"
] | null | null | null |
molo/forms/tests/test_utils.py
|
praekeltfoundation/molo.forms
|
746abf9d48e6adc73f9544f7211328cfcfef0410
|
[
"BSD-3-Clause"
] | 4
|
2019-10-28T13:09:47.000Z
|
2020-12-22T12:32:49.000Z
|
molo/forms/tests/test_utils.py
|
praekeltfoundation/molo.forms
|
746abf9d48e6adc73f9544f7211328cfcfef0410
|
[
"BSD-3-Clause"
] | 1
|
2019-08-19T12:09:18.000Z
|
2019-08-19T12:09:18.000Z
|
from django.test import TestCase
from molo.core.tests.base import MoloTestCaseMixin
from molo.forms.models import (
MoloFormField,
MoloFormPage,
)
from ..utils import SkipLogicPaginator
from .utils import skip_logic_data
class TestSkipLogicPaginator(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.form = MoloFormPage(
title='Test Form',
slug='test-form',
)
self.section_index.add_child(instance=self.form)
self.form.save_revision().publish()
self.first_field = MoloFormField.objects.create(
page=self.form,
sort_order=1,
label='Your other favourite animal',
field_type='singleline',
required=True
)
self.fifth_field = MoloFormField.objects.create(
page=self.form,
sort_order=5,
label='A random animal',
field_type='singleline',
required=True
)
field_choices = ['next', 'end', 'question']
self.second_field = MoloFormField.objects.create(
page=self.form,
sort_order=2,
label='Your favourite animal',
field_type='dropdown',
skip_logic=skip_logic_data(
field_choices,
field_choices,
question=self.fifth_field,
),
required=True
)
self.third_field = MoloFormField.objects.create(
page=self.form,
sort_order=3,
label='Your least favourite animal',
field_type='dropdown',
skip_logic=skip_logic_data(
field_choices,
field_choices,
question=self.fifth_field,
),
required=True
)
self.fourth_field = MoloFormField.objects.create(
page=self.form,
sort_order=4,
label='Your least favourite animal',
field_type='singleline',
required=True,
page_break=True
)
self.hidden_field = MoloFormField.objects.create(
page=self.form,
sort_order=5,
default_value='cat',
label='Your least favourite animal',
field_type='hidden',
required=True
)
self.paginator = SkipLogicPaginator(self.form.get_form_fields())
def test_correct_num_pages(self):
self.assertEqual(self.paginator.num_pages, 4)
def test_page_breaks_correct(self):
self.assertEqual(self.paginator.page_breaks, [0, 2, 3, 4, 5])
def test_first_page_correct(self):
page = self.paginator.page(1)
self.assertEqual(
page.object_list, [
self.hidden_field,
self.first_field,
self.second_field
],
)
self.assertTrue(page.has_next())
def test_second_page_correct(self):
page = self.paginator.page(2)
self.assertEqual(page.object_list, [self.third_field])
self.assertTrue(page.has_next())
def test_third_page_correct(self):
third_page = self.paginator.page(3)
self.assertEqual(third_page.object_list, [self.fourth_field])
self.assertTrue(third_page.has_next())
def test_last_page_correct(self):
last_page = self.paginator.page(4)
self.assertEqual(last_page.object_list, [self.fifth_field])
self.assertFalse(last_page.has_next())
def test_is_end_if_skip_logic(self):
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
{self.second_field.clean_name: 'end'}
)
first_page = paginator.page(1)
self.assertFalse(first_page.has_next())
def test_skip_question_if_skip_logic(self):
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
{self.second_field.clean_name: 'question'}
)
page = paginator.page(1)
next_page_number = page.next_page_number()
self.assertEqual(next_page_number, 4)
second_page = paginator.page(next_page_number)
self.assertEqual(second_page.object_list, [self.fifth_field])
self.assertFalse(second_page.has_next())
def test_first_question_skip_to_next(self):
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
{self.second_field.clean_name: 'next'},
)
self.assertEqual(paginator.previous_page, 1)
self.assertEqual(paginator.next_page, 2)
page = paginator.page(paginator.next_page)
self.assertEqual(page.object_list, [self.third_field])
self.assertEqual(page.number, 2)
def test_previous_page_if_skip_a_page(self):
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
{
self.first_field.clean_name: 'python',
self.second_field.clean_name: 'question',
}
)
page = paginator.page(1)
next_page_number = page.next_page_number()
self.assertEqual(next_page_number, 4)
second_page = paginator.page(next_page_number)
previous_page_number = second_page.previous_page_number()
self.assertEqual(previous_page_number, 1)
self.assertEqual(
paginator.page(previous_page_number).object_list,
[
self.hidden_field,
self.first_field,
self.second_field],
)
def test_question_progression_index(self):
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
{
self.first_field.clean_name: 'python',
self.second_field.clean_name: 'question',
}
)
self.assertEqual(paginator.previous_page, 1)
self.assertEqual(paginator.last_question_index, 1)
self.assertEqual(paginator.next_page, 4)
self.assertEqual(paginator.next_question_index, 4)
def test_no_data_index(self):
paginator = SkipLogicPaginator(self.form.get_form_fields())
self.assertEqual(paginator.previous_page, 1)
self.assertEqual(paginator.next_page, 1)
self.assertEqual(paginator.next_question_index, 0)
def test_no_data_index_with_checkbox(self):
self.first_field.field_type = 'checkbox'
self.first_field.skip_logic = skip_logic_data(
['', ''],
['next', 'end'],
)
self.first_field.save()
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
data={'csrf': 'dummy'},
)
self.assertEqual(paginator.previous_page, 1)
self.assertEqual(paginator.last_question_index, 0)
self.assertEqual(paginator.next_page, 2)
self.assertEqual(paginator.next_question_index, 1)
def test_single_question_quiz_with_skip_logic_pages_correctly(self):
self.first_field.delete()
self.third_field.delete()
self.fourth_field.delete()
self.fifth_field.delete()
paginator = SkipLogicPaginator(self.form.get_form_fields())
self.assertEqual(paginator.num_pages, 1)
class TestSkipLogicEveryPage(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.form = MoloFormPage(
title='Test Form',
slug='test-form',
)
self.another_form = MoloFormPage(
title='Another Test Form',
slug='another-test-form',
)
self.section_index.add_child(instance=self.form)
self.form.save_revision().publish()
self.section_index.add_child(instance=self.another_form)
self.another_form.save_revision().publish()
field_choices = ['next', 'end']
self.fourth_field = MoloFormField.objects.create(
page=self.form,
sort_order=4,
label='A random animal',
field_type='dropdown',
skip_logic=skip_logic_data(
field_choices,
field_choices,
),
required=True
)
self.first_field = MoloFormField.objects.create(
page=self.form,
sort_order=1,
label='Your other favourite animal',
field_type='dropdown',
skip_logic=skip_logic_data(
field_choices + ['question', 'form'],
field_choices + ['question', 'form'],
question=self.fourth_field,
form=self.another_form,
),
required=True
)
self.second_field = MoloFormField.objects.create(
page=self.form,
sort_order=2,
label='Your favourite animal',
field_type='dropdown',
skip_logic=skip_logic_data(
field_choices,
field_choices,
),
required=True
)
self.third_field = MoloFormField.objects.create(
page=self.form,
sort_order=3,
label='Your least favourite animal',
field_type='dropdown',
skip_logic=skip_logic_data(
field_choices,
field_choices,
),
required=True
)
self.hidden_field = MoloFormField.objects.create(
page=self.form,
sort_order=5,
default_value='cat',
label='Your least favourite animal',
field_type='hidden',
required=True
)
self.paginator = SkipLogicPaginator(self.form.get_form_fields())
def test_initialises_correctly(self):
self.assertEqual(self.paginator.page_breaks, [0, 1, 2, 3, 4])
self.assertEqual(self.paginator.num_pages, 4)
def test_first_question_skip_to_last(self):
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
{self.first_field.clean_name: 'question'},
)
self.assertEqual(paginator.previous_page, 1)
self.assertEqual(paginator.next_page, 4)
page = paginator.page(paginator.next_page)
self.assertEqual(page.object_list, [self.fourth_field])
self.assertEqual(page.number, 4)
def test_first_question_skip_to_next(self):
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
{self.first_field.clean_name: 'next'},
)
self.assertEqual(paginator.previous_page, 1)
self.assertEqual(paginator.next_page, 2)
page = paginator.page(paginator.next_page)
self.assertEqual(page.object_list, [self.second_field])
self.assertEqual(page.number, 2)
def test_first_question_skip_to_form(self):
paginator = SkipLogicPaginator(
self.form.get_form_fields(),
{self.first_field.clean_name: 'form'},
)
self.assertEqual(paginator.previous_page, 1)
page = paginator.page(1)
self.assertFalse(page.has_next())
class SkipLogicPaginatorMulti(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.form = MoloFormPage(
title='Test Form',
slug='test-form',
)
self.section_index.add_child(instance=self.form)
self.form.save_revision().publish()
self.first_field = MoloFormField.objects.create(
page=self.form,
sort_order=1,
label='Your other favourite animal',
field_type='singleline',
required=True
)
field_choices = ['next', 'next']
self.second_field = MoloFormField.objects.create(
page=self.form,
sort_order=2,
label='Your favourite animal',
field_type='dropdown',
skip_logic=skip_logic_data(field_choices, field_choices),
required=True
)
self.last_field = MoloFormField.objects.create(
page=self.form,
sort_order=3,
label='Your least favourite animal',
field_type='singleline',
required=True
)
self.hidden_field = MoloFormField.objects.create(
page=self.form,
sort_order=5,
default_value='cat',
label='Your least favourite animal',
field_type='hidden',
required=True
)
self.paginator = SkipLogicPaginator(self.form.get_form_fields())
def test_correct_num_pages(self):
self.assertEqual(self.paginator.num_pages, 3)
def test_page_breaks_correct(self):
self.assertEqual(self.paginator.page_breaks, [0, 1, 2, 3])
def test_first_page_correct(self):
self.assertEqual(
self.paginator.page(1).object_list, [
self.hidden_field,
self.first_field
],
)
def test_middle_page_correct(self):
self.assertEqual(
self.paginator.page(2).object_list,
[self.second_field],
)
def test_last_page_correct(self):
last_page = self.paginator.page(3)
self.assertEqual(last_page.object_list, [self.last_field])
self.assertFalse(last_page.has_next())
class SkipLogicPaginatorPageBreak(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.form = MoloFormPage(
title='Test Form',
slug='test-form',
)
self.section_index.add_child(instance=self.form)
self.form.save_revision().publish()
self.first_field = MoloFormField.objects.create(
page=self.form,
sort_order=1,
label='Your other favourite animal',
field_type='singleline',
required=True,
page_break=True,
)
self.second_field = MoloFormField.objects.create(
page=self.form,
sort_order=2,
label='Your favourite animal',
field_type='singleline',
required=True,
page_break=True,
)
self.last_field = MoloFormField.objects.create(
page=self.form,
sort_order=3,
label='Your least favourite animal',
field_type='singleline',
required=True
)
self.hidden_field = MoloFormField.objects.create(
page=self.form,
sort_order=5,
default_value='cat',
label='Your least favourite animal',
field_type='hidden',
required=True
)
self.paginator = SkipLogicPaginator(self.form.get_form_fields())
def test_correct_num_pages(self):
self.assertEqual(self.paginator.num_pages, 3)
def test_page_breaks_correct(self):
self.assertEqual(self.paginator.page_breaks, [0, 1, 2, 3])
def test_first_page_correct(self):
self.assertEqual(
self.paginator.page(1).object_list, [
self.hidden_field,
self.first_field
],
)
def test_middle_page_correct(self):
self.assertEqual(
self.paginator.page(2).object_list,
[self.second_field],
)
def test_last_page_correct(self):
last_page = self.paginator.page(3)
self.assertEqual(last_page.object_list, [self.last_field])
self.assertFalse(last_page.has_next())
| 34.356347
| 72
| 0.597174
|
35d70d67f3ba82c64944b1df6cc591d70df98077
| 1,334
|
py
|
Python
|
aplpy/tests/test_ticks.py
|
nbrunett/aplpy
|
f5d128faf3568adea753d52c11ba43014d25d90a
|
[
"MIT"
] | null | null | null |
aplpy/tests/test_ticks.py
|
nbrunett/aplpy
|
f5d128faf3568adea753d52c11ba43014d25d90a
|
[
"MIT"
] | null | null | null |
aplpy/tests/test_ticks.py
|
nbrunett/aplpy
|
f5d128faf3568adea753d52c11ba43014d25d90a
|
[
"MIT"
] | 1
|
2018-02-26T03:04:19.000Z
|
2018-02-26T03:04:19.000Z
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.tests.helper import pytest
from .. import FITSFigure
def test_ticks_show_hide():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.ticks.hide()
f.ticks.show()
f.ticks.hide_x()
f.ticks.show_x()
f.ticks.hide_y()
f.ticks.show_y()
f.close()
def test_ticks_spacing():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.ticks.set_xspacing(0.5)
f.ticks.set_xspacing(1.)
f.ticks.set_yspacing(0.5)
f.ticks.set_yspacing(1.)
f.close()
def test_ticks_length():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.ticks.set_length(0)
f.ticks.set_length(1)
f.ticks.set_length(10)
f.close()
def test_ticks_color():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.ticks.set_color('black')
f.ticks.set_color('#003344')
f.ticks.set_color((1.0, 0.4, 0.3))
f.close()
def test_ticks_linewidth():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.ticks.set_linewidth(1)
f.ticks.set_linewidth(3)
f.ticks.set_linewidth(10)
f.close()
def test_ticks_minor_frequency():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.ticks.set_minor_frequency(1)
f.ticks.set_minor_frequency(5)
f.ticks.set_minor_frequency(10)
f.close()
| 20.212121
| 39
| 0.638681
|
704b28f4d1f97692593bfc84f48dcc9dcd53ccad
| 1,345
|
py
|
Python
|
core/audit.py
|
superstap/jimi
|
d921b815c726e169c5a35f01a81eea8a75b8321d
|
[
"Apache-2.0"
] | null | null | null |
core/audit.py
|
superstap/jimi
|
d921b815c726e169c5a35f01a81eea8a75b8321d
|
[
"Apache-2.0"
] | null | null | null |
core/audit.py
|
superstap/jimi
|
d921b815c726e169c5a35f01a81eea8a75b8321d
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import time, json
from pathlib import Path
from bson import json_util,ObjectId
import jimi
# audit Class
class _audit(jimi.db._document):
_dbCollection = jimi.db.db["audit"]
def add(self,eventSource, eventType, eventData):
auditData = { "time" : time.time(), "systemID" : systemSettings["systemID"], "source" : eventSource, "type" : eventType, "data" : eventData }
try:
if auditSettings["db"]["enabled"]:
self._dbCollection.insert_one(jimi.helpers.unicodeEscapeDict(auditData))
except KeyError:
self._dbCollection.insert_one(jimi.helpers.unicodeEscapeDict(auditData))
try:
if auditSettings["file"]["enabled"]:
filename = "{0}{1}{2}.txt".format(datetime.date.today().day,datetime.date.today().month,datetime.date.today().year)
logFile = Path("{0}/{1}".format(auditSettings["file"]["logdir"],filename))
with open(logFile, "a") as logFile:
logLine = "{0}\r\n".format(json.loads(json_util.dumps(auditData))).replace(": True",": true").replace(": False",": false").replace(": None",": null")
logFile.write(logLine)
except KeyError:
pass
auditSettings = jimi.settings.getSetting("audit",None)
systemSettings = jimi.config["system"]
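# A minimal usage sketch, assuming a fully configured jimi install (database and
# audit settings loaded) and an already constructed `audit_instance` of _audit:
#
#   audit_instance.add("auth", "login", {"user": "admin", "result": "success"})
#
# The event is written to the "audit" collection and, when file logging is
# enabled, appended to a per-day log file under auditSettings["file"]["logdir"].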
| 44.833333
| 169
| 0.62974
|
152a3c67d37ccd307b6e109d0c2ff7092888cede
| 17,652
|
py
|
Python
|
plotext/utility.py
|
asartori86/plotext
|
255eb8f86cb528a6bce53e9d6f866f653adc4909
|
[
"MIT"
] | null | null | null |
plotext/utility.py
|
asartori86/plotext
|
255eb8f86cb528a6bce53e9d6f866f653adc4909
|
[
"MIT"
] | null | null | null |
plotext/utility.py
|
asartori86/plotext
|
255eb8f86cb528a6bce53e9d6f866f653adc4909
|
[
"MIT"
] | null | null | null |
import os
import shutil
import sys
import math
from datetime import datetime as dt
import docstrings as _docstrings
##############################################
############ Set Functions #############
##############################################
def set_first_to_both(x = None, y = None):# by setting one parameter to a value, both are set
if x != None and y == None:
y = x
return [x, y]
def set_list_to_both(x = None, y = None): # by setting a parameter to a list, both are set
if type(x) == list:
x, y = x[0], x[1]
return [x, y]
def set_if_none(x = None, x_none = None): # set a parameter when none is provided.
if x == None:
x = x_none
return x
def set_list_if_none(data, data_none):
for i in range(len(data)):
data[i] = set_if_none(data[i], data_none[i])
return data
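# Worked examples for the setter helpers above (arbitrary values):
# >>> set_first_to_both(3)        # only x given, so y mirrors it
# [3, 3]
# >>> set_list_to_both([1, 2])    # a list sets both values at once
# [1, 2]
# >>> set_if_none(None, 10)
# 10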
##############################################
######### Utility Functions ############
##############################################
def sort_data(data1, data2): # sort data2 according to data1 and remove duplicates from both lists based on data1
res = zip(*sorted(zip(data1, data2)))
data1, data2 = list(map(list, res))
d1 = []; d2 = []
for i in range(len(data1)):
if data1[i] not in d1:
d1.append(data1[i])
d2.append(data2[i])
return [d1, d2]
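# Worked example (arbitrary values): pairs are sorted on the first list and the
# duplicated x-value 1 keeps only its first occurrence after sorting.
# >>> sort_data([3, 1, 2, 1], ['c', 'a', 'b', 'x'])
# [[1, 2, 3], ['a', 'b', 'c']]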
def get_data(*args):
if len(args) == 0:
x, y = [], []
elif len(args) == 1:
y = args[0]
x = list(range(len(y)))
else:
x = args[0]
y = args[1]
x, y = list(x), list(y)
length = min(len(x), len(y))
if len(x) != len(y):
x = x[ : length]
y = y[ : length]
return x, y
def transpose(lists):
lists = list(zip(*lists))
return list(map(list, lists))
def get_lim_data(data):
m = min([min(el, default = 0) for el in data], default = 0)
M = max([max(el, default = 0) for el in data], default = 0)
if m == M:
m = m - 1
M = M + 1
lim_data = [m, M]
lim_data.sort()
return lim_data
def linspace(lower, upper, length):
if length == 1:
return [0.5 * (lower + upper)]
return [lower + x * (upper - lower) / (length - 1) for x in range(length - 1)] + [upper]
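# Worked example (arbitrary values): 5 evenly spaced points between 0 and 1.
# >>> linspace(0, 1, 5)
# [0.0, 0.25, 0.5, 0.75, 1]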
def arange(start, stop, step = 1):
res = []
i = start
while i < stop:
res.append(i)
i = i + step
return res
def get_ticks(lim, frequency):
ticks = linspace(min(lim), max(lim), frequency)
ticks = [int(el) if el == int(el) else el for el in ticks]
return ticks
def get_log_ticks(lim, frequency):
ticks = list(linspace(lim[0], lim[1], frequency))
labels = [10 ** el for el in ticks]
labels = get_labels(labels)
return ticks, labels
def get_labels(ticks):
l = len(ticks)
if len(ticks) == 1:
c = len(str(ticks[0]))
else:
c = max([distinguish(ticks[i], ticks[i + 1]) for i in range(l - 1)])
ticks = [round_to_character(el, c) for el in ticks]
#ticks = [int(el) if int(el) == el else el for el in ticks]
#ticks_sn = [scientific_notation(*scientific_base(el)) for el in ticks]
ticks = [str(el) for el in ticks]
#ticks = [ticks[i] if len(ticks[i]) <= c else ticks_sn[i] for i in range(l)]
return ticks
int_len = lambda x: len(str(int(x)))
def distinguish(a, b):
dif = abs(a - b)
ca, cb, cd = map(int_len, [a, b, dif])
dec = 0 if dif == 0 or int(dif) > 0 else -math.floor(math.log10(dif)) + cd + 1
res = max(ca, cb, dec)
res = res + 1 if a < 0 or b < 0 else res
return res
def round_to_character(n, c):
int_len = len(str(int(n)))
d = c - int_len - 1
if d < 0:
d = 0
return round(n, d)
def scientific_base(num):
base = abs(num)
exp = 0 if num == 0 else int(math.log10(base))
base = num / (10 ** exp)
base = int(base) if int(base) == base else base
return [base, exp]
def scientific_notation(base, exp):
return str(base) + 'E' + str(exp)
def get_matrix_data(data, plot_lim, bins):
bins = 1 if bins == 0 else bins
dz = (plot_lim[1] - plot_lim[0]) / bins
data = [int((el - plot_lim[0]) / dz) if el != plot_lim[1] else bins - 1 for el in data]
return data
def get_matrix_data_hd(data, plot_lim, bins):
return [el / 2 for el in get_matrix_data(data, plot_lim, 2 * bins)]
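# Worked example (arbitrary values): binning three values into 5 columns of a
# plot whose x-limits are [0, 5]; the upper limit maps to the last column.
# >>> get_matrix_data([0.0, 2.5, 5.0], [0, 5], 5)
# [0, 2, 4]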
def update_matrix(matrix, x, y, marker, color):
if matrix == []:
return []
if marker == "small":
return update_matrix_small(matrix, x, y, color)
cols, rows = len(matrix[0]), len(matrix)
for i in range(len(x)):
c, r = x[i], y[i]
if 0 <= r < rows and 0 <= c < cols:
c, r = int(c), int(r)
matrix[rows - 1 - r][c][:2] = [marker, color]
return matrix
def update_matrix_small(matrix, x, y, color):
cols, rows = len(matrix[0]), len(matrix)
for i in range(len(x)):
c, r = x[i], y[i]
new = small_marker(c, r)
c, r = int(c), int(r)
if 0 <= r < rows and 0 <= c < cols:
old = matrix[rows - 1 - r][c][0]
old = " " if old not in blocks.keys() else old
old = blocks[old]
new = sum_small(old, new)
new = matrix_to_block(new)
matrix[rows - 1 - r][c][:2] = [new, color]
return matrix
def small_marker(c, r):
c = (2 * c) % 2
r = (2 * r) % 2
c, r = int(c), int(r)
new = [[(c == 0) * (r == 1), (c == 1) * (r == 1)]]
new += [[(c == 0) * (r == 0), (c == 1) * (r == 0)]]
return new
def sum_small(block1, block2):
new = [[0, 0], [0, 0]]
for i in range(2):
for j in range(2):
new[i][j] = block1[i][j] + block2[i][j]
new[i][j] = int(bool(new[i][j]))
return new
blocks = {" ": [[0, 0], [0, 0]],
"▘": [[1, 0], [0, 0]],
"▖": [[0, 0], [1, 0]],
"▗": [[0, 0], [0, 1]],
"▝": [[0, 1], [0, 0]],
"▌": [[1, 0], [1, 0]],
"▐": [[0, 1], [0, 1]],
"▄": [[0, 0], [1, 1]],
"▀": [[1, 1], [0, 0]],
"▚": [[1, 0], [0, 1]],
"▞": [[0, 1], [1, 0]],
"▛": [[1, 1], [1, 0]],
"▙": [[1, 0], [1, 1]],
"▟": [[0, 1], [1, 1]],
"▜": [[1, 1], [0, 1]],
"█": [[1, 1], [1, 1]]}
def matrix_to_block(matrix):
for k in blocks.keys():
if matrix == blocks[k]:
return k
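# Worked example: merging the upper-left and lower-right quarter blocks yields
# the diagonal block character.
# >>> sum_small(blocks["▘"], blocks["▗"])
# [[1, 0], [0, 1]]
# >>> matrix_to_block([[1, 0], [0, 1]])
# '▚'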
def get_line(x, y):
x_line = []
y_line = []
for n in range(len(x) - 1):
Dy, Dx = y[n + 1] - y[n], x[n + 1] - x[n]
Ay, Ax = abs(Dy), abs(Dx)
Sy, Sx = 0 if Dy == 0 else int(abs(Dy) / Dy), 0 if Dx == 0 else int(abs(Dx) / Dx)
if Ax >= Ay and Ax != 0:
x_line_n = [x[n] + i * Sx for i in range(Ax)]
y_line_n = [y[n] + i * Dy / Ax for i in range(Ax)]
elif Ay > Ax and Ay != 0:
y_line_n = [y[n] + i * Sy for i in range(Ay)]
x_line_n = [x[n] + i * Dx / Ay for i in range(Ay)]
elif Ax == 0:
y_line_n = arange(y[n], y[n + 1] + 1)
x_line_n = [x[n]] * len(y_line_n)
elif Ay == 0:
x_line_n = arange(x[n], x[n + 1] + 1)
y_line_n = [y[n]] * len(x_line_n)
x_line.extend(x_line_n)
y_line.extend(y_line_n)
x_line += [x[-1]]
y_line += [y[-1]]
return x_line, y_line
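# Worked example (arbitrary values): rasterising the segment from (0, 0) to
# (2, 2) fills in the intermediate point.
# >>> get_line([0, 2], [0, 2])
# ([0, 1, 2], [0.0, 1.0, 2])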
def fill_data(x, y, y0):
y_new = y
x_new = x
for i in range(len(y)):
yi = int(y[i])
y_temp = range(min(y0, yi), max(y0, yi))
y_new += y_temp
x_new += [x[i]] * len(y_temp)
return [x, y]
def frame_matrix(matrix, symbol = None):
l, w = len(matrix), len(matrix[0])
    frame = [symbol] * 6 if symbol != None else ["┌", "─", "┐", "│", "┘", "└"]
side = [[frame[3]] * l]
matrix = transpose(side + transpose(matrix) + side)
up = [[frame[0]] + [frame[1]] * w + [frame[2]]]
down = [[frame[5]] + [frame[1]] * w + [frame[4]]]
matrix = up + matrix + down
return matrix
def insert(sub_matrix, matrix):
if matrix == []:
return []
l, w = len(sub_matrix), len(sub_matrix[0])
L, W = len(matrix), len(matrix[0])
if L >= l and W >= w:
for i in range(l):
for j in range(w):
matrix[i][j] = sub_matrix[i][j]
return matrix
def join(matrix1, matrix2, separator = None, orientation = "vertical"):
if orientation == "horizontal":
matrix1 = transpose(matrix1)
matrix2 = transpose(matrix2)
return transpose(join(matrix1, matrix2, separator, "vertical"))
if matrix1 == [] or matrix2 == []:
return matrix1 + matrix2
w = min(len(matrix1[0]), len(matrix2[0]))
separator = [] if separator == None else [[separator] * w]
matrix1 = [el[:w] for el in matrix1]
matrix2 = [el[:w] for el in matrix2]
return matrix1 + separator + matrix2
def log(data):
return [math.log10(el) for el in data]
##############################################
######### Bar/Hist Functions ###########
##############################################
def bar_xdata(x):
x_type = [type(el) == str for el in x]
if any(x_type):
x_labels = list(map(str, x))
x = list(range(len(x)))
else:
x_labels = get_labels(x)
return x, x_labels
def bars(x, y, width = 4 / 5):
x, y = x[:], y[:]
bins = len(x)
bin_size_half = width / 2
# adjust the bar width according to the number of bins
if bins > 1:
bin_size_half *= (max(x) - min(x)) / (bins - 1)
#bin_size_half = (max(x) - min(x)) / (bins) * width / 2
#x[0] += bin_size_half
#x[bins - 1] -= bin_size_half
xbar = []
ybar = []
for i in range(bins):
xbar.append([x[i] - bin_size_half, x[i] - bin_size_half,
x[i] + bin_size_half, x[i] + bin_size_half,
x[i] - bin_size_half])
ybar.append([0, y[i], y[i], 0, 0])
return xbar, ybar
def hist_data(data, bins = 10):
data = [round(el, 15) for el in data]
m, M = min(data), max(data)
data = [(el - m) / (M - m) * bins if el != M else bins - 1 for el in data]
data = [int(el) for el in data]
histx = linspace(m, M, bins)
histy = [0] * bins
for el in data:
histy[el] += 1
return histx, histy
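# Worked example (arbitrary values): ten evenly spread samples fall two per bin.
# >>> hist_data([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], bins=5)
# ([0.0, 2.25, 4.5, 6.75, 9], [2, 2, 2, 2, 2])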
##############################################
######### Date/Time Functions ###########
##############################################
today = dt.today()
def time(day = None, month = None, year = None, hour = 0, minute = 0, second = 0):
year = today.year if year == None else year
month = today.month if month == None else month
day = today.day if day == None else day
return dt(year = year, month = month, day = day, hour = hour, minute = minute, second = second).timestamp()
def string_to_date(string):
string = string.split(" ")
if len(string) == 1:
date = string[0] if '/' in string[0] else today.strftime("%d/%m/%Y")
time = string[0] if ':' in string[0] else '0:0:0'
string = [date, time]
date = string[0]
time = string[1]
if date.count('/') == 1:
date += '/' + str(today.year)
if time.count(':') == 1:
time += ':00'
time = dt.strptime(date + ' ' + time, '%d/%m/%Y %H:%M:%S')
day, month, year, hour, minute, second = time.day, time.month, time.year, time.hour, time.minute, time.second
return [day, month, year, hour, minute, second]
def string_to_time(string):
return time(*string_to_date(string))
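# Worked example (arbitrary date): both parts of the string are parsed; a
# missing year or seconds field falls back to the current year / to :00.
# >>> string_to_date("25/12/2021 14:30")
# [25, 12, 2021, 14, 30, 0]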
##############################################
##### Plotting Utility Functions #######
##############################################
def get_canvas(matrix):
canvas = ''
for r in range(len(matrix)):
for c in range(len(matrix[0])):
marker = matrix[r][c][0]
fullground = matrix[r][c][1]
background = matrix[r][c][2]
marker = add_color(marker, fullground, background)
canvas += marker
canvas += '\n'
return canvas + '\n'
_terminal_printed_lines_cnt = 0
def write(string):
global _terminal_printed_lines_cnt
sys.stdout.write(string)
_terminal_printed_lines_cnt += string.count('\n')
##############################################
###### Platform/Shell Functions ########
##############################################
def platform():
if sys.platform.startswith("win"):
return "windows"
else:
return "linux"
def shell():
if 'idlelib.run' in sys.modules:
return "idle"
elif "spyder" in sys.modules:
return "spyder"
else:
return "regular"
##############################################
########### Other Functions ############
##############################################
def check_path(path):
home = os.path.expanduser("~")
if path == None:
path = os.path.join(home, "plot.txt")
basedir = os.path.dirname(path)
if os.path.exists(basedir):
return path
else:
        print("warning: parent directory doesn't exist.")
path = os.path.join(home, os.path.basename(path))
return path
def terminal_size():
try:
return list(os.get_terminal_size())
except OSError:
        return list(shutil.get_terminal_size(fallback=(120, 50)))
def docstrings():
fun = dir(_docstrings)
name = [el for el in fun if el[0] != '_']
fun = [getattr(_docstrings, el) for el in name]
name = [el.replace('_doc', '') for el in name]
name = [add_color(el, 'indigo') for el in name]
name = [add_color(el, 'bold') for el in name]
for i in range(len(fun)):
print()
print()
print(name[i])
print(fun[i])
def version():
init_path = "__init__.py"
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, init_path), 'r') as fp:
lines = fp.read()
for line in lines.splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
version = line.split(delim)[1]
print("plotext version:", version)
return version
else:
print("Unable to find version string.")
def sleep_1us():
time = 1 / 10 ** 6
[i for i in range(int(time * 2.58 * 10 ** 7))]
def sleep(time = 0.01):
ms = int(time * 10 ** 6)
for m in range(ms):
sleep_1us()
#[i for i in range(int(time * 15269989))]
def sin(length = 1000, peaks = 2, decay = 0, phase = 0):
f = 2 * math.pi / length * peaks
ph = math.pi * phase
d = 1 / length * decay
return [math.sin(f * el + ph) * math.exp(- d * el) for el in range(length)]
##############################################
######## Color/Marker Functions ########
##############################################
fullground_color = {'none': 0, 'black': 30, 'iron': 90, 'gray': 2, 'cloud': 37, 'white': 97, 'red': 31, 'tomato': 91, 'basil': 32, 'green': 92, 'yellow': 93, 'gold': 33, 'blue': 34, 'indigo': 94, 'teal': 36, 'artic': 96, 'lilac': 95, 'violet': 35, 'italic': 3, 'bold': 1, 'flash': 5}
background_color = {'none': 28, 'black': 40, 'iron': 100, 'cloud': 47, 'white': 107, 'red': 41, 'tomato': 101, 'basil': 42, 'green': 102, 'yellow': 103, 'gold': 43, 'blue': 44, 'indigo': 104, 'teal': 46, 'artic': 106, 'lilac': 105, 'violet': 45}
color_sequence = ["blue", "tomato", "gold", "iron", "basil", "none", "gray", "cloud", "lilac", "black", "artic", "red", "green", "yellow", "indigo", "teal", "violet", "white", "flash"]
def apply_color(text, code):
if code == 0 or code == 28:
return text
return '\033[' + str(code) + 'm' + text + '\033[0m'
def add_color(text = "", color = "none", background = "none"):
color = fullground_color[color]
background = background_color[background]
if color != "none":
text = apply_color(text, color)
if background != "none":
text = apply_color(text, background)
return text
def remove_color(string):
for c in list(fullground_color.values()) + list(background_color.values()):
string = string.replace('\x1b[' + str(c) + 'm', '')
return string
def colors():
key = list(fullground_color.keys())
title = "Fullground\tBackground"
lines = '─' * 26
#title = _add_color(title, "none", "black")
title = add_color(title, "bold")
#title = _apply_color(title, 4)
out = '\n' + title + '\n' + lines
for i in range(len(key)):
full_color = ""
if key[i] in fullground_color.keys():
back = "none" if key[i] not in ["black"] else "cloud"
full_color = add_color(key[i]+ "\t\t" , key[i], back)
back_color = add_color("not available", "italic")
if key[i] in background_color.keys():
full = "black" if key[i] not in ["none", "black", "iron"] else "white"
back_color = add_color(key[i], full, key[i])
out += "\n" * 1 + full_color + back_color
out += "\n\n" + "Fullground colors can be set to the 'color' attribute or given as input to plt.ticks_color()."
out += "\n" + "Background colors can be given as input to plt.canvas_color() and plt.axes_color()."
print(out)
marker = {'small': '▘',
'big': '█',
'dot': '•',
'heart': '♥',
'smile': '☺',
'dollar': '$',
'euro': '€'}
marker_sequence = ["small", "dot", "x", "o", "heart", "dollar", "smile", "euro", "big", "@", "^", "é", "a", "b", "c", "d", "e", "f"]
def markers():
print()
title = "Code\tMarker"
lines = '─' * 14
title = add_color(title, "bold")
print(title + '\n' + lines)
for el in marker:
print(el + '\t' + marker[el])
print("\nThese codes can be set to the 'marker' attribute.")
| 33.242938
| 283
| 0.506232
|
7163e95ea4cbfac2e46b0b42ed1be81f41b25c5d
| 8,756
|
py
|
Python
|
test/functional/wallet_abandonconflict.py
|
durgeshkmr/Libra-Coin
|
c40293ac5c8f289e4c06b46d0c7f3ca76ff591a6
|
[
"MIT"
] | null | null | null |
test/functional/wallet_abandonconflict.py
|
durgeshkmr/Libra-Coin
|
c40293ac5c8f289e4c06b46d0c7f3ca76ff591a6
|
[
"MIT"
] | null | null | null |
test/functional/wallet_abandonconflict.py
|
durgeshkmr/Libra-Coin
|
c40293ac5c8f289e4c06b46d0c7f3ca76ff591a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Libra Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already abandoned.
"""
from decimal import Decimal
from test_framework.test_framework import LibraTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes, disconnect_nodes, sync_blocks, sync_mempools
class AbandonConflictTest(LibraTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
# Can not abandon non-wallet transaction
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
# Can not abandon confirmed transaction
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10libra outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
        inputs = []
# spend 10libra outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998libra output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# Create a child tx spending ABC2
signed3_change = Decimal("24.999")
inputs = [ {"txid":txABC2, "vout":0} ]
outputs = { self.nodes[0].getnewaddress(): signed3_change }
signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
# note tx is never directly referenced, only abandoned as a child of the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + signed3_change)
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - signed3_change)
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if it is received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so it is unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
        inputs = []
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransactionwithwallet(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 LIBRA outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 LIBRA output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| 49.468927
| 137
| 0.669941
|
f3c07db438412abed6e16b50ad423ed156d0d7b3
| 2,854
|
py
|
Python
|
tensorflow/contrib/py2tf/convert/gradients_function.py
|
worldveil/tensorflow
|
f5de234d7f601214443f371e90fbadc8f128bb9a
|
[
"Apache-2.0"
] | 1
|
2021-04-22T08:23:27.000Z
|
2021-04-22T08:23:27.000Z
|
tensorflow/contrib/py2tf/convert/gradients_function.py
|
worldveil/tensorflow
|
f5de234d7f601214443f371e90fbadc8f128bb9a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/py2tf/convert/gradients_function.py
|
worldveil/tensorflow
|
f5de234d7f601214443f371e90fbadc8f128bb9a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Allows converting Eager-style gradients to graph versions."""
# TODO(mdan): This is not needed. Remove once the static analysis works.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import templates
class GradientsFunctionTransformer(gast.NodeTransformer):
"""Hack: transforms eager-style gradients to TF compatible calls.
Requires an expression of exactly this form:
... = tfe.value_and_gradients_function(...)(...)
"""
# pylint:disable=invalid-name
def visit_Assign(self, node):
self.generic_visit(node)
val = node.value
if isinstance(val, gast.Call):
if isinstance(val.func, gast.Call):
if isinstance(val.func.func, gast.Attribute):
if isinstance(val.func.func.value, gast.Name):
if (val.func.func.value.id == 'tfe' and
val.func.func.attr == 'value_and_gradients_function'):
# pylint:disable=unused-argument,undefined-variable
def template(loss_var, loss_fn, args, d_vars, wrt_vars):
loss_var = loss_fn(args)
d_vars = tf.gradients(loss_var, [wrt_vars])
# pylint:enable=unused-argument,undefined-variable
# How to get these values? Print out the node.
loss_var = gast.Name(node.targets[0].elts[0].id, gast.Store(),
None)
loss_fn = gast.Name(val.func.args[0].id, gast.Load(), None)
args = tuple(
gast.Name(a.id, gast.Param(), None) for a in val.args)
d_vars = node.targets[0].elts[1]
wrt_vars = [val.args[e.n] for e in val.func.args[1].elts]
node = templates.replace(
template,
loss_var=loss_var,
loss_fn=loss_fn,
args=args,
d_vars=d_vars,
wrt_vars=wrt_vars)
return node
# pylint:enable=invalid-name
def transform(node):
transformer = GradientsFunctionTransformer()
node = transformer.visit(node)
return node
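# A minimal usage sketch (the `source` string is hypothetical user code; the
# gast round-trip helpers are assumed from the gast package):
#
#   import ast
#   node = gast.ast_to_gast(ast.parse(source))
#   node = transform(node)  # rewrites `... = tfe.value_and_gradients_function(...)(...)`
#                           # assignments into tf.gradients-based graph code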
| 35.234568
| 80
| 0.629292
|
38dd6965a671304913011fb8c839899fedabbd92
| 192
|
py
|
Python
|
test/mock_framework.py
|
mqtran01/python-yahoo-finance
|
9a4bb17d24cf81214c1778cae3f231a3f0e93516
|
[
"MIT"
] | 3
|
2018-11-22T13:42:48.000Z
|
2021-03-03T00:55:03.000Z
|
test/mock_framework.py
|
mqtran01/python-yahoo-finance
|
9a4bb17d24cf81214c1778cae3f231a3f0e93516
|
[
"MIT"
] | 5
|
2020-03-24T16:37:19.000Z
|
2021-07-15T05:20:05.000Z
|
test/mock_framework.py
|
mqtran01/python-yahoo-finance
|
9a4bb17d24cf81214c1778cae3f231a3f0e93516
|
[
"MIT"
] | 2
|
2019-04-15T18:54:32.000Z
|
2021-03-04T02:23:12.000Z
|
class MockResponse:
def __init__(self, text):
self.text = text
def json(self):
return self.json_data
@property
def cookies(self):
return {'B': '1234'}
| 19.2
| 29
| 0.578125
|
b5aba75a61998ce408bf4722457952c8f1a2efba
| 7,085
|
py
|
Python
|
reviewboard/accounts/tests/test_user.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 921
|
2015-01-01T15:26:28.000Z
|
2022-03-29T11:30:38.000Z
|
reviewboard/accounts/tests/test_user.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 5
|
2015-03-17T18:57:47.000Z
|
2020-10-02T13:24:31.000Z
|
reviewboard/accounts/tests/test_user.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 285
|
2015-01-12T06:24:36.000Z
|
2022-03-29T11:03:50.000Z
|
"""Unit tests for additions to django.contrib.auth.models.User."""
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser, User
from djblets.testing.decorators import add_fixtures
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
class UserTests(TestCase):
"""Unit tests for additions to django.contrib.auth.models.User."""
fixtures = ['test_users']
def test_is_profile_visible_with_public(self):
"""Testing User.is_profile_visible with public profiles"""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
self.assertTrue(user1.is_profile_visible(user2))
def test_is_profile_visible_with_private(self):
"""Testing User.is_profile_visible with private profiles"""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
profile = user1.get_profile()
profile.is_private = True
profile.save(update_fields=('is_private',))
self.assertFalse(user1.is_profile_visible(user2))
self.assertTrue(user1.is_profile_visible(user1))
user2.is_staff = True
self.assertTrue(user1.is_profile_visible(user2))
def test_is_profile_visible_unauthenticated(self):
"""Testing User.is_profile_visible with an unauthenticated user"""
user = User.objects.get(username='doc')
self.assertFalse(user.is_profile_visible(AnonymousUser()))
def test_is_profile_visible_no_user(self):
"""Testing User.is_profile_visible with no user"""
user = User.objects.get(username='doc')
self.assertFalse(user.is_profile_visible(None))
    def test_is_profile_visible_staff(self):
        """Testing User.is_profile_visible with a staff user"""
user = User.objects.get(username='doc')
admin = User.objects.get(username='admin')
profile = user.get_profile()
profile.is_private = True
profile.save(update_fields=('is_private',))
self.assertTrue(user.is_profile_visible(admin))
def test_is_profile_visible_owner(self):
"""Testing User.is_profile_visible for the profile owner"""
user = User.objects.get(username='doc')
profile = user.get_profile()
profile.is_private = True
profile.save(update_fields=('is_private',))
self.assertTrue(user.is_profile_visible(user))
def test_is_profile_visible_local_site_member(self):
"""Testing User.is_profile_visible for a LocalSite member viewing a
LocalSite member with a public profile
"""
to_view = User.objects.get(username='doc')
viewer = User.objects.get(username='grumpy')
site = LocalSite.objects.create()
site.users = [to_view, viewer]
self.assertTrue(to_view.is_profile_visible(viewer))
def test_is_profile_visible_local_site_member_private(self):
"""Testing User.is_profile_visible for a LocalSite member viewing a
LocalSite member with a private profile
"""
to_view = User.objects.get(username='doc')
viewer = User.objects.get(username='grumpy')
profile = to_view.get_profile()
profile.is_private = True
profile.save(update_fields=('is_private',))
site = LocalSite.objects.create()
site.users = [to_view, viewer]
self.assertFalse(to_view.is_profile_visible(viewer))
    def test_is_profile_visible_local_site_admin(self):
        """Testing User.is_profile_visible for a LocalSite admin viewing a
LocalSite member with a public profile
"""
to_view = User.objects.get(username='doc')
viewer = User.objects.get(username='grumpy')
site = LocalSite.objects.create()
site.users = [to_view, viewer]
site.admins = [viewer]
self.assertTrue(to_view.is_profile_visible(viewer))
    def test_is_profile_visible_local_site_admin_private(self):
        """Testing User.is_profile_visible for a LocalSite admin viewing a
LocalSite member with a private profile
"""
to_view = User.objects.get(username='doc')
viewer = User.objects.get(username='grumpy')
profile = to_view.get_profile()
profile.is_private = True
profile.save(update_fields=('is_private',))
site = LocalSite.objects.create()
site.users = [to_view, viewer]
site.admins = [viewer]
self.assertTrue(to_view.is_profile_visible(viewer))
def test_is_admin_for_user_admin_vs_user(self):
"""Testing User.is_admin_for_user for an admin"""
admin = User.objects.get(username='admin')
user = User.objects.get(username='doc')
with self.assertNumQueries(0):
self.assertTrue(admin.is_admin_for_user(user))
def test_is_admin_for_user_admin_vs_none(self):
"""Testing User.is_admin_for_user for an admin when the user is None"""
admin = User.objects.get(username='admin')
with self.assertNumQueries(0):
self.assertTrue(admin.is_admin_for_user(None))
def test_is_admin_for_user_admin_vs_anonymous(self):
"""Testing User.is_admin_for_user for an admin when the user is
anonymous
"""
admin = User.objects.get(username='admin')
with self.assertNumQueries(0):
self.assertTrue(admin.is_admin_for_user(AnonymousUser()))
def test_is_admin_for_user_user_vs_user(self):
"""Testing User.is_admin_for_user for a regular user"""
user = User.objects.get(username='doc')
with self.assertNumQueries(1):
self.assertFalse(user.is_admin_for_user(user))
with self.assertNumQueries(0):
self.assertFalse(user.is_admin_for_user(user))
@add_fixtures(['test_site'])
def test_is_admin_for_user_localsite_admin_vs_localsite_user(self):
"""Testing User.is_admin_for_user for a LocalSite admin when the user
is a member of that LocalSite
"""
site_admin = User.objects.get(username='doc')
site_user = User.objects.get(username='admin')
with self.assertNumQueries(1):
self.assertTrue(site_admin.is_admin_for_user(site_user))
with self.assertNumQueries(0):
self.assertTrue(site_admin.is_admin_for_user(site_user))
@add_fixtures(['test_site'])
def test_is_admin_for_user_localsite_admin_vs_other_localsite_user(self):
"""Testing User.is_admin_for_user for a LocalSite admin when the user
is a member of another LocalSite
"""
site_admin = User.objects.get(username='doc')
site_user = User.objects.get(username='grumpy')
site = LocalSite.objects.create(name='local-site-3')
site.users.add(site_admin)
site.users.add(site_user)
with self.assertNumQueries(1):
self.assertFalse(site_admin.is_admin_for_user(site_user))
with self.assertNumQueries(0):
self.assertFalse(site_admin.is_admin_for_user(site_user))
| 36.709845
| 79
| 0.68398
|
79e84e4a8f6a0e00e462cbede15b35bd6c0f7b3e
| 35,169
|
py
|
Python
|
sdk/tables/azure-data-tables/tests/test_table_batch.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 1
|
2021-04-30T04:44:41.000Z
|
2021-04-30T04:44:41.000Z
|
sdk/tables/azure-data-tables/tests/test_table_batch.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | null | null | null |
sdk/tables/azure-data-tables/tests/test_table_batch.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from datetime import datetime, timedelta
from dateutil.tz import tzutc
import os
import sys
import uuid
from devtools_testutils import AzureTestCase
from azure.core import MatchConditions
from azure.core.credentials import AzureSasCredential
from azure.core.exceptions import (
ResourceExistsError,
ResourceNotFoundError,
ClientAuthenticationError
)
from azure.data.tables import (
EdmType,
TableEntity,
EntityProperty,
UpdateMode,
TableTransactionError,
TableServiceClient,
TableEntity,
UpdateMode,
generate_table_sas,
TableSasPermissions,
RequestTooLargeError,
TransactionOperation
)
from _shared.testcase import TableTestCase
from preparers import TablesPreparer
#------------------------------------------------------------------------------
TEST_TABLE_PREFIX = 'table'
#------------------------------------------------------------------------------
class StorageTableBatchTest(AzureTestCase, TableTestCase):
def _set_up(self, tables_storage_account_name, tables_primary_storage_account_key):
self.ts = TableServiceClient(self.account_url(tables_storage_account_name, "table"), tables_primary_storage_account_key)
self.table_name = self.get_resource_name('uttable')
self.table = self.ts.get_table_client(self.table_name)
if self.is_live:
try:
self.ts.create_table(self.table_name)
except ResourceExistsError:
pass
self.test_tables = []
def _tear_down(self):
if self.is_live:
try:
self.ts.delete_table(self.table_name)
except:
pass
for table_name in self.test_tables:
try:
self.ts.delete_table(table_name)
except:
pass
#--Helpers-----------------------------------------------------------------
def _get_table_reference(self, prefix=TEST_TABLE_PREFIX):
table_name = self.get_resource_name(prefix)
self.test_tables.append(table_name)
return self.ts.get_table_client(table_name)
def _create_pk_rk(self, pk, rk):
try:
pk = pk if pk is not None else self.get_resource_name('pk').decode('utf-8')
rk = rk if rk is not None else self.get_resource_name('rk').decode('utf-8')
except AttributeError:
pk = pk if pk is not None else self.get_resource_name('pk')
rk = rk if rk is not None else self.get_resource_name('rk')
return pk, rk
def _create_random_entity_dict(self, pk=None, rk=None):
"""
Creates a dictionary-based entity with fixed values, using all
of the supported data types.
"""
# partition = pk if pk is not None else self.get_resource_name('pk').decode('utf-8')
# row = rk if rk is not None else self.get_resource_name('rk').decode('utf-8')
partition, row = self._create_pk_rk(pk, rk)
properties = {
'PartitionKey': partition,
'RowKey': row,
'age': 39,
'sex': u'male',
'married': True,
'deceased': False,
'optional': None,
'ratio': 3.1,
'evenratio': 3.0,
'large': 933311100,
'Birthday': datetime(1973, 10, 4, tzinfo=tzutc()),
'birthday': datetime(1970, 10, 4, tzinfo=tzutc()),
'binary': b'binary',
'other': EntityProperty(value=20, type=EdmType.INT32),
'clsid': uuid.UUID('c9da6455-213d-42c9-9a79-3e9149a57833')
}
return TableEntity(**properties)
def _create_updated_entity_dict(self, partition, row):
'''
Creates a dictionary-based entity with fixed values, with a
different set of values than the default entity. It
adds fields, changes field values, changes field types,
and removes fields when compared to the default entity.
'''
return {
'PartitionKey': partition,
'RowKey': row,
'age': u'abc',
'sex': u'female',
'sign': u'aquarius',
'birthday': datetime(1991, 10, 4, tzinfo=tzutc())
}
def _assert_default_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity.
'''
assert entity['age'] == 39
assert entity['sex'] == 'male'
assert entity['married'] == True
assert entity['deceased'] == False
assert not "optional" in entity
assert entity['ratio'] == 3.1
assert entity['evenratio'] == 3.0
assert entity['large'] == 933311100
assert entity['Birthday'] == datetime(1973, 10, 4, tzinfo=tzutc())
assert entity['birthday'] == datetime(1970, 10, 4, tzinfo=tzutc())
assert entity['binary'].value == b'binary'
assert entity['other'] == 20
assert entity['clsid'] == uuid.UUID('c9da6455-213d-42c9-9a79-3e9149a57833')
assert '_metadata' in entity
def _assert_updated_entity(self, entity):
'''
Asserts that the entity passed in matches the updated entity.
'''
assert entity.age == 'abc'
assert entity.sex == 'female'
assert not hasattr(entity, "married")
assert not hasattr(entity, "deceased")
assert entity.sign == 'aquarius'
assert not hasattr(entity, "optional")
assert not hasattr(entity, "ratio")
assert not hasattr(entity, "evenratio")
assert not hasattr(entity, "large")
assert not hasattr(entity, "Birthday")
        assert entity.birthday == datetime(1991, 10, 4, tzinfo=tzutc())
assert not hasattr(entity, "other")
assert not hasattr(entity, "clsid")
assert entity['_metadata']['etag'] is not None
def _assert_valid_batch_transaction(self, transaction, length):
assert length == len(transaction)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_single_insert(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert'
entity.test = EntityProperty(True)
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
batch = [('create', entity)]
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, 1)
assert 'etag' in transaction_result[0]
e = self.table.get_entity(row_key=entity.RowKey, partition_key=entity.PartitionKey)
assert e.test == entity.test.value
assert e.test2 == entity.test2
assert e.test3 == entity.test3
assert e.test4 == entity.test4.value
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_single_update(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert'
entity.test = EntityProperty(True)
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
resp = self.table.create_entity(entity)
assert resp is not None
entity.test3 = 5
entity.test5 = datetime.utcnow()
batch = [('update', entity, {'mode':UpdateMode.MERGE})]
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, 1)
assert 'etag' in transaction_result[0]
result = self.table.get_entity(row_key=entity.RowKey, partition_key=entity.PartitionKey)
assert result.PartitionKey == u'001'
assert result.RowKey == u'batch_insert'
assert result.test3 == 5
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_update(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = u'001'
entity.RowKey = u'batch_update'
entity.test = EntityProperty(True)
entity.test2 = u'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
self.table.create_entity(entity)
entity = self.table.get_entity(u'001', u'batch_update')
assert 3 == entity.test3
entity.test2 = u'value1'
batch = [('update', entity)]
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, 1)
assert 'etag' in transaction_result[0]
result = self.table.get_entity('001', 'batch_update')
assert 'value1' == result.test2
assert entity.PartitionKey == u'001'
assert entity.RowKey == u'batch_update'
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_merge(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = u'001'
entity.RowKey = u'batch_merge'
entity.test = EntityProperty(True)
entity.test2 = u'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
self.table.create_entity(entity)
resp_entity = self.table.get_entity(partition_key=u'001', row_key=u'batch_merge')
assert 3 == entity.test3
entity = TableEntity()
entity.PartitionKey = u'001'
entity.RowKey = u'batch_merge'
entity.test2 = u'value1'
batch = [('update', entity, {'mode': UpdateMode.MERGE})]
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, 1)
assert 'etag' in transaction_result[0]
resp_entity = self.table.get_entity(partition_key=u'001', row_key=u'batch_merge')
assert entity.test2 == resp_entity.test2
assert 1234567890 == resp_entity.test4
assert entity.PartitionKey == resp_entity.PartitionKey
assert entity.RowKey == resp_entity.RowKey
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_update_if_match(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_entity_dict()
resp = self.table.create_entity(entity=entity)
etag = resp['etag']
# Act
sent_entity = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
batch = [(
'update',
sent_entity,
{'etag': etag, 'match_condition':MatchConditions.IfNotModified, 'mode':UpdateMode.REPLACE}
)]
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, 1)
assert 'etag' in transaction_result[0]
entity = self.table.get_entity(partition_key=entity['PartitionKey'], row_key=entity['RowKey'])
self._assert_updated_entity(entity)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_update_if_doesnt_match(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_entity_dict()
self.table.create_entity(entity)
# Act
sent_entity1 = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
batch = [(
'update',
sent_entity1,
{'etag': u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"', 'match_condition':MatchConditions.IfNotModified}
)]
with pytest.raises(TableTransactionError):
self.table.submit_transaction(batch)
# Assert
received_entity = self.table.get_entity(entity['PartitionKey'], entity['RowKey'])
self._assert_default_entity(received_entity)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_single_op_if_doesnt_match(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = 'batch_inserts'
entity.test = EntityProperty(True)
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
            # Note: this first batch is built but never submitted; the transaction under
            # test is rebuilt from sent_entity1 below.
            batch = []
            transaction_count = 0
for i in range(10):
entity.RowKey = str(i)
batch.append(('create', entity.copy()))
transaction_count += 1
entity = self._create_random_entity_dict()
self.table.create_entity(entity)
# Act
sent_entity1 = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
batch = [(
'update',
sent_entity1,
{'etag':u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"', 'match_condition': MatchConditions.IfNotModified}
)]
with pytest.raises(TableTransactionError):
self.table.submit_transaction(batch)
# Assert
received_entity = self.table.get_entity(entity['PartitionKey'], entity['RowKey'])
self._assert_default_entity(received_entity)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_insert_replace(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_replace'
entity.test = True
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
batch = [('upsert', entity, {'mode': UpdateMode.REPLACE})]
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, 1)
assert 'etag' in transaction_result[0]
entity = self.table.get_entity('001', 'batch_insert_replace')
assert entity is not None
assert 'value' == entity.test2
assert 1234567890 == entity.test4
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_insert_merge(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_merge'
entity.test = True
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
batch = [('upsert', entity, {'mode': UpdateMode.MERGE})]
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, 1)
assert 'etag' in transaction_result[0]
entity = self.table.get_entity('001', 'batch_insert_merge')
assert entity is not None
assert 'value' == entity.test2
assert 1234567890 == entity.test4
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_delete(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = u'001'
entity.RowKey = u'batch_delete'
entity.test = EntityProperty(True)
entity.test2 = u'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
self.table.create_entity(entity)
entity = self.table.get_entity(partition_key=u'001', row_key=u'batch_delete')
assert 3 == entity.test3
batch = [('delete', entity)]
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, 1)
assert 'etag' not in transaction_result[0]
with pytest.raises(ResourceNotFoundError):
entity = self.table.get_entity(partition_key=entity.PartitionKey, row_key=entity.RowKey)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_inserts(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = 'batch_inserts'
entity.test = EntityProperty(True)
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
transaction_count = 0
batch = []
for i in range(100):
entity.RowKey = str(i)
batch.append(('create', entity.copy()))
transaction_count += 1
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, transaction_count)
assert 'etag' in transaction_result[0]
entities = list(self.table.query_entities("PartitionKey eq 'batch_inserts'"))
# Assert
assert entities is not None
assert transaction_count == len(entities)
e = self.table.get_entity('batch_inserts', '1')
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_all_operations_together(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Act
entity = TableEntity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty(True)
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
self.table.create_entity(entity)
entity.RowKey = 'batch_all_operations_together-2'
self.table.create_entity(entity)
entity.RowKey = 'batch_all_operations_together-3'
self.table.create_entity(entity)
entity.RowKey = 'batch_all_operations_together-4'
self.table.create_entity(entity)
transaction_count = 0
batch = []
entity.RowKey = 'batch_all_operations_together'
batch.append((TransactionOperation.CREATE, entity.copy()))
transaction_count += 1
entity.RowKey = 'batch_all_operations_together-1'
batch.append((TransactionOperation.DELETE, entity.copy()))
transaction_count += 1
entity.RowKey = 'batch_all_operations_together-2'
entity.test3 = 10
batch.append((TransactionOperation.UPDATE, entity.copy()))
transaction_count += 1
entity.RowKey = 'batch_all_operations_together-3'
entity.test3 = 100
batch.append((TransactionOperation.UPDATE, entity.copy(), {'mode': UpdateMode.REPLACE}))
transaction_count += 1
entity.RowKey = 'batch_all_operations_together-4'
entity.test3 = 10
batch.append((TransactionOperation.UPSERT, entity.copy()))
transaction_count += 1
entity.RowKey = 'batch_all_operations_together-5'
batch.append((TransactionOperation.UPSERT, entity.copy(), {'mode': UpdateMode.REPLACE}))
transaction_count += 1
transaction_result = self.table.submit_transaction(batch)
# Assert
self._assert_valid_batch_transaction(transaction_result, transaction_count)
assert 'etag' in transaction_result[0]
assert 'etag' not in transaction_result[1]
assert 'etag' in transaction_result[2]
assert 'etag' in transaction_result[3]
assert 'etag' in transaction_result[4]
assert 'etag' in transaction_result[5]
# Assert
entities = list(self.table.query_entities("PartitionKey eq '003'"))
assert 5 == len(entities)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_reuse(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
table2 = self._get_table_reference('table2')
table2.create_table()
# Act
entity = TableEntity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty(True)
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
entity.test5 = datetime.utcnow()
batch = []
batch.append(('upsert', entity.copy()))
entity.RowKey = 'batch_all_operations_together-2'
batch.append(('upsert', entity.copy()))
entity.RowKey = 'batch_all_operations_together-3'
batch.append(('upsert', entity.copy()))
entity.RowKey = 'batch_all_operations_together-4'
batch.append(('upsert', entity.copy()))
resp1 = self.table.submit_transaction(batch)
resp2 = table2.submit_transaction(batch)
entities = list(self.table.query_entities("PartitionKey eq '003'"))
assert 4 == len(entities)
            entities2 = list(table2.query_entities("PartitionKey eq '003'"))
            assert 4 == len(entities2)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_same_row_operations_fail(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_entity_dict('001', 'batch_negative_1')
self.table.create_entity(entity)
# Act
batch = []
entity = self._create_updated_entity_dict(
'001', 'batch_negative_1')
batch.append(('update', entity.copy()))
entity = self._create_random_entity_dict(
'001', 'batch_negative_1')
batch.append(('update', entity.copy(), {'mode': UpdateMode.REPLACE}))
# Assert
with pytest.raises(TableTransactionError):
self.table.submit_transaction(batch)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_different_partition_operations_fail(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_entity_dict('001', 'batch_negative_1')
self.table.create_entity(entity)
# Act
batch = []
entity = self._create_updated_entity_dict(
'001', 'batch_negative_1')
batch.append(('update', entity.copy()))
entity = self._create_random_entity_dict(
'002', 'batch_negative_1')
batch.append(('update', entity.copy()))
with pytest.raises(ValueError):
self.table.submit_transaction(batch)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_too_many_ops(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_entity_dict('001', 'batch_negative_1')
self.table.create_entity(entity)
# Act
with pytest.raises(TableTransactionError):
batch = []
for i in range(0, 101):
entity = TableEntity()
entity.PartitionKey = 'large'
entity.RowKey = 'item{0}'.format(i)
batch.append(('create', entity.copy()))
self.table.submit_transaction(batch)
# Assert
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_batch_different_partition_keys(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_entity_dict('001', 'batch_negative_1')
entity2 = self._create_random_entity_dict('002', 'batch_negative_1')
batch = [('create', entity), ('create', entity2)]
with pytest.raises(ValueError):
self.table.submit_transaction(batch)
# Assert
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_new_non_existent_table(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_entity_dict('001', 'batch_negative_1')
tc = self.ts.get_table_client("doesntexist")
batch = [('create', entity)]
with pytest.raises(TableTransactionError):
resp = tc.submit_transaction(batch)
# Assert
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_new_invalid_key(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
invalid_key = tables_primary_storage_account_key[0:-6] + "==" # cut off a bit from the end to invalidate
self.ts = TableServiceClient(self.account_url(tables_storage_account_name, "table"), invalid_key)
self.table_name = self.get_resource_name('uttable')
self.table = self.ts.get_table_client(self.table_name)
entity = self._create_random_entity_dict('001', 'batch_negative_1')
batch = [('create', entity)]
with pytest.raises(ClientAuthenticationError):
resp = self.table.submit_transaction(batch)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@TablesPreparer()
def test_new_delete_nonexistent_entity(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_entity_dict('001', 'batch_negative_1')
batch = [('delete', entity)]
with pytest.raises(TableTransactionError):
resp = self.table.submit_transaction(batch)
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@pytest.mark.live_test_only
@TablesPreparer()
def test_batch_sas_auth(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
token = generate_table_sas(
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name,
permission=TableSasPermissions(add=True, read=True, update=True, delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start=datetime.utcnow() - timedelta(minutes=1),
)
token = AzureSasCredential(token)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=token,
)
table = service.get_table_client(self.table_name)
entity = TableEntity()
entity.PartitionKey = 'batch_inserts'
entity.test = EntityProperty(True)
entity.test2 = 'value'
entity.test3 = 3
entity.test4 = EntityProperty(1234567890)
batch = []
transaction_count = 0
for i in range(10):
entity.RowKey = str(i)
batch.append(('create', entity.copy()))
transaction_count += 1
transaction_result = table.submit_transaction(batch)
assert transaction_result
total_entities = 0
for e in table.list_entities():
total_entities += 1
assert total_entities == transaction_count
finally:
self._tear_down()
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python3")
@pytest.mark.live_test_only # Request bodies are very large
@TablesPreparer()
def test_batch_request_too_large(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
batch = []
entity = {
'PartitionKey': 'pk001',
'Foo': os.urandom(1024*64),
'Bar': os.urandom(1024*64),
'Baz': os.urandom(1024*64)
}
for i in range(50):
entity['RowKey'] = str(i)
batch.append(('create', entity.copy()))
with pytest.raises(RequestTooLargeError):
self.table.submit_transaction(batch)
finally:
self._tear_down()
class TestTableUnitTest(TableTestCase):
#--Test cases for batch ---------------------------------------------
def test_inferred_types(self):
# Arrange
# Act
entity = TableEntity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
entity.test = EntityProperty(True)
entity.test2 = EntityProperty(b'abcdef')
entity.test3 = EntityProperty(u'c9da6455-213d-42c9-9a79-3e9149a57833')
entity.test4 = EntityProperty(datetime(1973, 10, 4, tzinfo=tzutc()))
entity.test5 = EntityProperty(u"stringystring")
entity.test6 = EntityProperty(3.14159)
entity.test7 = EntityProperty(100)
entity.test8 = EntityProperty(2 ** 33, EdmType.INT64)
# Assert
assert entity.test.type == EdmType.BOOLEAN
assert entity.test2.type == EdmType.BINARY
assert entity.test3.type == EdmType.GUID
assert entity.test4.type == EdmType.DATETIME
assert entity.test5.type == EdmType.STRING
assert entity.test6.type == EdmType.DOUBLE
assert entity.test7.type == EdmType.INT32
assert entity.test8.type == EdmType.INT64
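# Illustrative sketch, not exercised by the tests above: submit_transaction accepts a
# list of (operation, entity) or (operation, entity, kwargs) tuples, exactly as the
# tests build them. The helper name is hypothetical; UpdateMode is the same enum the
# test module already imports.
def _example_batch(table):
    e1 = {u'PartitionKey': u'pk', u'RowKey': u'rk-1', u'value': 1}
    e2 = {u'PartitionKey': u'pk', u'RowKey': u'rk-2', u'value': 2}
    batch = [
        ('upsert', e1),                                 # create-or-replace, no extra kwargs
        ('upsert', e2, {'mode': UpdateMode.REPLACE}),   # kwargs passed as a third element
    ]
    # Every operation in a single transaction must share the same PartitionKey and
    # target distinct RowKeys (see the negative tests above). The call returns one
    # metadata mapping per operation, usually containing an 'etag'.
    return table.submit_transaction(batch)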
| 39.382979
| 128
| 0.610651
|
a956ddca2b5e0f5150988278a5308b837fb53518
| 426
|
py
|
Python
|
module_06/src/locators/checkout.py
|
memsprop/2021_python_selenium
|
eb3de282b11933cb6886d4008fb1b557147afaae
|
[
"Unlicense"
] | null | null | null |
module_06/src/locators/checkout.py
|
memsprop/2021_python_selenium
|
eb3de282b11933cb6886d4008fb1b557147afaae
|
[
"Unlicense"
] | null | null | null |
module_06/src/locators/checkout.py
|
memsprop/2021_python_selenium
|
eb3de282b11933cb6886d4008fb1b557147afaae
|
[
"Unlicense"
] | null | null | null |
"""Locators for Your Cart items"""
from selenium.webdriver.common.by import By
class CartLoc:
"""Cart item locators.
Locators are relative to parent container div.
"""
CART_LST = (By.CLASS_NAME, 'cart_list')
CHECKOUT_BTN = (By.XPATH, "//*[@class='cart_footer']/a[contains(@class,'btn_action')]")
CONTINUE_SHOPPING_BTN = (By.XPATH, "//*[@class='cart_footer']/a[contains(@class,'btn_secondary')]")
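# Usage sketch (assumed, not part of the original module): each locator is a
# (By, selector) tuple that unpacks directly into Selenium's find_element call.
# The helper below is hypothetical and only illustrates the intended use.
def proceed_to_checkout(driver):
    """Click the checkout button inside the cart footer."""
    driver.find_element(*CartLoc.CHECKOUT_BTN).click()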
| 26.625
| 103
| 0.673709
|
3bfe17d4dc49d2a3d5767107c06da5f3d692c8c7
| 4,464
|
py
|
Python
|
ddd20/prepare_dataset.py
|
yeshwanthv5/federated-learning
|
313477a8a05d1fc8d43051ea7f09c3f61865807b
|
[
"MIT"
] | null | null | null |
ddd20/prepare_dataset.py
|
yeshwanthv5/federated-learning
|
313477a8a05d1fc8d43051ea7f09c3f61865807b
|
[
"MIT"
] | null | null | null |
ddd20/prepare_dataset.py
|
yeshwanthv5/federated-learning
|
313477a8a05d1fc8d43051ea7f09c3f61865807b
|
[
"MIT"
] | null | null | null |
import os
import itertools
import pdb
import argparse
import time
def write_sbatch_conf(f, exp_name = "default", grace_partition = "pi_panda", dir_name = "./"):
f.write('#!/bin/bash\n')
f.write('#SBATCH --job-name=' + exp_name + '\n')
f.write('#SBATCH --ntasks=1 --nodes=1\n')
f.write('#SBATCH --partition=' + grace_partition + '\n')
f.write('#SBATCH --mem=64G\n')
f.write('#SBATCH --cpus-per-task=8\n')
f.write('#SBATCH --time=24:00:00\n')
f.write('#SBATCH --output=' + dir_name + exp_name + '.log\n')
f.write('module load miniconda\n')
f.write('conda activate ddd20\n')
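# Illustration only (not used by the pipeline below): write_sbatch_conf only needs an
# object with a write() method, so an in-memory io.StringIO buffer can preview the
# generated SBATCH header without touching the filesystem. The helper name and the
# example arguments are assumptions.
import io

def preview_sbatch_header(exp_name="demo", grace_partition="bigmem", dir_name="out/logs/"):
    buf = io.StringIO()
    write_sbatch_conf(buf, exp_name, grace_partition, dir_name)
    # The buffer now starts with "#!/bin/bash" followed by the #SBATCH directives above.
    return buf.getvalue()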
def generate_script(file_name, exp_name = "default", grace_partition = "pi_panda", origin_dir = "data/fordfocus", out_dir="out_dir", to_do_file = "jul16/rec1500220388.hdf5"):
in_full_file_prefix = origin_dir + "/" + os.path.splitext(to_do_file)[0]
base_id = os.path.basename(in_full_file_prefix)
out_full_file_prefix = out_dir + "/" + base_id
f = open(file_name, 'w', buffering = 1)
write_sbatch_conf(f, exp_name, grace_partition, out_dir + "/logs/")
s = 'echo "Working on ' + out_full_file_prefix + '"\n'
f.write(s)
s = "ipython ./export.py -- " + in_full_file_prefix + ".hdf5 --binsize 0.05 --export_aps 1 --export_dvs 0 --out_file " + out_full_file_prefix + "_aps_frames.hdf5\n"
f.write(s)
# s = "ipython ./export.py -- " + in_full_file_prefix + ".hdf5 --binsize 0.01 --export_aps 0 --export_dvs 1 --out_file " + out_full_file_prefix + "_with_timesteps.hdf5 --split_timesteps --timesteps 5\n"
# f.write(s)
s = "ipython ./export.py -- " + in_full_file_prefix + ".hdf5 --binsize 0.05 --export_aps 0 --export_dvs 1 --out_file " + out_full_file_prefix + "_dvs_accum_frames.hdf5\n"
f.write(s)
# Prepare and resize
# ------------ Prepare APS -------------#
s = "ipython ./prepare_cnn_data.py -- --filename " + out_full_file_prefix + "_aps_frames.hdf5 --rewrite 1 --skip_mean_std 0\n"
f.write(s)
    # ----------- Prepare timestep split DVS (disabled) ------- #
    # s = "ipython ./prepare_cnn_data.py -- --filename " + out_full_file_prefix + "_with_timesteps.hdf5 --rewrite 1 --skip_mean_std 1 --split_timesteps --timesteps 5"
    # f.write(s)
    # ----------- Prepare accumulated DVS ----------- #
    s = "ipython ./prepare_cnn_data.py -- --filename " + out_full_file_prefix + "_dvs_accum_frames.hdf5 --rewrite 1 --skip_mean_std 0\n"
    f.write(s)
f.close()
def main():
# Constants
grace_partition = "bigmem"
origin_dir = "data/fordfocus/"
bin_size = "10ms"
result_dir = "processed_dataset_run2/"
day_files = ['jul16/rec1500220388.hdf5', 'jul18/rec1500383971.hdf5', 'jul18/rec1500402142.hdf5', 'jul28/rec1501288723.hdf5', 'jul29/rec1501349894.hdf5', 'aug01/rec1501614399.hdf5', 'aug08/rec1502241196.hdf5', 'aug15/rec1502825681.hdf5', 'jul02/rec1499023756.hdf5', 'jul05/rec1499275182.hdf5', 'jul08/rec1499533882.hdf5', 'jul16/rec1500215505.hdf5', 'jul17/rec1500314184.hdf5', 'jul17/rec1500329649.hdf5', 'aug05/rec1501953155.hdf5']
night_files = ['jul09/rec1499656391.hdf5', 'jul09/rec1499657850.hdf5', 'aug01/rec1501649676.hdf5', 'aug01/rec1501650719.hdf5', 'aug05/rec1501994881.hdf5', 'aug09/rec1502336427.hdf5', 'aug09/rec1502337436.hdf5', 'jul01/rec1498946027.hdf5', 'aug01/rec1501651162.hdf5', 'jul02/rec1499025222.hdf5', 'aug09/rec1502338023.hdf5', 'aug09/rec1502338983.hdf5', 'aug09/rec1502339743.hdf5', 'jul01/rec1498949617.hdf5', 'aug12/rec1502599151.hdf5']
out_dir = result_dir + "day"
for to_do_file in day_files:
file_name = "rog_scipt" + str(time.time()) + ".sh"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
os.makedirs(out_dir+"/logs")
exp_name = "ddd_preprocessing_" + os.path.splitext(os.path.basename(to_do_file))[0]
generate_script(file_name, exp_name, grace_partition, origin_dir, out_dir, to_do_file)
os.system("sbatch " + file_name)
out_dir = result_dir + "night"
for to_do_file in night_files:
file_name = "rog_scipt" + str(time.time()) + ".sh"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
os.makedirs(out_dir+"/logs")
exp_name = "ddd_preprocessing_" + os.path.splitext(os.path.basename(to_do_file))[0]
generate_script(file_name, exp_name, grace_partition, origin_dir, out_dir, to_do_file)
os.system("sbatch " + file_name)
if __name__ == "__main__":
main()
| 54.439024
| 438
| 0.667787
|
21c8aabf4152a1b3b883dff0c5038d4b2babbb93
| 8,669
|
py
|
Python
|
tiny_face_eval.py
|
atom06/Tiny_Faces_in_Tensorflow_v2
|
18251ba7e2bc6cbe70428fff92ac31f18e73cb4b
|
[
"MIT"
] | 1
|
2021-12-14T18:44:08.000Z
|
2021-12-14T18:44:08.000Z
|
tiny_face_eval.py
|
atom06/Tiny_Faces_in_Tensorflow_v2
|
18251ba7e2bc6cbe70428fff92ac31f18e73cb4b
|
[
"MIT"
] | 1
|
2021-12-14T20:48:21.000Z
|
2021-12-14T20:48:21.000Z
|
tiny_face_eval.py
|
abtGIT/Tiny_Faces_in_Tensorflow_v2
|
18251ba7e2bc6cbe70428fff92ac31f18e73cb4b
|
[
"MIT"
] | 1
|
2021-05-12T16:18:58.000Z
|
2021-05-12T16:18:58.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tiny_face_model
import util
from argparse import ArgumentParser
import cv2
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pylab as pl
import time
import os
import sys
from scipy.special import expit
import glob
MAX_INPUT_DIM = 5000.0
def overlay_bounding_boxes(raw_img, refined_bboxes, lw):
"""Overlay bounding boxes of face on images.
Args:
raw_img:
A target image.
refined_bboxes:
Bounding boxes of detected faces.
lw:
Line width of bounding boxes. If zero specified,
this is determined based on confidence of each detection.
Returns:
None.
"""
# Overlay bounding boxes on an image with the color based on the confidence.
for r in refined_bboxes:
_score = expit(r[4])
cm_idx = int(np.ceil(_score * 255))
rect_color = [int(np.ceil(x * 255)) for x in util.cm_data[cm_idx]] # parula
_lw = lw
if lw == 0: # line width of each bounding box is adaptively determined.
      bw, bh = r[2] - r[0] + 1, r[3] - r[1] + 1
_lw = 1 if min(bw, bh) <= 20 else max(2, min(3, min(bh / 20, bw / 20)))
_lw = int(np.ceil(_lw * _score))
_r = [int(x) for x in r[:4]]
cv2.rectangle(raw_img, (_r[0], _r[1]), (_r[2], _r[3]), rect_color, _lw)
def evaluate(weight_file_path, data_dir, output_dir, prob_thresh=0.5, nms_thresh=0.1, lw=3, display=False):
"""Detect faces in images.
Args:
prob_thresh:
The threshold of detection confidence.
nms_thresh:
The overlap threshold of non maximum suppression
weight_file_path:
A pretrained weight file in the pickle format
generated by matconvnet_hr101_to_tf.py.
data_dir:
A directory which contains images.
output_dir:
A directory into which images with detected faces are output.
lw:
Line width of bounding boxes. If zero specified,
this is determined based on confidence of each detection.
display:
Display tiny face images on window.
Returns:
None.
"""
# placeholder of input images. Currently batch size of one is supported.
x = tf.compat.v1.placeholder(tf.float32, [1, None, None, 3]) # n, h, w, c
  # Create the tiny face model whose weights are loaded from a pretrained model.
model = tiny_face_model.Model(weight_file_path)
score_final = model.tiny_face(x)
# Find image files in data_dir.
filenames = []
for ext in ('*.png', '*.gif', '*.jpg', '*.jpeg'):
filenames.extend(glob.glob(os.path.join(data_dir, ext)))
  # Load an average image and clusters (reference boxes of templates).
with open(weight_file_path, "rb") as f:
_, mat_params_dict = pickle.load(f)
average_image = model.get_data_by_key("average_image")
clusters = model.get_data_by_key("clusters")
clusters_h = clusters[:, 3] - clusters[:, 1] + 1
clusters_w = clusters[:, 2] - clusters[:, 0] + 1
normal_idx = np.where(clusters[:, 4] == 1)
# main
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
for filename in filenames:
fname = filename.split(os.sep)[-1]
raw_img = cv2.imread(filename)
raw_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
raw_img_f = raw_img.astype(np.float32)
def _calc_scales():
raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),
np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))
max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))
scales_down = np.arange(min_scale, 0, 1.)
scales_up = np.arange(0.5, max_scale, 0.5)
scales_pow = np.hstack((scales_down, scales_up))
scales = np.power(2.0, scales_pow)
return scales
scales = _calc_scales()
start = time.time()
# initialize output
bboxes = np.empty(shape=(0, 5))
# process input at different scales
for s in scales:
print("Processing {} at scale {:.4f}".format(fname, s))
img = cv2.resize(raw_img_f, (0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
img = img - average_image
img = img[np.newaxis, :]
        # We don't run every template at every scale: tids lists the template ids to
        # evaluate at this scale; the rest (ignoredTids) are skipped.
tids = list(range(4, 12)) + ([] if s <= 1.0 else list(range(18, 25)))
ignoredTids = list(set(range(0, clusters.shape[0])) - set(tids))
# run through the net
score_final_tf = sess.run(score_final, feed_dict={x: img})
# collect scores
score_cls_tf, score_reg_tf = score_final_tf[:, :, :, :25], score_final_tf[:, :, :, 25:125]
prob_cls_tf = expit(score_cls_tf)
prob_cls_tf[0, :, :, ignoredTids] = 0.0
def _calc_bounding_boxes():
# threshold for detection
_, fy, fx, fc = np.where(prob_cls_tf > prob_thresh)
# interpret heatmap into bounding boxes
cy = fy * 8 - 1
cx = fx * 8 - 1
ch = clusters[fc, 3] - clusters[fc, 1] + 1
cw = clusters[fc, 2] - clusters[fc, 0] + 1
# extract bounding box refinement
Nt = clusters.shape[0]
tx = score_reg_tf[0, :, :, 0:Nt]
ty = score_reg_tf[0, :, :, Nt:2*Nt]
tw = score_reg_tf[0, :, :, 2*Nt:3*Nt]
th = score_reg_tf[0, :, :, 3*Nt:4*Nt]
# refine bounding boxes
dcx = cw * tx[fy, fx, fc]
dcy = ch * ty[fy, fx, fc]
rcx = cx + dcx
rcy = cy + dcy
rcw = cw * np.exp(tw[fy, fx, fc])
rch = ch * np.exp(th[fy, fx, fc])
scores = score_cls_tf[0, fy, fx, fc]
tmp_bboxes = np.vstack((rcx - rcw / 2, rcy - rch / 2, rcx + rcw / 2, rcy + rch / 2))
tmp_bboxes = np.vstack((tmp_bboxes / s, scores))
tmp_bboxes = tmp_bboxes.transpose()
return tmp_bboxes
tmp_bboxes = _calc_bounding_boxes()
bboxes = np.vstack((bboxes, tmp_bboxes)) # <class 'tuple'>: (5265, 5)
print("time {:.2f} secs for {}".format(time.time() - start, fname))
# non maximum suppression
# refind_idx = util.nms(bboxes, nms_thresh)
refind_idx = tf.image.non_max_suppression(tf.convert_to_tensor(value=bboxes[:, :4], dtype=tf.float32),
tf.convert_to_tensor(value=bboxes[:, 4], dtype=tf.float32),
max_output_size=bboxes.shape[0], iou_threshold=nms_thresh)
refind_idx = sess.run(refind_idx)
refined_bboxes = bboxes[refind_idx]
overlay_bounding_boxes(raw_img, refined_bboxes, lw)
if display:
# plt.axis('off')
plt.imshow(raw_img)
plt.show()
# save image with bounding boxes
raw_img = cv2.cvtColor(raw_img, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(output_dir, fname), raw_img)
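# Standalone sketch (not called by evaluate) of the box decoding performed in
# _calc_bounding_boxes above: the network predicts centre offsets (tx, ty) and
# log-scale factors (tw, th) relative to a template box of size (cw, ch) centred at
# (cx, cy). Parameter names mirror the code above; the function itself is illustrative.
def _decode_single_box(cx, cy, cw, ch, tx, ty, tw, th):
    rcx = cx + cw * tx          # shift the centre by a fraction of the template width
    rcy = cy + ch * ty
    rcw = cw * np.exp(tw)       # widths and heights are regressed in log space
    rch = ch * np.exp(th)
    return (rcx - rcw / 2, rcy - rch / 2, rcx + rcw / 2, rcy + rch / 2)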
def main():
argparse = ArgumentParser()
argparse.add_argument('--weight_file_path', type=str, help='Pretrained weight file.', default="/path/to/mat2tf.pkl")
argparse.add_argument('--data_dir', type=str, help='Image data directory.', default="/path/to/input_image_directory")
argparse.add_argument('--output_dir', type=str, help='Output directory for images with faces detected.', default="/path/to/output_directory")
argparse.add_argument('--prob_thresh', type=float, help='The threshold of detection confidence(default: 0.5).', default=0.5)
argparse.add_argument('--nms_thresh', type=float, help='The overlap threshold of non maximum suppression(default: 0.1).', default=0.1)
argparse.add_argument('--line_width', type=int, help='Line width of bounding boxes(0: auto).', default=3)
argparse.add_argument('--display', type=bool, help='Display each image on window.', default=False)
args = argparse.parse_args()
# check arguments
assert os.path.exists(args.weight_file_path), "weight file: " + args.weight_file_path + " not found."
assert os.path.exists(args.data_dir), "data directory: " + args.data_dir + " not found."
assert os.path.exists(args.output_dir), "output directory: " + args.output_dir + " not found."
assert args.line_width >= 0, "line_width should be >= 0."
with tf.Graph().as_default():
evaluate(
weight_file_path=args.weight_file_path, data_dir=args.data_dir, output_dir=args.output_dir,
prob_thresh=args.prob_thresh, nms_thresh=args.nms_thresh,
lw=args.line_width, display=args.display)
if __name__ == '__main__':
main()
| 38.02193
| 143
| 0.638828
|
82f43e468c0b1d8d58bc13ef1b28c40348d183a6
| 4,217
|
py
|
Python
|
webscraper/finpi.py
|
rodrigolessa/demo.scraping.trademark
|
ab9c55262ea740434885c5efe5066915f7987320
|
[
"Apache-2.0"
] | null | null | null |
webscraper/finpi.py
|
rodrigolessa/demo.scraping.trademark
|
ab9c55262ea740434885c5efe5066915f7987320
|
[
"Apache-2.0"
] | null | null | null |
webscraper/finpi.py
|
rodrigolessa/demo.scraping.trademark
|
ab9c55262ea740434885c5efe5066915f7987320
|
[
"Apache-2.0"
] | null | null | null |
# Sample:
# create a concatenated string from 0 to 19 (e.g. "012..1819")
# nums = [str(n) for n in range(20)]
# print "".join(nums)
# much more efficient then: nums += str(n)
# Best
# nums = map(str, range(20))
# print "".join(nums)
# import the necessary packages
from requests import exceptions
import argparse
import requests
#import cv2
import os
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-q", "--query", required=True,
help="search query to search Bing Image API for")
ap.add_argument("-o", "--output", required=True,
help="path to output directory of images")
args = vars(ap.parse_args())
# Microsoft Cognitive Services API key
API_KEY = "447918fee9e8438e85bf0be72b84d915"
API_KEY_2 = "b385a571dad74d45a6a49046b6a462f1"
# Maximum number of results for a given search
MAX_RESULTS = 500
# Group size for results
GROUP_SIZE = 250
#https://gru.inpi.gov.br/pePI/servlet/LoginController?action=login
#https://gru.inpi.gov.br/pePI/jsp/marcas/Pesquisa_classe_basica.jsp
#https://gru.inpi.gov.br/pePI/jsp/marcas/Pesquisa_num_processo.jsp
#JSESSIONID
#BA8335BFAFA605D747B08CD5CCEBDC98
#Request URL: https://gru.inpi.gov.br/pePI/servlet/MarcasServletController
#Request Method: POST
# set the endpoint API URL
URL = "https://api.cognitive.microsoft.com/bing/v7.0/images/search"
#https://api.cognitive.microsoft.com/bing/v7.0/suggestions
#https://api.cognitive.microsoft.com/bing/v7.0/entities
#https://api.cognitive.microsoft.com/bing/v7.0/images
#https://api.cognitive.microsoft.com/bing/v7.0/news
#https://api.cognitive.microsoft.com/bing/v7.0/spellcheck
#https://api.cognitive.microsoft.com/bing/v7.0/videos
#https://api.cognitive.microsoft.com/bing/v7.0/images/visualsearch
#https://api.cognitive.microsoft.com/bing/v7.0
# When downloading images from the web, both Python itself and the requests
# library can raise a number of exceptions, so build a set of them up front
# so we can filter on them later.
EXCEPTIONS = set([IOError, FileNotFoundError,
exceptions.RequestException, exceptions.HTTPError,
exceptions.ConnectionError, exceptions.Timeout])
# Search term
term = args["query"]
# Request headers carrying the subscription key
headers = {"Ocp-Apim-Subscription-Key": API_KEY}
# Search parameters
params = {"q": term, "offset": 0, "count": GROUP_SIZE}
# Debug
print("Searching API for '{}'".format(term))
# The search
search = requests.get(URL, headers=headers, params=params)
search.raise_for_status()
# Grab the results from the search,
results = search.json()
# including the total number of estimated results returned by the API
estNumResults = min(results["totalEstimatedMatches"], MAX_RESULTS)
# Debug
print("{} results for '{}'".format(estNumResults, term))
# initialize the total number of images downloaded thus far
total = 0
# Loop over the estimated number of results in `GROUP_SIZE` groups
for offset in range(0, estNumResults, GROUP_SIZE):
# Update the search parameters using the current offset
print("Making request for group {}-{} of {}...".format(offset, offset + GROUP_SIZE, estNumResults))
params["offset"] = offset
# The search
search = requests.get(URL, headers=headers, params=params)
search.raise_for_status()
results = search.json()
print("Saving images for group {}-{} of {}...".format(offset, offset + GROUP_SIZE, estNumResults))
# Loop over the results
for v in results["value"]:
# Try to download the image
try:
# Make a request to download
print(" - fetching: {}".format(v["contentUrl"]))
r = requests.get(v["contentUrl"], timeout=30)
# Build the path to the output image
ext = v["contentUrl"][v["contentUrl"].rfind("."):]
p = os.path.sep.join([args["output"], "{}{}".format(str(total).zfill(8), ext)])
# Write the image
f = open(p, "wb")
f.write(r.content)
f.close()
# Catch any errors
except Exception as e:
# check to see if our exception is in the list
if type(e) in EXCEPTIONS:
print(" - skipping: {}".format(v["contentUrl"]))
continue
# TODO: Try to load the image from disk
# TODO: If the image is `None` then we could not properly load
# TODO: Remove the image
# Update counter
total += 1
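# Illustrative helper (not used above): the paging loop walks the estimated result
# count in GROUP_SIZE chunks; this just makes the offsets it will request explicit.
def request_offsets(est_num_results, group_size=GROUP_SIZE):
    return list(range(0, est_num_results, group_size))
# e.g. request_offsets(600, 250) -> [0, 250, 500]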
| 30.781022
| 100
| 0.727531
|
961b3bd01ec3f27385e63694e05c77b8c5d5828a
| 3,347
|
py
|
Python
|
mtgelo/scraper/database.py
|
shawntabrizi/mtgelo
|
84a6c08413b427c676846df0f0ac0fdafbbf40a8
|
[
"MIT"
] | null | null | null |
mtgelo/scraper/database.py
|
shawntabrizi/mtgelo
|
84a6c08413b427c676846df0f0ac0fdafbbf40a8
|
[
"MIT"
] | null | null | null |
mtgelo/scraper/database.py
|
shawntabrizi/mtgelo
|
84a6c08413b427c676846df0f0ac0fdafbbf40a8
|
[
"MIT"
] | null | null | null |
import sqlite3
import os
from mtgelo.scraper.unicode_parser import *
# Hardcoded local override of __file__ (development workaround); a raw string keeps
# the Windows path backslashes from being treated as escape sequences.
__file__ = r"C:\Users\shawn\PycharmProjects\mtgelo\mtgelo\scraper\database.py"
def reset_db():
delete_db()
create_db()
def connect_db():
file_dir = os.path.dirname(__file__)
    filename = os.path.join(file_dir, '..', 'db', 'playerhistory.db')
return sqlite3.connect(filename)
def delete_db():
conn = connect_db()
c = conn.cursor()
c.executescript('drop table if exists playerHistory')
conn.close()
def create_db():
conn = connect_db()
c = conn.cursor()
#c.executescript('drop table if exists playerHistory')
c.executescript('''create table playerHistory(
coverageid num,
eventid num,
roundid num,
rowid num,
eventtype text,
eventname text,
date text,
round text,
matchtable text,
playerfirstname text,
playerlastname text,
playercountry text,
result text,
opponentfirstname text,
opponentlastname text,
opponentcountry text,
won text,
lost text,
drew text,
modified text,
constraint unique_row unique (coverageid,eventid,roundid,rowid)
)''')
conn.close()
def create_playerranking():
conn = connect_db()
c = conn.cursor()
c.executescript('drop table if exists playerRank')
c.executescript('''create table playerRank(
playername text,
rating NUMERIC ,
sigma NUMERIC,
count NUMERIC,
mtgelo NUMERIC,
tsrating NUMERIC
)''')
conn.close()
def playerHistoryToDB(playerHistory):
#print playerHistory
conn = connect_db()
c = conn.cursor()
print("ADDING", playerHistory)
if (len(playerHistory) != 20):
print("BAD ITEM!!!!")
else:
c.execute('insert or replace into playerHistory values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', playerHistory)
conn.commit()
conn.close()
def playerRankToDB(player_ratings):
# print playerHistory
conn = connect_db()
c = conn.cursor()
print("Adding")
c.executemany('insert into playerRank values (?,?,?,?,?,?)',
player_ratings)
conn.commit()
conn.close()
def dbNormalizeNames():
conn = connect_db()
c = conn.cursor()
c.execute('select * FROM playerHistory')
db = c.fetchall()
strip_db = [[strip_accents(item.lower()) if isinstance(item, str) else item for item in row] for row in db]
c.executemany("""REPLACE into playerHistory VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)""", strip_db)
conn.commit()
conn.close()
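# Illustrative only: the 20-element tuple expected by playerHistoryToDB must follow the
# column order of the playerHistory table defined in create_db() above. The values here
# are made up for the example.
def example_row():
    return ('cov1', 'evt1', 'rnd1', 'row1',          # coverageid, eventid, roundid, rowid
            'GP', 'Grand Prix Example', '2017-01-01', 'Round 1', 'Table 5',
            'jane', 'doe', 'US', 'win',              # player first/last name, country, result
            'john', 'smith', 'US',                   # opponent first/last name, country
            '2', '1', '0', '2017-01-01')             # won, lost, drew, modified

# playerHistoryToDB(example_row())  # would insert (or replace) one row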
| 32.182692
| 121
| 0.480729
|
8e9d5108472245dfce5373c9cd20ee66cdc885bc
| 450
|
py
|
Python
|
data/scripts/templates/object/building/player/city/shared_cantina_naboo.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/building/player/city/shared_cantina_naboo.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/building/player/city/shared_cantina_naboo.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/player/city/shared_cantina_naboo.iff"
result.attribute_template_id = -1
result.stfName("building_name","cantina")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.470588
| 73
| 0.726667
|
fbe8e88e1c3ec27075c4c08a3221791d7052bbaf
| 502
|
py
|
Python
|
WebCLI/views/my_algorithms.py
|
quantum-ohtu/WebMark
|
485e30b0339835d342d8f14be95ebcd135f832f0
|
[
"MIT"
] | 1
|
2021-05-22T03:32:04.000Z
|
2021-05-22T03:32:04.000Z
|
WebCLI/views/my_algorithms.py
|
quantum-ohtu/WebMark
|
485e30b0339835d342d8f14be95ebcd135f832f0
|
[
"MIT"
] | 137
|
2021-01-25T10:54:32.000Z
|
2021-10-07T06:42:46.000Z
|
WebCLI/views/my_algorithms.py
|
quantum-ohtu/WebMark
|
485e30b0339835d342d8f14be95ebcd135f832f0
|
[
"MIT"
] | 3
|
2021-05-18T17:36:54.000Z
|
2021-06-04T13:49:48.000Z
|
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from ..models import Algorithm
from .homepage import AlgorithmListView
class MyAlgorithmListView(AlgorithmListView):
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
return super(MyAlgorithmListView, self).get(request, *args, **kwargs)
def get_queryset(self):
return Algorithm.objects.filter(user=self.request.user).order_by("name")
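# Hypothetical wiring sketch (not part of this module): a class-based view like this is
# typically exposed through a URLconf. The route string and name below are assumptions
# for illustration only.
from django.urls import path

example_urlpatterns = [
    path("my-algorithms/", MyAlgorithmListView.as_view(), name="my_algorithms"),
]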
| 33.466667
| 80
| 0.772908
|
9312631b0c4dcd8322a54e15facc14505af833ad
| 7,655
|
py
|
Python
|
platform.py
|
leroyle/nordicnrf52
|
52afc08fadd83154002d9bf22090d56966bfffa9
|
[
"Apache-2.0"
] | null | null | null |
platform.py
|
leroyle/nordicnrf52
|
52afc08fadd83154002d9bf22090d56966bfffa9
|
[
"Apache-2.0"
] | null | null | null |
platform.py
|
leroyle/nordicnrf52
|
52afc08fadd83154002d9bf22090d56966bfffa9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import platform
from platformio.managers.platform import PlatformBase
from platformio.util import get_systype
class Nordicnrf52Platform(PlatformBase):
def is_embedded(self):
return True
def configure_default_packages(self, variables, targets):
upload_protocol = ""
board = variables.get("board")
frameworks = variables.get("pioframework", [])
if board:
upload_protocol = variables.get(
"upload_protocol",
self.board_config(board).get("upload.protocol", ""))
if self.board_config(board).get("build.bsp.name",
"nrf5") == "adafruit":
self.frameworks["arduino"][
"package"] = "framework-arduinoadafruitnrf52"
self.packages["framework-cmsis"]["optional"] = False
self.packages["tool-adafruit-nrfutil"]["optional"] = False
if "mbed" in frameworks:
deprecated_boards_file = os.path.join(
self.get_dir(), "misc", "mbed_deprecated_boards.json")
if os.path.isfile(deprecated_boards_file):
with open(deprecated_boards_file) as fp:
if board in json.load(fp):
self.packages["framework-mbed"]["version"] = "~6.51506.0"
self.packages["toolchain-gccarmnoneeabi"]["version"] = "~1.90201.0"
if "zephyr" in frameworks:
for p in self.packages:
if p in ("tool-cmake", "tool-dtc", "tool-ninja"):
self.packages[p]["optional"] = False
self.packages["toolchain-gccarmnoneeabi"]["version"] = "~1.80201.0"
if "windows" not in get_systype():
self.packages["tool-gperf"]["optional"] = False
if board == "nano33ble":
self.packages["toolchain-gccarmnoneeabi"]["version"] = "~1.80201.0"
self.frameworks["arduino"]["package"] = "framework-arduino-mbed"
self.frameworks["arduino"][
"script"
] = "builder/frameworks/arduino/mbed-core/arduino-core-mbed.py"
if set(["bootloader", "erase"]) & set(targets):
self.packages["tool-nrfjprog"]["optional"] = False
elif (upload_protocol and upload_protocol != "nrfjprog"
and "tool-nrfjprog" in self.packages):
del self.packages["tool-nrfjprog"]
# configure J-LINK tool
jlink_conds = [
"jlink" in variables.get(option, "")
for option in ("upload_protocol", "debug_tool")
]
if board:
board_config = self.board_config(board)
jlink_conds.extend([
"jlink" in board_config.get(key, "")
for key in ("debug.default_tools", "upload.protocol")
])
jlink_pkgname = "tool-jlink"
if not any(jlink_conds) and jlink_pkgname in self.packages:
del self.packages[jlink_pkgname]
return PlatformBase.configure_default_packages(self, variables,
targets)
def get_boards(self, id_=None):
result = PlatformBase.get_boards(self, id_)
if not result:
return result
if id_:
return self._add_default_debug_tools(result)
else:
for key, value in result.items():
result[key] = self._add_default_debug_tools(result[key])
return result
def _add_default_debug_tools(self, board):
debug = board.manifest.get("debug", {})
upload_protocols = board.manifest.get("upload", {}).get(
"protocols", [])
if "tools" not in debug:
debug["tools"] = {}
# J-Link / ST-Link / BlackMagic Probe
for link in ("blackmagic", "jlink", "stlink", "cmsis-dap"):
if link not in upload_protocols or link in debug['tools']:
continue
if link == "blackmagic":
debug["tools"]["blackmagic"] = {
"hwids": [["0x1d50", "0x6018"]],
"require_debug_port": True
}
elif link == "jlink":
assert debug.get("jlink_device"), (
"Missed J-Link Device ID for %s" % board.id)
debug["tools"][link] = {
"server": {
"package": "tool-jlink",
"arguments": [
"-singlerun",
"-if", "SWD",
"-select", "USB",
"-device", debug.get("jlink_device"),
"-port", "2331"
],
"executable": ("JLinkGDBServerCL.exe"
if platform.system() == "Windows" else
"JLinkGDBServer")
}
}
else:
server_args = [
"-s", "$PACKAGE_DIR/scripts",
"-f", "interface/%s.cfg" % link
]
if link == "stlink":
server_args.extend([
"-c",
"transport select hla_swd; set WORKAREASIZE 0x4000"
])
server_args.extend(["-f", "target/nrf52.cfg"])
debug["tools"][link] = {
"server": {
"package": "tool-openocd",
"executable": "bin/openocd",
"arguments": server_args
}
}
server_args.extend(debug.get("openocd_extra_args", []))
debug["tools"][link]["onboard"] = link in debug.get("onboard_tools", [])
debug["tools"][link]["default"] = link in debug.get("default_tools", [])
board.manifest['debug'] = debug
return board
def configure_debug_options(self, initial_debug_options, ide_data):
debug_options = copy.deepcopy(initial_debug_options)
adapter_speed = initial_debug_options.get("speed")
if adapter_speed:
server_options = debug_options.get("server") or {}
server_executable = server_options.get("executable", "").lower()
if "openocd" in server_executable:
debug_options["server"]["arguments"].extend(
["-c", "adapter speed %s" % adapter_speed]
)
elif "jlink" in server_executable:
debug_options["server"]["arguments"].extend(
["-speed", adapter_speed or "4000"]
)
return debug_options
| 41.830601
| 86
| 0.508165
|
57d0d671131dc48672f3fe73cfd6845b0c5e9659
| 3,082
|
py
|
Python
|
Project_1/test_pendulum.py
|
eugene-UiO/IN1910
|
a8dbc78ba36b2881ad4790d05c8a4ae0ad762128
|
[
"MIT"
] | null | null | null |
Project_1/test_pendulum.py
|
eugene-UiO/IN1910
|
a8dbc78ba36b2881ad4790d05c8a4ae0ad762128
|
[
"MIT"
] | 1
|
2021-11-12T12:07:55.000Z
|
2021-11-12T12:07:55.000Z
|
Project_1/test_pendulum.py
|
henrik-uio/IN1910
|
a8dbc78ba36b2881ad4790d05c8a4ae0ad762128
|
[
"MIT"
] | null | null | null |
import nose.tools as nt # Using nosetests
import numpy as np
from pendulum import Pendulum
def test_special_method_call_in_Pendulum_class():
"""Tests that theta and omega is computed correctly."""
# Test values
theta = np.pi/4 # Angular position of the pendulum
omega = 0.1 # Angular velocity of the pendulum
analytic = [0.1, -3.1530534197454685]
eps = 10**(-7)
pendel = Pendulum(L=2.2)
computed = pendel(0,[theta,omega])
assert(abs(computed[0] - analytic[0]) < eps)
assert(abs(computed[1] - analytic[1]) < eps)
def test_special_method_call_in_Pendulum_class_keeps_a_pendulum_at_rest():
"""Tests that the pendulum is kept at rest."""
# Test values
theta0 = 0
omega0 = 0
analytic = [0, 0]
eps = 10**(-7)
pendel = Pendulum()
computed = pendel(0,[theta0,omega0])
assert(abs(computed[0] - analytic[0]) < eps)
assert(abs(computed[1] - analytic[1]) < eps)
@nt.raises(AttributeError)
def test_error_if_solve_method_has_not_been_called():
"""
Test that the solve method has been called. Error raised if attributes dont exist.
"""
pendel = Pendulum()
theta = pendel.theta
omega = pendel.omega
time = pendel.t
def test_only_the_latest_solution_is_stored():
"""Tests that latest solution overwrites previous ones."""
y0_1 = [0, 0]
T_1 = 5
dt_1 = 0.1
y0_2 = [2, 3]
T_2 = 15
dt_2 = 0.01
y0_3 = [1, 4]
T_3 = 10
dt_3 = 0.05
pendel = Pendulum()
pendel.solve(y0_1, T_1, dt_1)
len_1 = len(pendel.t) #store previous length
pendel.solve(y0_2, T_2, dt_2)
len_2 = len(pendel.t) #store previous length
pendel.solve(y0_3, T_3, dt_3)
#Check length of t
assert(len(pendel.t) != len_1)
assert(len(pendel.t) != len_2)
pendel2 = Pendulum()
pendel2.solve(y0_3, T_3, dt_3)
# Solve pendel2 for case #3 only
# Check so that pendel is the latest solution
for i in range(len(pendel.x)):
assert(pendel.x[i] == pendel2.x[i])
assert(pendel.y[i] == pendel2.y[i])
def test_solve_method_in_Pendulum_class_theta_omega_zero_arrays():
"""
Test solve method keeps pendulum at rest for initial y0=[0,0] while t=i*dt.
"""
y0 = [0, 0]
T = 5
dt = 0.1
pendel = Pendulum()
pendel.solve(y0, T, dt)
for i in range(len(pendel.t)):
assert(pendel.t[i] == i*pendel.dt)
assert(pendel.theta[i] == 0)
assert(pendel.omega[i] == 0)
def test_x_and_y_positions_are_correct():
"""
Tests that x and y position is computed correctly by testing x^2 + y^2 = L^2.
"""
y0 = [2, 3]
T = 15
dt = 0.01
eps = 10**(-7)
pendel = Pendulum(L=2)
pendel.solve(y0, T, dt)
sol = pendel.solution
array_of_L = np.zeros(len(sol.y[0])) + (pendel.L**2)
computed_radius_squared = pendel.x**2 + pendel.y**2
for i in range(len(sol.y[0])):
assert(abs(computed_radius_squared[i] - array_of_L[i]) < eps)
if __name__ == "__main__":
import nose
nose.run()
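# Minimal sketch of the interface these tests exercise, inferred from the assertions
# above: the analytic value -3.1530534197454685 for L=2.2, theta=pi/4, omega=0.1 is
# consistent with theta' = omega, omega' = -(g/L)*sin(theta) and g = 9.81. The real
# pendulum.Pendulum also stores solve() results as t, theta, omega, x, y; only
# __call__ is sketched here, and the name is prefixed to avoid shadowing the import.
class _PendulumSketch:
    def __init__(self, L=1.0, g=9.81):
        self.L, self.g = L, g

    def __call__(self, t, y):
        theta, omega = y
        return [omega, -(self.g / self.L) * np.sin(theta)]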
| 26.118644
| 86
| 0.615834
|
1ea78da791583c9f569bb736e5754e121205d7c2
| 269
|
py
|
Python
|
automobile/config/desktop.py
|
hrgadeha/concept
|
150ac108f048b7c22ec0389e17cd887dfa13ecac
|
[
"MIT"
] | null | null | null |
automobile/config/desktop.py
|
hrgadeha/concept
|
150ac108f048b7c22ec0389e17cd887dfa13ecac
|
[
"MIT"
] | null | null | null |
automobile/config/desktop.py
|
hrgadeha/concept
|
150ac108f048b7c22ec0389e17cd887dfa13ecac
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Automobile",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Automobile")
}
]
| 17.933333
| 44
| 0.620818
|
fa20faae8dfbe0cc7ab4aedf8f3bc907b658de26
| 1,682
|
py
|
Python
|
algorithms/209. Minimum Size Subarray Sum.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | 1
|
2020-12-02T13:54:30.000Z
|
2020-12-02T13:54:30.000Z
|
algorithms/209. Minimum Size Subarray Sum.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
algorithms/209. Minimum Size Subarray Sum.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
"""
1. Clarification
2. Possible solutions
- Brute force
- Prefix sum + Binary search
- Two Pointers
3. Coding
4. Tests
"""
from typing import List

# # T=O(n^2), S=O(1), Time Limit Exceeded
# class Solution:
# def minSubArrayLen(self, target: int, nums: List[int]) -> int:
# if target < 1 or not nums: return 0
# n = len(nums)
# ans = n + 1
# for i in range(n):
# total = 0
# for j in range(i, n):
# total += nums[j]
# if total >= target:
# ans = min(ans, j - i + 1)
# break
# return 0 if ans == n + 1 else ans
# # T=O(nlgn), S=O(n)
# class Solution:
# def minSubArrayLen(self, target: int, nums: List[int]) -> int:
# if target < 1 or not nums: return 0
# n = len(nums)
# ans = n + 1
# sums = [0]
# for i in range(n):
# sums.append(sums[-1] + nums[i])
# for i in range(n):
# tmp = target + sums[i]
# bound = bisect.bisect_left(sums, tmp)
# if bound != len(sums):
# ans = min(ans, bound - i)
# return 0 if ans == n + 1 else ans
# T=O(n), S=O(1)
class Solution:
def minSubArrayLen(self, target: int, nums: List[int]) -> int:
if target < 1 or not nums: return 0
n = len(nums)
ans = n + 1
start, end = 0, 0
total = 0
while end < n:
total += nums[end]
while total >= target:
ans = min(ans, end - start + 1)
total -= nums[start]
start += 1
end += 1
return 0 if ans == n + 1 else ans
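# Quick self-check of the two-pointer solution (illustrative; LeetCode supplies its own
# test harness). The first case is the classic example from the problem statement.
if __name__ == "__main__":
    assert Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3]) == 2          # subarray [4, 3]
    assert Solution().minSubArrayLen(11, [1, 1, 1, 1, 1, 1, 1, 1]) == 0   # no valid subarray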
| 27.57377
| 68
| 0.452438
|
351466e01e3c82fe397f8b1dcad1090d9cb8e5a6
| 2,281
|
py
|
Python
|
tests/pytests/materials/obsolete/TestElasticIsotropic3D.py
|
Grant-Block/pylith
|
f6338261b17551eba879da998a5aaf2d91f5f658
|
[
"MIT"
] | 93
|
2015-01-08T16:41:22.000Z
|
2022-02-25T13:40:02.000Z
|
tests/pytests/materials/obsolete/TestElasticIsotropic3D.py
|
Grant-Block/pylith
|
f6338261b17551eba879da998a5aaf2d91f5f658
|
[
"MIT"
] | 277
|
2015-02-20T16:27:35.000Z
|
2022-03-30T21:13:09.000Z
|
tests/pytests/materials/obsolete/TestElasticIsotropic3D.py
|
Grant-Block/pylith
|
f6338261b17551eba879da998a5aaf2d91f5f658
|
[
"MIT"
] | 71
|
2015-03-24T12:11:08.000Z
|
2022-03-03T04:26:02.000Z
|
#!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file tests/pytests/materials/TestElasticIsotropic3D.py
## @brief Unit testing of ElasticIsotropic3D object.
import unittest
from pylith.materials.ElasticIsotropic3D import ElasticIsotropic3D
# ----------------------------------------------------------------------
class TestElasticIsotropic3D(unittest.TestCase):
"""
Unit testing of ElasticIsotropic3D object.
"""
def setUp(self):
"""
Setup test subject.
"""
self.material = ElasticIsotropic3D()
return
def test_constructor(self):
"""
Test constructor.
"""
self.assertEqual(3, self.material.dimension())
return
def test_useElasticBehavior(self):
"""
Test useElasticBehavior().
"""
self.material.useElasticBehavior(False)
return
def testHasStateVars(self):
self.failIf(self.material.hasStateVars())
return
def testTensorSize(self):
self.assertEqual(6, self.material.tensorSize())
return
def testNeedNewJacobian(self):
"""
Test needNewJacobian().
"""
# Default should be False.
self.failIf(self.material.needNewJacobian())
# Changing time step should not require new Jacobian.
self.material.timeStep(1.0)
self.material.timeStep(2.0)
self.failIf(self.material.needNewJacobian())
return
def testStableTimeStepImplicit(self):
"""
Test stableTimeStepImplicit().
"""
from pylith.topology.Mesh import Mesh
mesh = Mesh()
dt = self.material.stableTimeStepImplicit(mesh)
from pylith.utils.utils import maxdouble
self.assertAlmostEqual(1.0, dt/maxdouble())
def test_factory(self):
"""
Test factory method.
"""
from pylith.materials.ElasticIsotropic3D import material
m = material()
return
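# A minimal standalone runner sketch (assumes pylith and its ElasticIsotropic3D
# bindings are importable); the project's test harness normally collects this
# TestCase itself, so this block is only a convenience for running the file directly.
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestElasticIsotropic3D)
    unittest.TextTestRunner(verbosity=2).run(suite)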
# End of file
| 22.362745
| 72
| 0.628672
|
d9d6e8cc642e36ae3ecd0e21c45b1922843b2a6f
| 3,714
|
py
|
Python
|
memsource_cli/models/business_unit_edit_dto.py
|
unofficial-memsource/memsource-cli-client
|
a6639506b74e95476da87f4375953448b76ea90c
|
[
"Apache-2.0"
] | 16
|
2019-09-25T00:20:38.000Z
|
2021-05-04T05:56:10.000Z
|
memsource_cli/models/business_unit_edit_dto.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 26
|
2019-09-30T14:00:03.000Z
|
2021-05-12T11:15:18.000Z
|
memsource_cli/models/business_unit_edit_dto.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 1
|
2021-05-24T16:19:14.000Z
|
2021-05-24T16:19:14.000Z
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BusinessUnitEditDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None): # noqa: E501
"""BusinessUnitEditDto - a model defined in Swagger""" # noqa: E501
self._name = None
self.discriminator = None
if name is not None:
self.name = name
@property
def name(self):
"""Gets the name of this BusinessUnitEditDto. # noqa: E501
:return: The name of this BusinessUnitEditDto. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this BusinessUnitEditDto.
:param name: The name of this BusinessUnitEditDto. # noqa: E501
:type: str
"""
if name is not None and len(name) > 255:
raise ValueError("Invalid value for `name`, length must be less than or equal to `255`") # noqa: E501
if name is not None and len(name) < 0:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `0`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BusinessUnitEditDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BusinessUnitEditDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
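# Hedged usage sketch for the generated model above (the name value is illustrative).
if __name__ == "__main__":
    dto = BusinessUnitEditDto(name="Engineering")
    assert dto.to_dict() == {"name": "Engineering"}
    assert dto == BusinessUnitEditDto(name="Engineering")
    try:
        dto.name = "x" * 256   # exceeds the 255-character limit enforced by the setter
    except ValueError as exc:
        print(exc)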
| 30.95
| 421
| 0.578352
|
bbb9fb971b4f00620931fcafb4c1a17e7284c42c
| 1,164
|
py
|
Python
|
Round 2/red-tape-committee.py
|
kamyu104/GoogleCodeJame-2016
|
5222eeb34e7762a92e6d42a985e1fc6fc0dd151d
|
[
"MIT"
] | 19
|
2016-04-10T13:55:50.000Z
|
2020-05-16T07:10:05.000Z
|
Round 2/red-tape-committee.py
|
kamyu104/GoogleCodeJame-2016
|
5222eeb34e7762a92e6d42a985e1fc6fc0dd151d
|
[
"MIT"
] | null | null | null |
Round 2/red-tape-committee.py
|
kamyu104/GoogleCodeJame-2016
|
5222eeb34e7762a92e6d42a985e1fc6fc0dd151d
|
[
"MIT"
] | 19
|
2016-04-12T10:04:03.000Z
|
2021-07-22T22:38:13.000Z
|
# Copyright (c) 2016 kamyu. All rights reserved.
#
# Google Code Jam 2016 Round 2 - Problem B. Red Tape Committee
# https://code.google.com/codejam/contest/10224486/dashboard#s=p1
#
# Time: O(NlogN + K^3)
# Space: O(K)
#
def red_tape_committee():
N, K = map(int, raw_input().strip().split())
P = sorted(map(float, raw_input().strip().split()))
result = 0
for M in xrange(K+1):
# The best way to create a tie is to choose department members
# from one or both extremes.
V = P[:M] + P[N-K+M:]
# dp[i][j]: the probability of i members with j yes votes.
dp = [[1.0], []]
for i in xrange(1, K + 1):
dp[i % 2] = [0] * (len(dp[(i - 1) % 2]) + 1)
for j in xrange(i):
dp[i % 2][j] += dp[(i - 1) % 2][j] * (1 - V[i - 1]) # vote no
dp[i % 2][j + 1] += dp[(i - 1) % 2][j] * V[i - 1] # vote yes
# The probability of tie is the probability of
# K members with (K/2) yes votes.
result = max(result, dp[K % 2][K / 2])
return result
for case in xrange(input()):
print 'Case #%d: %.10f' % (case+1, red_tape_committee())
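# A standalone, hedged sanity check of the tie DP used above (written without the
# Python 2 I/O so it can run on its own; tie_probability is a helper name of mine).
def tie_probability(probs):
    """Probability that exactly half of the len(probs) committee members vote yes."""
    dp = [1.0]                       # dp[j]: probability of j yes votes so far
    for p in probs:
        ndp = [0.0] * (len(dp) + 1)
        for j, q in enumerate(dp):
            ndp[j] += q * (1 - p)    # this member votes no
            ndp[j + 1] += q * p      # this member votes yes
        dp = ndp
    return dp[len(probs) // 2]
# For two members the tie probability is p1*(1-p2) + p2*(1-p1).
assert abs(tie_probability([0.5, 0.5]) - 0.5) < 1e-12
assert abs(tie_probability([0.2, 0.7]) - (0.2 * 0.3 + 0.7 * 0.8)) < 1e-12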
| 31.459459
| 78
| 0.525773
|
bf18fa58c0d559b55dfb9867a4cedb7533c3048a
| 5,750
|
py
|
Python
|
python/graph_sawyer.py
|
kalyanvasudev/gpmp2
|
1ee99c743d978ab20dc804c8cd9cfa7813084957
|
[
"BSD-3-Clause"
] | null | null | null |
python/graph_sawyer.py
|
kalyanvasudev/gpmp2
|
1ee99c743d978ab20dc804c8cd9cfa7813084957
|
[
"BSD-3-Clause"
] | null | null | null |
python/graph_sawyer.py
|
kalyanvasudev/gpmp2
|
1ee99c743d978ab20dc804c8cd9cfa7813084957
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from gtsam import *
from gpmp2 import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D #<-- Note the capitalization!
from gpmp_utils.generate3Ddataset import generate3Ddataset
from gpmp_utils.signedDistanceField3D import signedDistanceField3D
from gpmp_utils.generateArm import generateArm
from gpmp_utils.plotMap3D import plotMap3D
from gpmp_utils.plotRobotModel import plotRobotModel
from gpmp_utils.set3DPlotRange import set3DPlotRange
from gpmp_utils.plotArm import plotArm
from pyrobot import Robot
from graph_utils import *
import time
if __name__ == "__main__":
problem = Problem()
problem.use_GP_inter = True
problem.gp_factor_function = GaussianProcessPriorLinear
problem.obstacle_factor_function = ObstacleSDFFactorArm
problem.obstalce_gp_factor_function = ObstacleSDFFactorGPArm
problem.dataset = generate3Ddataset('WAMDeskDataset')
origin = np.asarray([problem.dataset.origin_x, problem.dataset.origin_y, \
problem.dataset.origin_z])
# dataset
origin_point3 = Point3(origin)
cell_size = problem.dataset.cell_size
# sdf
    print('calculating signed distance field ...')
field = signedDistanceField3D(problem.dataset.map, problem.dataset.cell_size)
# init sdf
problem.sdf = SignedDistanceField(origin_point3, cell_size, field.shape[0],
field.shape[1], field.shape[2])
for z in range(field.shape[2]):
problem.sdf.initFieldData(z, field[:,:,z])
print('calculating signed distance field done')
# arm: WAM arm
problem.gpmp_robot = generateArm('SAWYERArm')
# Make PyRobot Object
robot = Robot('sawyer')
robot.arm.go_home()
problem.start_conf = robot.arm.get_joint_angles()
#start_conf[0] = np.pi/2
robot.arm.move_to_neutral()
problem.end_conf = robot.arm.get_joint_angles()
#end_conf[0] = np.pi/2
problem.start_vel = np.zeros(7)
problem.end_vel = np.zeros(7)
# plot problem setting
figure0 = plt.figure(0)
axis0 = Axes3D(figure0)
axis0.set_title('Problem Settings')
set3DPlotRange(figure0, axis0, problem.dataset)
plotRobotModel(figure0, axis0, problem.gpmp_robot, problem.start_conf)
plotRobotModel(figure0, axis0, problem.gpmp_robot, problem.end_conf)
plotMap3D(figure0, axis0, problem.dataset.corner_idx, origin, cell_size)
## settings
problem.total_time_sec = 4.0
problem.total_time_step = 20
problem.total_check_step = 100
problem.delta_t = problem.total_time_sec / problem.total_time_step
problem.check_inter = problem.total_check_step / problem.total_time_step - 1
problem.avg_vel = (problem.end_conf / problem.total_time_step) / problem.delta_t
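    # With the (assumed) values above: delta_t = 4.0 / 20 = 0.2 s between GP support
    # states, and check_inter = 100 / 20 - 1 = 4 interpolated states per segment,
    # which gpmp2 typically uses only for the obstacle (collision-check) factors.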
# GP
problem.Qc = np.identity(7)
problem.Qc_model = noiseModel_Gaussian.Covariance(problem.Qc)
# algo settings
problem.cost_sigma = 0.02
problem.epsilon_dist = 0.2
# noise model
problem.fix_sigma = 0.0001
problem.pose_fix_model = noiseModel_Isotropic.Sigma(7, problem.fix_sigma)
problem.vel_fix_model = noiseModel_Isotropic.Sigma(7, problem.fix_sigma)
#% plot settings
plot_inter_traj = False
plot_inter = 4
if plot_inter_traj:
total_plot_step = problem.total_time_step * (plot_inter + 1)
else:
total_plot_step = problem.total_time_step
problem.pause_time = problem.total_time_sec / total_plot_step
inits = get_initializations( 4, problem)
#print(inits)
start = time.time()
problem.dropout_prob = 0.5
problem.seed_val = 1
planner_graph = get_planner_graph(inits, problem)
end = time.time()
print('Time taken to build planner graph:', end - start)
print(len(planner_graph))
gtsam_graph, init_values = get_gtsam_graph(planner_graph, problem)
#print(gtsam_graph)
use_trustregion_opt = True
if use_trustregion_opt:
parameters = DoglegParams()
parameters.setVerbosity('ERROR')
optimizer = DoglegOptimizer(gtsam_graph, init_values, parameters)
else:
parameters = GaussNewtonParams()
#parameters.setRelativeErrorTol(1e-5)
#parameters.setMaxIterations(100)
parameters.setVerbosity('ERROR')
optimizer = GaussNewtonOptimizer(gtsam_graph, init_values, parameters)
    print('Initial Error = %f' % gtsam_graph.error(init_values))
start = time.time()
optimizer.optimizeSafely()
end = time.time()
print('Time taken to optimize:', end - start)
result = optimizer.values()
    print('Final Error = %f' % gtsam_graph.error(result))
update_planner_graph(result, planner_graph)
start = time.time()
planner = Planner(result, gtsam_graph, planner_graph)
end = time.time()
print('Time taken to plan:', end - start)
path = planner.get_shortest_path()
print(path)
# plot final values
figure2 = plt.figure(2)
axis2 = Axes3D(figure2)
axis2.set_title('Result Values')
plotMap3D(figure2, axis2, problem.dataset.corner_idx, origin, cell_size)
set3DPlotRange(figure2, axis2, problem.dataset)
for i in range(total_plot_step):
conf = path[i]
plotArm(figure2, axis2, problem.gpmp_robot.fk_model(), conf, 'b', 2)
plt.pause(problem.pause_time)
plt.show()
# ### Executing the Final Trajectory on Sawyer in Gazebo
# robot.arm.set_joint_positions(start_conf)
# from joint_trajectory_client_sawyer_ros import Trajectory
# joint_names = [ 'right_j' + str(x) for x in range(7)]
# action_name = '/robot/limb/right/follow_joint_trajectory'
# traj = Trajectory(joint_names, action_name)
# #Encode results into list
# final_trajectory = []
# for i in range(total_plot_step):
# conf = path[i]
# traj.add_point(conf, pause_time*5)
# final_trajectory.append(conf)
# traj.start()
# traj.wait(20) # TODO: Change this to wait till finish of trajectory execution
| 28.894472
| 82
| 0.736522
|