Dataset schema (column name, dtype, observed length or value range; "nullable" marks columns that may be null):

| column | dtype | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
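The schema above resembles the per-file layout of source-code corpora hosted on the Hugging Face Hub: one row per file, with repository metadata recorded for the repo that maximizes the star, issue, and fork counts. As a minimal sketch of working with such a dataset via the `datasets` library (the dataset identifier below is a placeholder, since the actual identifier is not given in this dump):

```python
# Minimal sketch: stream a code dataset with this schema and keep files that
# pass simple quality filters. "org/python-code-dump" is a placeholder name,
# not the identifier of this particular dataset.
from datasets import load_dataset

ds = load_dataset("org/python-code-dump", split="train", streaming=True)

kept = (
    row for row in ds
    if row["size"] < 100_000             # drop very large files
    and row["alphanum_fraction"] > 0.25  # drop mostly non-text blobs
    and row["max_line_length"] < 1_000   # drop minified / generated code
)

for row in kept:
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    break  # just show the first surviving row
```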
Row 1
hexsha: 7c1247fafd266f5ae8d91a41ee6395bdd80f2bed | size: 2,109 | ext: py | lang: Python
max_stars: path tests/test_layouts.py | repo bczsalba/PyTermGUI | head 3e7cc741363e5ff37de552cf97c8f43405834cc3 | licenses ["MIT"] | count 40 | events 2022-01-28T20:49:11.000Z to 2022-02-19T22:33:52.000Z
max_issues: path tests/test_layouts.py | repo bczsalba/PyTermGUI | head 3e7cc741363e5ff37de552cf97c8f43405834cc3 | licenses ["MIT"] | count 12 | events 2022-01-28T15:46:36.000Z to 2022-02-15T06:52:15.000Z
max_forks: path tests/test_layouts.py | repo bczsalba/PyTermGUI | head 3e7cc741363e5ff37de552cf97c8f43405834cc3 | licenses ["MIT"] | count 5 | events 2022-01-29T02:26:24.000Z to 2022-02-07T18:12:58.000Z
content:
import re
import pytest
from pytermgui.window_manager.layouts import Auto, Static
import pytermgui as ptg
def test_auto():
layout = ptg.Layout()
layout.add_slot("Header")
layout.add_break()
layout.add_slot("Body")
layout.add_break()
layout.add_slot("Footer")
assert str(layout.header.width) == "Auto(value=0)"
layout.apply()
assert len(layout.slots) == 5
def test_static():
layout = ptg.Layout()
layout.add_slot("one", width=10, height=15)
layout.add_break()
layout.add_slot("two")
assert str(layout.one.width) == "Static(value=10)"
layout.apply()
assert isinstance(layout.one.width, Static)
assert isinstance(layout.one.height, Static)
assert isinstance(layout.two.width, Auto)
assert isinstance(layout.two.height, Auto)
def test_relative():
layout = ptg.Layout()
layout.add_slot("one", width=0.9, height=0.1)
assert (
re.match(
r"Relative\(value=[\d]+, scale=0\.9, bound=<function Layout.add_slot.<locals>.<lambda> at 0x[0-9a-fA-F]+>\)",
str(layout.one.width),
)
is not None
)
layout.apply()
assert layout.one.width.value == int(ptg.terminal.width * 0.9)
with pytest.raises(TypeError):
layout.one.width.value = 10
def test_detach():
layout = ptg.Layout()
slot = layout.add_slot("Body")
slot.content = ptg.Window()
layout.apply()
slot.detach_content()
def test_wrong_detach():
layout = ptg.Layout()
slot = layout.add_slot("Body")
with pytest.raises(AttributeError):
slot.detach_content()
def test_wrong_getattr():
layout = ptg.Layout()
layout.add_slot("Body")
with pytest.raises(AttributeError):
layout.body1
def test_add_index():
layout = ptg.Layout()
layout.add_slot("Body")
layout.add_slot("Header", index=0)
def test_assign():
layout = ptg.Layout()
layout.add_slot("Body")
layout.assign(ptg.Container())
def test_wrong_assign():
layout = ptg.Layout()
layout.assign(ptg.Container(), index=2)
assert layout.slots == []
avg_line_length: 21.30303 | max_line_length: 121 | alphanum_fraction: 0.644855
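The three trailing numbers on each row are the derived columns avg_line_length, max_line_length, and alphanum_fraction. A plausible way to compute them from the content column is sketched below; this is an assumption about the preprocessing, not taken from the dataset's documentation:

```python
# Sketch of how the per-row statistics could be derived from the "content"
# column. One plausible definition, not the dataset's documented pipeline.
def line_stats(content: str) -> dict:
    lines = content.splitlines()
    n_lines = max(len(lines), 1)
    return {
        "avg_line_length": len(content) / n_lines,
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }
```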
Row 2
hexsha: c4a9365069b6aab9b3559904c585c60b66a3144d | size: 3,253 | ext: py | lang: Python
max_stars: path jupyterlab/settings_handler.py | repo maartenbreddels/jupyterlab | head 3b4c1a3df53b7446516a4cb1138cc57ae91a7b80 | licenses ["BSD-3-Clause"] | count null | event datetimes null
max_issues: path jupyterlab/settings_handler.py | repo maartenbreddels/jupyterlab | head 3b4c1a3df53b7446516a4cb1138cc57ae91a7b80 | licenses ["BSD-3-Clause"] | count null | event datetimes null
max_forks: path jupyterlab/settings_handler.py | repo maartenbreddels/jupyterlab | head 3b4c1a3df53b7446516a4cb1138cc57ae91a7b80 | licenses ["BSD-3-Clause"] | count null | event datetimes null
content:
"""Tornado handlers for frontend config storage."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
from tornado import web
from notebook.base.handlers import APIHandler, json_errors
try:
from jsonschema import ValidationError
from jsonschema import Draft4Validator as Validator
except ImportError:
Validator = None
class SettingsHandler(APIHandler):
def initialize(self, schemas_dir, settings_dir):
self.schemas_dir = schemas_dir
self.settings_dir = settings_dir
@json_errors
@web.authenticated
def get(self, section_name):
self.set_header('Content-Type', "application/json")
path = os.path.join(self.schemas_dir, section_name + ".json")
if not os.path.exists(path):
raise web.HTTPError(404, "Schema not found: %r" % section_name)
with open(path) as fid:
# Attempt to load the schema file.
try:
schema = json.load(fid)
except Exception as e:
name = section_name
message = "Failed parsing schema ({}): {}".format(name, str(e))
raise web.HTTPError(500, message)
path = os.path.join(self.settings_dir, section_name + '.json')
settings = dict()
if os.path.exists(path):
with open(path) as fid:
# Attempt to load the settings file.
try:
settings = json.load(fid)
except Exception as e:
self.log.warn(str(e))
# Validate the data against the schema.
if Validator is not None and len(settings):
validator = Validator(schema)
try:
validator.validate(settings)
except ValidationError as e:
self.log.warn(str(e))
settings = dict()
resp = dict(id=section_name, data=dict(user=settings), schema=schema)
self.finish(json.dumps(resp))
@json_errors
@web.authenticated
def patch(self, section_name):
if not self.settings_dir:
raise web.HTTPError(404, "No current settings directory")
path = os.path.join(self.schemas_dir, section_name + '.json')
if not os.path.exists(path):
raise web.HTTPError(404, "Schema not found for: %r" % section_name)
data = self.get_json_body() # Will raise 400 if content is not valid JSON
# Validate the data against the schema.
if Validator is not None:
with open(path) as fid:
schema = json.load(fid)
validator = Validator(schema)
try:
validator.validate(data)
except ValidationError as e:
raise web.HTTPError(400, str(e))
# Create the settings dir as needed.
if not os.path.exists(self.settings_dir):
os.makedirs(self.settings_dir)
path = os.path.join(self.settings_dir, section_name + '.json')
with open(path, 'w') as fid:
json.dump(data, fid)
self.set_status(204)
# The path for a lab settings section.
settings_path = r"/lab/api/settings/(?P<section_name>[\w.-]+)"
avg_line_length: 32.53 | max_line_length: 82 | alphanum_fraction: 0.601599
Row 3
hexsha: 4933993e05bce808f462453b91b50bc9a2822728 | size: 4,546 | ext: py | lang: Python
max_stars: path selectable/tests/test_base.py | repo climapulse/django-selectable | head bb6a3258e48628811cb6b0ec465a124446f75f7a | licenses ["BSD-2-Clause"] | count null | event datetimes null
max_issues: path selectable/tests/test_base.py | repo climapulse/django-selectable | head bb6a3258e48628811cb6b0ec465a124446f75f7a | licenses ["BSD-2-Clause"] | count null | event datetimes null
max_forks: path selectable/tests/test_base.py | repo climapulse/django-selectable | head bb6a3258e48628811cb6b0ec465a124446f75f7a | licenses ["BSD-2-Clause"] | count null | event datetimes null
content:
from django.urls import reverse
from django.utils.html import escape
from django.utils.safestring import SafeData, mark_safe
from ..base import ModelLookup
from . import Thing
from .base import BaseSelectableTestCase, SimpleModelLookup
__all__ = (
'ModelLookupTestCase',
'MultiFieldLookupTestCase',
'LookupEscapingTestCase',
)
class ModelLookupTestCase(BaseSelectableTestCase):
lookup_cls = SimpleModelLookup
def get_lookup_instance(self):
return self.__class__.lookup_cls()
def test_get_name(self):
name = self.__class__.lookup_cls.name()
self.assertEqual(name, 'tests-simplemodellookup')
def test_get_url(self):
url = self.__class__.lookup_cls.url()
test_url = reverse('selectable-lookup', args=['tests-simplemodellookup'])
self.assertEqual(url, test_url)
def test_format_item(self):
lookup = self.get_lookup_instance()
thing = Thing()
item_info = lookup.format_item(thing)
self.assertTrue('id' in item_info)
self.assertTrue('value' in item_info)
self.assertTrue('label' in item_info)
def test_get_query(self):
lookup = self.get_lookup_instance()
thing = self.create_thing(data={'name': 'Thing'})
other_thing = self.create_thing(data={'name': 'Other Thing'})
qs = lookup.get_query(request=None, term='other')
self.assertTrue(thing.pk not in qs.values_list('id', flat=True))
self.assertTrue(other_thing.pk in qs.values_list('id', flat=True))
def test_create_item(self):
value = self.get_random_string()
lookup = self.get_lookup_instance()
thing = lookup.create_item(value)
self.assertEqual(thing.__class__, Thing)
self.assertEqual(thing.name, value)
self.assertFalse(thing.pk)
def test_get_item(self):
lookup = self.get_lookup_instance()
thing = self.create_thing(data={'name': 'Thing'})
item = lookup.get_item(thing.pk)
self.assertEqual(thing, item)
def test_format_item_escaping(self):
"Id, value and label should be escaped."
lookup = self.get_lookup_instance()
thing = self.create_thing(data={'name': 'Thing'})
item_info = lookup.format_item(thing)
self.assertFalse(isinstance(item_info['id'], SafeData))
self.assertFalse(isinstance(item_info['value'], SafeData))
self.assertTrue(isinstance(item_info['label'], SafeData))
class MultiFieldLookup(ModelLookup):
model = Thing
search_fields = ('name__icontains', 'description__icontains', )
class MultiFieldLookupTestCase(ModelLookupTestCase):
lookup_cls = MultiFieldLookup
def test_get_name(self):
name = self.__class__.lookup_cls.name()
self.assertEqual(name, 'tests-multifieldlookup')
def test_get_url(self):
url = self.__class__.lookup_cls.url()
test_url = reverse('selectable-lookup', args=['tests-multifieldlookup'])
self.assertEqual(url, test_url)
def test_description_search(self):
lookup = self.get_lookup_instance()
thing = self.create_thing(data={'description': 'Thing'})
other_thing = self.create_thing(data={'description': 'Other Thing'})
qs = lookup.get_query(request=None, term='other')
self.assertTrue(thing.pk not in qs.values_list('id', flat=True))
self.assertTrue(other_thing.pk in qs.values_list('id', flat=True))
class HTMLLookup(ModelLookup):
model = Thing
search_fields = ('name__icontains', )
class SafeHTMLLookup(ModelLookup):
model = Thing
search_fields = ('name__icontains', )
def get_item_label(self, item):
"Mark label as safe."
return mark_safe(item.name)
class LookupEscapingTestCase(BaseSelectableTestCase):
def test_escape_html(self):
"HTML should be escaped by default."
lookup = HTMLLookup()
bad_name = "<script>alert('hacked');</script>"
escaped_name = escape(bad_name)
thing = self.create_thing(data={'name': bad_name})
item_info = lookup.format_item(thing)
self.assertEqual(item_info['label'], escaped_name)
def test_conditional_escape(self):
"Methods should be able to mark values as safe."
lookup = SafeHTMLLookup()
bad_name = "<script>alert('hacked');</script>"
escaped_name = escape(bad_name)
thing = self.create_thing(data={'name': bad_name})
item_info = lookup.format_item(thing)
self.assertEqual(item_info['label'], bad_name)
avg_line_length: 34.969231 | max_line_length: 81 | alphanum_fraction: 0.679059
Row 4
hexsha: 20c810250db4cc9abd6fcb791b4a4d03c0af0acf | size: 4,303 | ext: py | lang: Python
max_stars: path litex_things/deps/litex_boards/litex_boards/partner/platforms/c10lprefkit.py | repo bjonnh/fomu-playground | head 9f95ed7b28d15ce219d09c16c2c8d6b5594adceb | licenses ["0BSD"] | count null | event datetimes null
max_issues: path litex_things/deps/litex_boards/litex_boards/partner/platforms/c10lprefkit.py | repo bjonnh/fomu-playground | head 9f95ed7b28d15ce219d09c16c2c8d6b5594adceb | licenses ["0BSD"] | count null | event datetimes null
max_forks: path litex_things/deps/litex_boards/litex_boards/partner/platforms/c10lprefkit.py | repo bjonnh/fomu-playground | head 9f95ed7b28d15ce219d09c16c2c8d6b5594adceb | licenses ["0BSD"] | count null | event datetimes null
content:
# This file is Copyright (c) 2019 Antti Lukats <antti.lukats@gmail.com>
# This file is Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
from litex.build.generic_platform import *
from litex.build.altera import AlteraPlatform
from litex.build.altera.programmer import USBBlaster
# IOs ----------------------------------------------------------------------------------------------
_io = [
("clk12", 0, Pins("G21"), IOStandard("3.3-V LVTTL")),
("clk25", 0, Pins("AA12"), IOStandard("3.3-V LVTTL")),
("user_led", 0, Pins("C18"), IOStandard("3.3-V LVTTL")),
("user_led", 1, Pins("D19"), IOStandard("3.3-V LVTTL")),
("user_led", 2, Pins("C19"), IOStandard("3.3-V LVTTL")),
("user_led", 3, Pins("C17"), IOStandard("3.3-V LVTTL")),
("user_led", 4, Pins("D18"), IOStandard("3.3-V LVTTL")),
("cpu_reset", 0, Pins("V15"), IOStandard("3.3-V LVTTL")),
("sw", 0, Pins("U10"), IOStandard("3.3-V LVTTL")),
("sw", 1, Pins("U11"), IOStandard("3.3-V LVTTL")),
("sw", 2, Pins("V11"), IOStandard("3.3-V LVTTL")),
("sw", 3, Pins("T10"), IOStandard("3.3-V LVTTL")),
("sw", 4, Pins("T11"), IOStandard("3.3-V LVTTL")),
("serial", 0,
Subsignal("tx", Pins("B21"), IOStandard("3.3-V LVTTL")),
Subsignal("rx", Pins("C20"), IOStandard("3.3-V LVTTL")),
),
("sdram_clock", 0, Pins("AA3"), IOStandard("3.3-V LVTTL")),
("sdram", 0,
Subsignal("a", Pins(
"V5 Y3 W6 Y4 AB5 AB6 AA6 AA7",
"AB7 AA5 V6 AA8 AB8")),
Subsignal("ba", Pins("Y6 V7")),
Subsignal("cs_n", Pins("W7")),
Subsignal("cke", Pins("AA4")),
Subsignal("ras_n", Pins("V8")),
Subsignal("cas_n", Pins("Y7")),
Subsignal("we_n", Pins("W8")),
Subsignal("dq", Pins(
"AB16 Y17 AA16 AA19 AB18 AA20 AB19 AB20",
"Y13 Y15 AA13 AB15 AB13 AA15 AA14 AB14")),
Subsignal("dm", Pins("Y14 W13")),
IOStandard("3.3-V LVTTL")
),
("epcs", 0,
Subsignal("data0", Pins("K1")),
Subsignal("dclk", Pins("K2")),
Subsignal("ncs0", Pins("E2")),
Subsignal("asd0", Pins("D1")),
IOStandard("3.3-V LVTTL")
),
("hyperram", 0,
Subsignal("clk", Pins("T16")),
Subsignal("rst_n", Pins("U12")),
Subsignal("dq", Pins("T15 W17 U14 R15 R14 V16 U16 U17")),
Subsignal("cs_n", Pins("V13")),
Subsignal("rwds", Pins("U13")),
IOStandard("3.3-V LVTTL")
),
("gpio_leds", 0,
Pins("AB10 AA10 AA9 Y10 W10 U9 U8 U7"),
IOStandard("3.3-V LVTTL")
),
("eth_clocks", 0,
Subsignal("tx", Pins("U21")),
Subsignal("rx", Pins("V22")),
IOStandard("3.3-V LVTTL"),
),
("eth", 0,
Subsignal("rst_n", Pins("R19")),
Subsignal("mdio", Pins("AA21")),
Subsignal("mdc", Pins("AA22")),
Subsignal("rx_dv", Pins("W21")),
Subsignal("rx_er", Pins("V21")),
Subsignal("rx_data", Pins("W22 W20 Y21 Y22")),
Subsignal("tx_en", Pins("T18")),
Subsignal("tx_data", Pins("T17 U20 U19 T20")),
Subsignal("col", Pins("T19")),
Subsignal("crs", Pins("R20")),
IOStandard("3.3-V LVTTL"),
),
("eth_clocks", 1,
Subsignal("tx", Pins("N16")),
Subsignal("rx", Pins("V22")),
IOStandard("3.3-V LVTTL"),
),
("eth", 1,
Subsignal("rst_n", Pins("M21")),
Subsignal("mdio", Pins("N20")),
Subsignal("mdc", Pins("N18")),
Subsignal("rx_dv", Pins("R18")),
Subsignal("rx_er", Pins("P17")),
Subsignal("rx_data", Pins("M20 M19 M16 N19")),
Subsignal("tx_en", Pins("R22")),
Subsignal("tx_data", Pins("R21 N21 M22 N22")),
Subsignal("col", Pins("P21")),
Subsignal("crs", Pins("P22")),
IOStandard("3.3-V LVTTL"),
),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(AlteraPlatform):
default_clk_name = "clk12"
default_clk_period = 1e9/12e6
def __init__(self):
AlteraPlatform.__init__(self, "10CL055YU484A7G", _io)
self.add_platform_command("set_global_assignment -name FAMILY \"Cyclone 10 LP\"")
def create_programmer(self):
return USBBlaster()
avg_line_length: 34.150794 | max_line_length: 100 | alphanum_fraction: 0.519405
Row 5
hexsha: bfbf6a6bc7e86821dae9f64082f4fe3c9e241739 | size: 2,648 | ext: py | lang: Python
max_stars: path Lime/lime_test.py | repo Wenhao-Yang/DeepSpeaker-pytorch | head 99eb8de3357c85e2b7576da2a742be2ffd773ead | licenses ["MIT"] | count 8 | events 2020-08-26T13:32:56.000Z to 2022-01-18T21:05:46.000Z
max_issues: path Lime/lime_test.py | repo Wenhao-Yang/DeepSpeaker-pytorch | head 99eb8de3357c85e2b7576da2a742be2ffd773ead | licenses ["MIT"] | count 1 | events 2020-07-24T17:06:16.000Z to 2020-07-24T17:06:16.000Z
max_forks: path Lime/lime_test.py | repo Wenhao-Yang/DeepSpeaker-pytorch | head 99eb8de3357c85e2b7576da2a742be2ffd773ead | licenses ["MIT"] | count 5 | events 2020-12-11T03:31:15.000Z to 2021-11-23T15:57:55.000Z
content:
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: lime_test.py
@Time: 2019/12/7 8:11 PM
@Overview:
"""
from PIL import Image
import torch.nn as nn
import numpy as np
import os, json
import torch
from torchvision import models, transforms
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torchvision.models import Inception3
def get_image(path):
with open(os.path.abspath(path), 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
img = get_image('../Data/dogs.png')
plt.imshow(img)
# plt.show()
# resize and take the center part of image to what our model expects
def get_input_transform():
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transf = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
return transf
def get_input_tensors(img):
transf = get_input_transform()
# unsqeeze converts single image to batch of 1
return transf(img).unsqueeze(0)
# model = models.inception_v3(pretrained=True)
model = Inception3()
checkpoint = torch.load('../Data/inception_v3_google-1a9a5a14.pth')
model.load_state_dict(checkpoint)
# model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
idx2label, cls2label, cls2idx = [], {}, {}
with open('../Data/imagenet_class_index.json', 'r') as read_file:
class_idx = json.load(read_file)
idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
cls2label = {class_idx[str(k)][0]: class_idx[str(k)][1] for k in range(len(class_idx))}
cls2idx = {class_idx[str(k)][0]: k for k in range(len(class_idx))}
img_t = get_input_tensors(img)
model.eval()
logits = model(img_t)
probs = F.softmax(logits, dim=1)
probs5 = probs.topk(5)
print('top5: '+ str(probs5))
tuple((p,c, idx2label[c]) for p, c in zip(probs5[0][0].detach().numpy(), probs5[1][0].detach().numpy()))
def get_pil_transform():
transf = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(224)
])
return transf
def get_preprocess_transform():
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transf = transforms.Compose([
transforms.ToTensor(),
normalize
])
return transf
pill_transf = get_pil_transform()
preprocess_transform = get_preprocess_transform()
avg_line_length: 22.066667 | max_line_length: 104 | alphanum_fraction: 0.668429
Row 6
hexsha: 4d21515589d20f3f0192ca14d895e315d2901f26 | size: 9,896 | ext: py | lang: Python
max_stars: path bounties_api/std_bounties/migrations/0018_auto_20190331_1807.py | repo tenthirtyone/BountiesAPI | head 2bb449a947d987072be24633ba36fbd67c0ab29b | licenses ["MIT"] | count 45 | events 2018-03-24T21:37:59.000Z to 2021-11-12T11:53:04.000Z
max_issues: path bounties_api/std_bounties/migrations/0018_auto_20190331_1807.py | repo tenthirtyone/BountiesAPI | head 2bb449a947d987072be24633ba36fbd67c0ab29b | licenses ["MIT"] | count 192 | events 2018-03-15T22:42:51.000Z to 2022-02-12T11:42:20.000Z
max_forks: path bounties_api/std_bounties/migrations/0018_auto_20190331_1807.py | repo tenthirtyone/BountiesAPI | head 2bb449a947d987072be24633ba36fbd67c0ab29b | licenses ["MIT"] | count 27 | events 2018-03-23T17:12:27.000Z to 2021-12-06T02:21:26.000Z
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-31 18:07
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0027_auto_20190223_1548'),
('std_bounties', '0017_auto_20190131_0525'),
]
operations = [
migrations.CreateModel(
name='Contribution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('refunded', models.BooleanField(default=False)),
('contribution_id', models.IntegerField()),
('amount', models.DecimalField(decimal_places=0, max_digits=64)),
('calculated_amount', models.DecimalField(decimal_places=30, default=0, max_digits=70, null=True)),
('usd_amount', models.FloatField(default=0)),
('platform', models.CharField(blank=True, default='bounties-network', max_length=128)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('raw_event_data', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
],
),
migrations.RenameField(
model_name='bounty',
old_name='sourceDirectoryHash',
new_name='attached_data_hash',
),
migrations.RenameField(
model_name='bounty',
old_name='sourceFileName',
new_name='attached_filename',
),
migrations.RenameField(
model_name='bounty',
old_name='webReferenceURL',
new_name='attached_url',
),
migrations.RenameField(
model_name='bounty',
old_name='bountyStage',
new_name='bounty_stage',
),
migrations.RenameField(
model_name='bounty',
old_name='calculated_fulfillmentAmount',
new_name='calculated_fulfillment_amount',
),
migrations.RenameField(
model_name='bounty',
old_name='experienceLevel',
new_name='experience_level',
),
migrations.RenameField(
model_name='bounty',
old_name='tokenContract',
new_name='token_contract',
),
migrations.RenameField(
model_name='bounty',
old_name='tokenDecimals',
new_name='token_decimals',
),
migrations.RenameField(
model_name='bounty',
old_name='tokenLockPrice',
new_name='token_lock_price',
),
migrations.RenameField(
model_name='bounty',
old_name='tokenSymbol',
new_name='token_symbol',
),
migrations.RenameField(
model_name='bountystate',
old_name='bountyStage',
new_name='bounty_stage',
),
migrations.RenameField(
model_name='draftbounty',
old_name='sourceDirectoryHash',
new_name='attached_data_hash',
),
migrations.RenameField(
model_name='draftbounty',
old_name='sourceFileName',
new_name='attached_filename',
),
migrations.RenameField(
model_name='draftbounty',
old_name='webReferenceURL',
new_name='attached_url',
),
migrations.RenameField(
model_name='draftbounty',
old_name='calculated_fulfillmentAmount',
new_name='calculated_fulfillment_amount',
),
migrations.RenameField(
model_name='draftbounty',
old_name='experienceLevel',
new_name='experience_level',
),
migrations.RenameField(
model_name='draftbounty',
old_name='tokenContract',
new_name='token_contract',
),
migrations.RenameField(
model_name='draftbounty',
old_name='tokenDecimals',
new_name='token_decimals',
),
migrations.RenameField(
model_name='draftbounty',
old_name='tokenSymbol',
new_name='token_symbol',
),
migrations.RemoveField(
model_name='bounty',
name='arbiter',
),
migrations.RemoveField(
model_name='bounty',
name='fulfillmentAmount',
),
migrations.RemoveField(
model_name='bounty',
name='issuer_address',
),
migrations.RemoveField(
model_name='bounty',
name='issuer_email',
),
migrations.RemoveField(
model_name='bounty',
name='issuer_githubUsername',
),
migrations.RemoveField(
model_name='bounty',
name='issuer_name',
),
migrations.RemoveField(
model_name='bounty',
name='sourceFileHash',
),
migrations.RemoveField(
model_name='draftbounty',
name='arbiter',
),
migrations.RemoveField(
model_name='draftbounty',
name='fulfillmentAmount',
),
migrations.RemoveField(
model_name='draftbounty',
name='issuer_address',
),
migrations.RemoveField(
model_name='draftbounty',
name='issuer_email',
),
migrations.RemoveField(
model_name='draftbounty',
name='issuer_githubUsername',
),
migrations.RemoveField(
model_name='draftbounty',
name='issuer_name',
),
migrations.RemoveField(
model_name='draftbounty',
name='sourceFileHash',
),
migrations.AddField(
model_name='bounty',
name='approvers',
field=models.ManyToManyField(related_name='std_bounties_bounty_relateda', to='user.User'),
),
migrations.AddField(
model_name='bounty',
name='contract_state',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.AddField(
model_name='bounty',
name='contract_version',
field=models.IntegerField(choices=[(1, 'v1'), (2, 'v2')], default=1),
),
migrations.AddField(
model_name='bounty',
name='fulfillment_amount',
field=models.DecimalField(decimal_places=0, default=0, max_digits=64),
),
migrations.AddField(
model_name='bounty',
name='issuers',
field=models.ManyToManyField(related_name='std_bounties_bounty_related', to='user.User'),
),
migrations.AddField(
model_name='bounty',
name='raw_event_data',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.AddField(
model_name='bounty',
name='raw_ipfs_data',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.AddField(
model_name='bounty',
name='token_version',
field=models.IntegerField(choices=[(0, 'Ether'), (20, 'ERC-20'), (721, 'ERC-721')], null=True),
),
migrations.AddField(
model_name='draftbounty',
name='approvers',
field=models.ManyToManyField(related_name='std_bounties_draftbounty_relateda', to='user.User'),
),
migrations.AddField(
model_name='draftbounty',
name='fulfillment_amount',
field=models.DecimalField(decimal_places=0, default=0, max_digits=64),
),
migrations.AddField(
model_name='draftbounty',
name='issuers',
field=models.ManyToManyField(related_name='std_bounties_draftbounty_related', to='user.User'),
),
migrations.AddField(
model_name='draftbounty',
name='token_version',
field=models.IntegerField(choices=[(0, 'Ether'), (20, 'ERC-20'), (721, 'ERC-721')], null=True),
),
migrations.AddField(
model_name='event',
name='contract_event_data',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.AddField(
model_name='fulfillment',
name='contract_version',
field=models.IntegerField(choices=[(1, 'v1'), (2, 'v2')], default=1),
),
migrations.AddField(
model_name='fulfillment',
name='fulfillers',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=128), null=True, size=None),
),
migrations.AlterField(
model_name='bounty',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='bounty',
name='platform',
field=models.CharField(blank=True, default='bounties-network', max_length=128),
),
migrations.AddField(
model_name='contribution',
name='bounty',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='std_bounties.Bounty'),
),
migrations.AddField(
model_name='contribution',
name='contributor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User'),
),
]
avg_line_length: 35.217082 | max_line_length: 127 | alphanum_fraction: 0.563359
Row 7
hexsha: e47bd7bf1eb15f605f6c73124f082d9c92a5c0d7 | size: 141,283 | ext: py | lang: Python
max_stars: path mne/viz/_brain/_brain.py | repo Macquarie-MEG-Research/mne-python | head 469c56a8d1c4edb84852816301ecd43e8ff78ebf | licenses ["BSD-3-Clause"] | count null | event datetimes null
max_issues: path mne/viz/_brain/_brain.py | repo Macquarie-MEG-Research/mne-python | head 469c56a8d1c4edb84852816301ecd43e8ff78ebf | licenses ["BSD-3-Clause"] | count null | event datetimes null
max_forks: path mne/viz/_brain/_brain.py | repo Macquarie-MEG-Research/mne-python | head 469c56a8d1c4edb84852816301ecd43e8ff78ebf | licenses ["BSD-3-Clause"] | count null | event datetimes null
content:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Oleh Kozynets <ok7mailbox@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# jona-sassenhagen <jona.sassenhagen@gmail.com>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
import contextlib
from functools import partial
from io import BytesIO
import os
import os.path as op
import sys
import time
import copy
import traceback
import warnings
import numpy as np
from collections import OrderedDict
from .colormap import calculate_lut
from .surface import _Surface
from .view import views_dicts, _lh_views_dict
from .mplcanvas import MplCanvas
from .callback import (ShowView, TimeCallBack, SmartCallBack, Widget,
UpdateLUT, UpdateColorbarScale)
from ..utils import _show_help, _get_color_list, concatenate_images
from .._3d import _process_clim, _handle_time, _check_views
from ...externals.decorator import decorator
from ...defaults import _handle_default
from ...surface import mesh_edges
from ...source_space import SourceSpaces, vertex_to_mni, read_talxfm
from ...transforms import apply_trans, invert_transform
from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,
use_log_level, Bunch, _ReuseCycle, warn,
get_subjects_dir)
_ARROW_MOVE = 10 # degrees per press
@decorator
def safe_event(fun, *args, **kwargs):
"""Protect against PyQt5 exiting on event-handling errors."""
try:
return fun(*args, **kwargs)
except Exception:
traceback.print_exc(file=sys.stderr)
class _Overlay(object):
def __init__(self, scalars, colormap, rng, opacity, name):
self._scalars = scalars
self._colormap = colormap
assert rng is not None
self._rng = rng
self._opacity = opacity
self._name = name
def to_colors(self):
from .._3d import _get_cmap
from matplotlib.colors import ListedColormap
if isinstance(self._colormap, str):
kind = self._colormap
cmap = _get_cmap(self._colormap)
else:
cmap = ListedColormap(self._colormap / 255.)
kind = str(type(self._colormap))
logger.debug(
f'Color mapping {repr(self._name)} with {kind} '
f'colormap and range {self._rng}')
rng = self._rng
assert rng is not None
scalars = _norm(self._scalars, rng)
colors = cmap(scalars)
if self._opacity is not None:
colors[:, 3] *= self._opacity
return colors
def _norm(x, rng):
if rng[0] == rng[1]:
factor = 1 if rng[0] == 0 else 1e-6 * rng[0]
else:
factor = rng[1] - rng[0]
return (x - rng[0]) / factor
class _LayeredMesh(object):
def __init__(self, renderer, vertices, triangles, normals):
self._renderer = renderer
self._vertices = vertices
self._triangles = triangles
self._normals = normals
self._polydata = None
self._actor = None
self._is_mapped = False
self._cache = None
self._overlays = OrderedDict()
self._default_scalars = np.ones(vertices.shape)
self._default_scalars_name = 'Data'
def map(self):
kwargs = {
"color": None,
"pickable": True,
"rgba": True,
}
mesh_data = self._renderer.mesh(
x=self._vertices[:, 0],
y=self._vertices[:, 1],
z=self._vertices[:, 2],
triangles=self._triangles,
normals=self._normals,
scalars=self._default_scalars,
**kwargs
)
self._actor, self._polydata = mesh_data
self._is_mapped = True
def _compute_over(self, B, A):
assert A.ndim == B.ndim == 2
assert A.shape[1] == B.shape[1] == 4
A_w = A[:, 3:] # * 1
B_w = B[:, 3:] * (1 - A_w)
C = A.copy()
C[:, :3] *= A_w
C[:, :3] += B[:, :3] * B_w
C[:, 3:] += B_w
C[:, :3] /= C[:, 3:]
return np.clip(C, 0, 1, out=C)
def _compose_overlays(self):
B = None
for overlay in self._overlays.values():
A = overlay.to_colors()
if B is None:
B = A
else:
B = self._compute_over(B, A)
return B
def add_overlay(self, scalars, colormap, rng, opacity, name):
overlay = _Overlay(
scalars=scalars,
colormap=colormap,
rng=rng,
opacity=opacity,
name=name,
)
self._overlays[name] = overlay
colors = overlay.to_colors()
# save colors in cache
if self._cache is None:
self._cache = colors
else:
self._cache = self._compute_over(self._cache, colors)
# update the texture
self._update()
def remove_overlay(self, names):
if not isinstance(names, list):
names = [names]
for name in names:
if name in self._overlays:
del self._overlays[name]
self.update()
def _update(self):
if self._cache is None or self._renderer is None:
return
self._renderer._set_mesh_scalars(
mesh=self._polydata,
scalars=self._cache,
name=self._default_scalars_name,
)
def update(self):
self._cache = self._compose_overlays()
self._update()
def _clean(self):
mapper = self._actor.GetMapper()
mapper.SetLookupTable(None)
self._actor.SetMapper(None)
self._actor = None
self._polydata = None
self._renderer = None
def update_overlay(self, name, scalars=None, colormap=None,
opacity=None, rng=None):
overlay = self._overlays.get(name, None)
if overlay is None:
return
if scalars is not None:
overlay._scalars = scalars
if colormap is not None:
overlay._colormap = colormap
if opacity is not None:
overlay._opacity = opacity
if rng is not None:
overlay._rng = rng
self.update()
@fill_doc
class Brain(object):
"""Class for visualizing a brain.
.. warning::
The API for this class is not currently complete. We suggest using
:meth:`mne.viz.plot_source_estimates` with the PyVista backend
enabled to obtain a ``Brain`` instance.
Parameters
----------
subject_id : str
Subject name in Freesurfer subjects dir.
hemi : str
Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case
of 'both', both hemispheres are shown in the same window.
In the case of 'split' hemispheres are displayed side-by-side
in different viewing panes.
surf : str
FreeSurfer surface mesh name (ie 'white', 'inflated', etc.).
title : str
Title for the window.
cortex : str or None
Specifies how the cortical surface is rendered.
The name of one of the preset cortex styles can be:
``'classic'`` (default), ``'high_contrast'``,
``'low_contrast'``, or ``'bone'`` or a valid color name.
Setting this to ``None`` is equivalent to ``(0.5, 0.5, 0.5)``.
alpha : float in [0, 1]
Alpha level to control opacity of the cortical surface.
size : int | array-like, shape (2,)
The size of the window, in pixels. can be one number to specify
a square window, or a length-2 sequence to specify (width, height).
background : tuple(int, int, int)
The color definition of the background: (red, green, blue).
foreground : matplotlib color
Color of the foreground (will be used for colorbars and text).
None (default) will use black or white depending on the value
of ``background``.
figure : list of Figure | None | int
If None (default), a new window will be created with the appropriate
views. For single view plots, the figure can be specified as int to
retrieve the corresponding Mayavi window.
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment
variable.
views : list | str
The views to use.
offset : bool | str
If True, shifts the right- or left-most x coordinate of the left and
right surfaces, respectively, to be at zero. This is useful for viewing
inflated surface where hemispheres typically overlap. Can be "auto"
(default) use True with inflated surfaces and False otherwise
(Default: 'auto'). Only used when ``hemi='both'``.
.. versionchanged:: 0.23
Default changed to "auto".
show_toolbar : bool
If True, toolbars will be shown for each view.
offscreen : bool
If True, rendering will be done offscreen (not shown). Useful
mostly for generating images or screenshots, but can be buggy.
Use at your own risk.
interaction : str
Can be "trackball" (default) or "terrain", i.e. a turntable-style
camera.
units : str
Can be 'm' or 'mm' (default).
%(view_layout)s
silhouette : dict | bool
As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity
and ``decimate`` (level of decimation between 0 and 1 or None) of the
brain's silhouette to display. If True, the default values are used
and if False, no silhouette will be displayed. Defaults to False.
show : bool
Display the window as soon as it is ready. Defaults to True.
Attributes
----------
geo : dict
A dictionary of pysurfer.Surface objects for each hemisphere.
overlays : dict
The overlays.
Notes
-----
This table shows the capabilities of each Brain backend ("✓" for full
support, and "-" for partial support):
.. table::
:widths: auto
+---------------------------+--------------+---------------+
| 3D function: | surfer.Brain | mne.viz.Brain |
+===========================+==============+===============+
| add_annotation | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_data | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_foci | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_label | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_text | ✓ | ✓ |
+---------------------------+--------------+---------------+
| close | ✓ | ✓ |
+---------------------------+--------------+---------------+
| data | ✓ | ✓ |
+---------------------------+--------------+---------------+
| foci | ✓ | |
+---------------------------+--------------+---------------+
| labels | ✓ | ✓ |
+---------------------------+--------------+---------------+
| remove_foci | ✓ | |
+---------------------------+--------------+---------------+
| remove_labels | ✓ | ✓ |
+---------------------------+--------------+---------------+
| remove_annotations | - | ✓ |
+---------------------------+--------------+---------------+
| scale_data_colormap | ✓ | |
+---------------------------+--------------+---------------+
| save_image | ✓ | ✓ |
+---------------------------+--------------+---------------+
| save_movie | ✓ | ✓ |
+---------------------------+--------------+---------------+
| screenshot | ✓ | ✓ |
+---------------------------+--------------+---------------+
| show_view | ✓ | ✓ |
+---------------------------+--------------+---------------+
| TimeViewer | ✓ | ✓ |
+---------------------------+--------------+---------------+
| enable_depth_peeling | | ✓ |
+---------------------------+--------------+---------------+
| get_picked_points | | ✓ |
+---------------------------+--------------+---------------+
| add_data(volume) | | ✓ |
+---------------------------+--------------+---------------+
| view_layout | | ✓ |
+---------------------------+--------------+---------------+
| flatmaps | | ✓ |
+---------------------------+--------------+---------------+
| vertex picking | | ✓ |
+---------------------------+--------------+---------------+
| label picking | | ✓ |
+---------------------------+--------------+---------------+
"""
def __init__(self, subject_id, hemi, surf, title=None,
cortex="classic", alpha=1.0, size=800, background="black",
foreground=None, figure=None, subjects_dir=None,
views='auto', offset='auto', show_toolbar=False,
offscreen=False, interaction='trackball', units='mm',
view_layout='vertical', silhouette=False, show=True):
from ..backends.renderer import backend, _get_renderer, _get_3d_backend
from .._3d import _get_cmap
from matplotlib.colors import colorConverter
if hemi in ('both', 'split'):
self._hemis = ('lh', 'rh')
elif hemi in ('lh', 'rh'):
self._hemis = (hemi, )
else:
raise KeyError('hemi has to be either "lh", "rh", "split", '
'or "both"')
self._view_layout = _check_option('view_layout', view_layout,
('vertical', 'horizontal'))
if figure is not None and not isinstance(figure, int):
backend._check_3d_figure(figure)
if title is None:
self._title = subject_id
else:
self._title = title
self._interaction = 'trackball'
if isinstance(background, str):
background = colorConverter.to_rgb(background)
self._bg_color = background
if foreground is None:
foreground = 'w' if sum(self._bg_color) < 2 else 'k'
if isinstance(foreground, str):
foreground = colorConverter.to_rgb(foreground)
self._fg_color = foreground
if isinstance(views, str):
views = [views]
views = _check_views(surf, views, hemi)
col_dict = dict(lh=1, rh=1, both=1, split=2)
shape = (len(views), col_dict[hemi])
if self._view_layout == 'horizontal':
shape = shape[::-1]
self._subplot_shape = shape
size = tuple(np.atleast_1d(size).round(0).astype(int).flat)
if len(size) not in (1, 2):
raise ValueError('"size" parameter must be an int or length-2 '
'sequence of ints.')
self._size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple
subjects_dir = get_subjects_dir(subjects_dir)
self.time_viewer = False
self.notebook = (_get_3d_backend() == "notebook")
self._hemi = hemi
self._units = units
self._alpha = float(alpha)
self._subject_id = subject_id
self._subjects_dir = subjects_dir
self._views = views
self._times = None
self._vertex_to_label_id = dict()
self._annotation_labels = dict()
self._labels = {'lh': list(), 'rh': list()}
self._unnamed_label_id = 0 # can only grow
self._annots = {'lh': list(), 'rh': list()}
self._layered_meshes = {}
self._elevation_rng = [15, 165] # range of motion of camera on theta
self._lut_locked = None
# default values for silhouette
self._silhouette = {
'color': self._bg_color,
'line_width': 2,
'alpha': alpha,
'decimate': 0.9,
}
_validate_type(silhouette, (dict, bool), 'silhouette')
if isinstance(silhouette, dict):
self._silhouette.update(silhouette)
self.silhouette = True
else:
self.silhouette = silhouette
# for now only one color bar can be added
# since it is the same for all figures
self._colorbar_added = False
# for now only one time label can be added
# since it is the same for all figures
self._time_label_added = False
# array of data used by TimeViewer
self._data = {}
self.geo = {}
self.set_time_interpolation('nearest')
geo_kwargs = self._cortex_colormap(cortex)
# evaluate at the midpoint of the used colormap
val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin'])
self._brain_color = _get_cmap(geo_kwargs['colormap'])(val)
# load geometry for one or both hemispheres as necessary
_validate_type(offset, (str, bool), 'offset')
if isinstance(offset, str):
_check_option('offset', offset, ('auto',), extra='when str')
offset = (surf == 'inflated')
offset = None if (not offset or hemi != 'both') else 0.0
self._renderer = _get_renderer(name=self._title, size=self._size,
bgcolor=background,
shape=shape,
fig=figure)
self.plotter = self._renderer.plotter
if self.notebook:
self.window = None
else:
self.window = self.plotter.app_window
self.window.signal_close.connect(self._clean)
self._setup_canonical_rotation()
for h in self._hemis:
# Initialize a Surface object as the geometry
geo = _Surface(subject_id, h, surf, subjects_dir, offset,
units=self._units, x_dir=self._rigid[0, :3])
# Load in the geometry and curvature
geo.load_geometry()
geo.load_curvature()
self.geo[h] = geo
for ri, ci, v in self._iter_views(h):
self._renderer.subplot(ri, ci)
if self._layered_meshes.get(h) is None:
mesh = _LayeredMesh(
renderer=self._renderer,
vertices=self.geo[h].coords,
triangles=self.geo[h].faces,
normals=self.geo[h].nn,
)
mesh.map() # send to GPU
mesh.add_overlay(
scalars=self.geo[h].bin_curv,
colormap=geo_kwargs["colormap"],
rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]],
opacity=alpha,
name='curv',
)
self._layered_meshes[h] = mesh
# add metadata to the mesh for picking
mesh._polydata._hemi = h
else:
actor = self._layered_meshes[h]._actor
self._renderer.plotter.add_actor(actor)
if self.silhouette:
mesh = self._layered_meshes[h]
self._renderer._silhouette(
mesh=mesh._polydata,
color=self._silhouette["color"],
line_width=self._silhouette["line_width"],
alpha=self._silhouette["alpha"],
decimate=self._silhouette["decimate"],
)
self._renderer.set_camera(**views_dicts[h][v])
self.interaction = interaction
self._closed = False
if show:
self.show()
# update the views once the geometry is all set
for h in self._hemis:
for ri, ci, v in self._iter_views(h):
self.show_view(v, row=ri, col=ci, hemi=h)
if surf == 'flat':
self._renderer.set_interaction("rubber_band_2d")
def _setup_canonical_rotation(self):
from ...coreg import fit_matched_points, _trans_from_params
self._rigid = np.eye(4)
try:
xfm = read_talxfm(self._subject_id, self._subjects_dir)
except Exception:
return
# XYZ+origin + halfway
pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5])
pts_subj = apply_trans(invert_transform(xfm), pts_tal)
# we fit with scaling enabled, but then discard it (we just need
# the rigid-body components)
params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params')
self._rigid[:] = _trans_from_params((True, True, False), params[:6])
def setup_time_viewer(self, time_viewer=True, show_traces=True):
"""Configure the time viewer parameters.
Parameters
----------
time_viewer : bool
If True, enable widgets interaction. Defaults to True.
show_traces : bool
If True, enable visualization of time traces. Defaults to True.
Notes
-----
The keyboard shortcuts are the following:
'?': Display help window
'i': Toggle interface
's': Apply auto-scaling
'r': Restore original clim
'c': Clear all traces
'n': Shift the time forward by the playback speed
'b': Shift the time backward by the playback speed
'Space': Start/Pause playback
'Up': Decrease camera elevation angle
'Down': Increase camera elevation angle
'Left': Decrease camera azimuth angle
'Right': Increase camera azimuth angle
"""
from ..backends._utils import _qt_disable_paint
if self.time_viewer:
return
if not self._data:
raise ValueError("No data to visualize. See ``add_data``.")
self.time_viewer = time_viewer
self.orientation = list(_lh_views_dict.keys())
self.default_smoothing_range = [0, 15]
# Default configuration
self.playback = False
self.visibility = False
self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)
self.default_scaling_range = [0.2, 2.0]
self.default_playback_speed_range = [0.01, 1]
self.default_playback_speed_value = 0.01
self.default_status_bar_msg = "Press ? for help"
self.default_label_extract_modes = {
"stc": ["mean", "max"],
"src": ["mean_flip", "pca_flip", "auto"],
}
self.default_trace_modes = ('vertex', 'label')
self.annot = None
self.label_extract_mode = None
all_keys = ('lh', 'rh', 'vol')
self.act_data_smooth = {key: (None, None) for key in all_keys}
self.color_list = _get_color_list()
# remove grey for better contrast on the brain
self.color_list.remove("#7f7f7f")
self.color_cycle = _ReuseCycle(self.color_list)
self.mpl_canvas = None
self.rms = None
self.picked_patches = {key: list() for key in all_keys}
self.picked_points = {key: list() for key in all_keys}
self.pick_table = dict()
self._spheres = list()
self._mouse_no_mvt = -1
self.callbacks = dict()
self.widgets = dict()
self.keys = ('fmin', 'fmid', 'fmax')
# Direct access parameters:
self.tool_bar = None
if self.notebook:
self.main_menu = None
self.status_bar = None
self.interactor = None
else:
self.main_menu = self.plotter.main_menu
self.status_bar = self.window.statusBar()
self.interactor = self.plotter.interactor
# Derived parameters:
self.playback_speed = self.default_playback_speed_value
_validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')
self.interactor_fraction = 0.25
if isinstance(show_traces, str):
self.show_traces = True
self.separate_canvas = False
self.traces_mode = 'vertex'
if show_traces == 'separate':
self.separate_canvas = True
elif show_traces == 'label':
self.traces_mode = 'label'
else:
assert show_traces == 'vertex' # guaranteed above
else:
if isinstance(show_traces, bool):
self.show_traces = show_traces
else:
show_traces = float(show_traces)
if not 0 < show_traces < 1:
raise ValueError(
'show traces, if numeric, must be between 0 and 1, '
f'got {show_traces}')
self.show_traces = True
self.interactor_fraction = show_traces
self.traces_mode = 'vertex'
self.separate_canvas = False
del show_traces
self._configure_time_label()
self._configure_scalar_bar()
self._configure_shortcuts()
self._configure_picking()
self._configure_tool_bar()
self._configure_dock()
if self.notebook:
self._renderer.show()
self.mpl_canvas.show()
self.toggle_interface()
if not self.notebook:
self._configure_playback()
self._configure_menu()
self._configure_status_bar()
# show everything at the end
with _qt_disable_paint(self.plotter):
with self._ensure_minimum_sizes():
self.show()
self._update()
@safe_event
def _clean(self):
# resolve the reference cycle
self.clear_glyphs()
self.remove_annotations()
# clear init actors
for hemi in self._hemis:
self._layered_meshes[hemi]._clean()
self._clear_callbacks()
self._clear_widgets()
self.plotter._key_press_event_callbacks.clear()
if getattr(self, 'mpl_canvas', None) is not None:
self.mpl_canvas.clear()
if getattr(self, 'act_data_smooth', None) is not None:
for key in list(self.act_data_smooth.keys()):
self.act_data_smooth[key] = None
# XXX this should be done in PyVista
for renderer in self._renderer._all_renderers:
renderer.RemoveAllLights()
# app_window cannot be set to None because it is used in __del__
for key in ('lighting', 'interactor', '_RenderWindow'):
setattr(self.plotter, key, None)
# Qt LeaveEvent requires _Iren so we use _FakeIren instead of None
# to resolve the ref to vtkGenericRenderWindowInteractor
self.plotter._Iren = _FakeIren()
if getattr(self.plotter, 'scalar_bar', None) is not None:
self.plotter.scalar_bar = None
if getattr(self.plotter, 'picker', None) is not None:
self.plotter.picker = None
# XXX end PyVista
for key in ('plotter', 'main_menu', 'window', 'tool_bar',
'status_bar', 'interactor', 'mpl_canvas', 'time_actor',
'picked_renderer', 'act_data_smooth', '_iren',
'actions', 'widgets', 'geo', '_hemi_actors', '_data'):
setattr(self, key, None)
@contextlib.contextmanager
def _ensure_minimum_sizes(self):
"""Ensure that widgets respect the windows size."""
sz = self._size
adjust_mpl = (self.show_traces and
not self.separate_canvas and
not self.notebook)
if not adjust_mpl:
yield
else:
mpl_h = int(round((sz[1] * self.interactor_fraction) /
(1 - self.interactor_fraction)))
self.mpl_canvas.canvas.setMinimumSize(sz[0], mpl_h)
try:
yield
finally:
self.splitter.setSizes([sz[1], mpl_h])
# 1. Process events
self._renderer._process_events()
self._renderer._process_events()
# 2. Get the window size that accommodates the size
sz = self.plotter.app_window.size()
# 3. Call app_window.setBaseSize and resize (in pyvistaqt)
self.plotter.window_size = (sz.width(), sz.height())
# 4. Undo the min size setting and process events
self.plotter.interactor.setMinimumSize(0, 0)
self._renderer._process_events()
self._renderer._process_events()
# 5. Resize the window (again!) to the correct size
# (not sure why, but this is required on macOS at least)
self.plotter.window_size = (sz.width(), sz.height())
self._renderer._process_events()
self._renderer._process_events()
# sizes could change, update views
for hemi in ('lh', 'rh'):
for ri, ci, v in self._iter_views(hemi):
self.show_view(view=v, row=ri, col=ci)
self._renderer._process_events()
def toggle_interface(self, value=None):
"""Toggle the interface.
Parameters
----------
value : bool | None
If True, the widgets are shown and if False, they
are hidden. If None, the state of the widgets is
toggled. Defaults to None.
"""
if value is None:
self.visibility = not self.visibility
else:
self.visibility = value
# update tool bar and dock
with self._ensure_minimum_sizes():
if self.visibility:
self._renderer._dock_show()
self._renderer._tool_bar_update_button_icon(
name="visibility", icon_name="visibility_on")
else:
self._renderer._dock_hide()
self._renderer._tool_bar_update_button_icon(
name="visibility", icon_name="visibility_off")
self._update()
def apply_auto_scaling(self):
"""Detect automatically fitting scaling parameters."""
self._update_auto_scaling()
def restore_user_scaling(self):
"""Restore original scaling parameters."""
self._update_auto_scaling(restore=True)
def toggle_playback(self, value=None):
"""Toggle time playback.
Parameters
----------
value : bool | None
If True, automatic time playback is enabled and if False,
it's disabled. If None, the state of time playback is toggled.
Defaults to None.
"""
if value is None:
self.playback = not self.playback
else:
self.playback = value
# update tool bar icon
if self.playback:
self._renderer._tool_bar_update_button_icon(
name="play", icon_name="pause")
else:
self._renderer._tool_bar_update_button_icon(
name="play", icon_name="play")
if self.playback:
time_data = self._data['time']
max_time = np.max(time_data)
if self._current_time == max_time: # start over
self.set_time_point(0) # first index
self._last_tick = time.time()
def reset(self):
"""Reset view and time step."""
self.reset_view()
max_time = len(self._data['time']) - 1
if max_time > 0:
self.callbacks["time"](
self._data["initial_time_idx"],
update_widget=True,
)
self._update()
def set_playback_speed(self, speed):
"""Set the time playback speed.
Parameters
----------
speed : float
The speed of the playback.
"""
self.playback_speed = speed
@safe_event
def _play(self):
if self.playback:
try:
self._advance()
except Exception:
self.toggle_playback(value=False)
raise
def _advance(self):
this_time = time.time()
delta = this_time - self._last_tick
self._last_tick = time.time()
time_data = self._data['time']
times = np.arange(self._n_times)
time_shift = delta * self.playback_speed
max_time = np.max(time_data)
time_point = min(self._current_time + time_shift, max_time)
# always use linear here -- this does not determine the data
# interpolation mode, it just finds where we are (in time) in
# terms of the time indices
idx = np.interp(time_point, time_data, times)
self.callbacks["time"](idx, update_widget=True)
if time_point == max_time:
self.toggle_playback(value=False)
def _configure_time_label(self):
self.time_actor = self._data.get('time_actor')
if self.time_actor is not None:
self.time_actor.SetPosition(0.5, 0.03)
self.time_actor.GetTextProperty().SetJustificationToCentered()
self.time_actor.GetTextProperty().BoldOn()
def _configure_scalar_bar(self):
if self._colorbar_added:
scalar_bar = self.plotter.scalar_bar
scalar_bar.SetOrientationToVertical()
scalar_bar.SetHeight(0.6)
scalar_bar.SetWidth(0.05)
scalar_bar.SetPosition(0.02, 0.2)
def _configure_dock_time_widget(self, layout=None):
len_time = len(self._data['time']) - 1
if len_time < 1:
return
layout = self._renderer.dock_layout if layout is None else layout
hlayout = self._renderer._dock_add_layout(vertical=False)
self.widgets["min_time"] = Widget(
widget=self._renderer._dock_add_label(value="-", layout=hlayout),
notebook=self.notebook
)
self._renderer._dock_add_stretch(hlayout)
self.widgets["current_time"] = Widget(
widget=self._renderer._dock_add_label(value="x", layout=hlayout),
notebook=self.notebook,
)
self._renderer._dock_add_stretch(hlayout)
self.widgets["max_time"] = Widget(
widget=self._renderer._dock_add_label(value="+", layout=hlayout),
notebook=self.notebook,
)
if self.notebook:
from ..backends._notebook import _ipy_add_widget
_ipy_add_widget(layout, hlayout, self._renderer.dock_width)
else:
layout.addLayout(hlayout)
min_time = float(self._data['time'][0])
max_time = float(self._data['time'][-1])
self.widgets["min_time"].set_value(f"{min_time: .3f}")
self.widgets["max_time"].set_value(f"{max_time: .3f}")
self.widgets["current_time"].set_value(f"{self._current_time: .3f}")
def _configure_dock_playback_widget(self, name):
layout = self._renderer._dock_add_group_box(name)
len_time = len(self._data['time']) - 1
# Time widget
if len_time < 1:
self.callbacks["time"] = None
self.widgets["time"] = None
else:
self.callbacks["time"] = TimeCallBack(
brain=self,
callback=self.plot_time_line,
)
self.widgets["time"] = Widget(
widget=self._renderer._dock_add_slider(
name="Time (s)",
value=self._data['time_idx'],
rng=[0, len_time],
double=True,
callback=self.callbacks["time"],
compact=False,
layout=layout,
),
notebook=self.notebook,
)
self.callbacks["time"].widget = self.widgets["time"]
# Time labels
if len_time < 1:
self.widgets["min_time"] = None
self.widgets["max_time"] = None
self.widgets["current_time"] = None
else:
self._configure_dock_time_widget(layout)
self.callbacks["time"].label = self.widgets["current_time"]
# Playback speed widget
if len_time < 1:
self.callbacks["playback_speed"] = None
self.widgets["playback_speed"] = None
else:
self.callbacks["playback_speed"] = SmartCallBack(
callback=self.set_playback_speed,
)
self.widgets["playback_speed"] = Widget(
widget=self._renderer._dock_add_spin_box(
name="Speed",
value=self.default_playback_speed_value,
rng=self.default_playback_speed_range,
callback=self.callbacks["playback_speed"],
layout=layout,
),
notebook=self.notebook,
)
self.callbacks["playback_speed"].widget = \
self.widgets["playback_speed"]
# Time label
current_time = self._current_time
assert current_time is not None # should never be the case, float
time_label = self._data['time_label']
if callable(time_label):
current_time = time_label(current_time)
else:
current_time = time_label
if self.time_actor is not None:
self.time_actor.SetInput(current_time)
del current_time
def _configure_dock_orientation_widget(self, name):
layout = self._renderer._dock_add_group_box(name)
# Renderer widget
rends = [str(i) for i in range(len(self._renderer._all_renderers))]
if len(rends) > 1:
def select_renderer(idx):
idx = int(idx)
loc = self._renderer._index_to_loc(idx)
self.plotter.subplot(*loc)
self.callbacks["renderer"] = SmartCallBack(
callback=select_renderer,
)
self.widgets["renderer"] = Widget(
widget=self._renderer._dock_add_combo_box(
name="Renderer",
value="0",
rng=rends,
callback=self.callbacks["renderer"],
layout=layout,
),
notebook=self.notebook,
)
self.callbacks["renderer"].widget = \
self.widgets["renderer"]
# Use 'lh' as a reference for orientation for 'both'
if self._hemi == 'both':
hemis_ref = ['lh']
else:
hemis_ref = self._hemis
orientation_data = [None] * len(rends)
for hemi in hemis_ref:
for ri, ci, view in self._iter_views(hemi):
idx = self._renderer._loc_to_index((ri, ci))
if view == 'flat':
_data = None
else:
_data = dict(default=view, hemi=hemi, row=ri, col=ci)
orientation_data[idx] = _data
self.callbacks["orientation"] = ShowView(
brain=self,
data=orientation_data,
)
self.widgets["orientation"] = Widget(
widget=self._renderer._dock_add_combo_box(
name=None,
value=self.orientation[0],
rng=self.orientation,
callback=self.callbacks["orientation"],
layout=layout,
),
notebook=self.notebook,
)
def _configure_dock_colormap_widget(self, name):
layout = self._renderer._dock_add_group_box(name)
self._renderer._dock_add_label(
value="min / mid / max",
align=True,
layout=layout,
)
up = UpdateLUT(brain=self)
for key in self.keys:
hlayout = self._renderer._dock_add_layout(vertical=False)
rng = _get_range(self)
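            # Bind ``key`` as a default argument so each slider/spin box
            # updates its own colormap limit (fmin/fmid/fmax) rather than the
            # loop's final value of ``key``.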
self.callbacks[key] = lambda value, key=key: up(**{key: value})
self.widgets[key] = Widget(
widget=self._renderer._dock_add_slider(
name=None,
value=self._data[key],
rng=rng,
callback=self.callbacks[key],
double=True,
layout=hlayout,
),
notebook=self.notebook,
)
self.widgets[f"entry_{key}"] = Widget(
widget=self._renderer._dock_add_spin_box(
name=None,
value=self._data[key],
callback=self.callbacks[key],
rng=rng,
layout=hlayout,
),
notebook=self.notebook,
)
up.widgets[key] = [self.widgets[key], self.widgets[f"entry_{key}"]]
if self.notebook:
from ..backends._notebook import _ipy_add_widget
_ipy_add_widget(layout, hlayout, self._renderer.dock_width)
else:
layout.addLayout(hlayout)
# reset / minus / plus
hlayout = self._renderer._dock_add_layout(vertical=False)
self._renderer._dock_add_label(
value="Rescale",
align=True,
layout=hlayout,
)
self.widgets["reset"] = Widget(
widget=self._renderer._dock_add_button(
name="↺",
callback=self.restore_user_scaling,
layout=hlayout,
),
notebook=self.notebook,
)
for key, char, val in (("fminus", "➖", 1.2 ** -0.25),
("fplus", "➕", 1.2 ** 0.25)):
self.callbacks[key] = UpdateColorbarScale(
brain=self,
factor=val,
)
self.widgets[key] = Widget(
widget=self._renderer._dock_add_button(
name=char,
callback=self.callbacks[key],
layout=hlayout,
),
notebook=self.notebook,
)
if self.notebook:
from ..backends._notebook import _ipy_add_widget
_ipy_add_widget(layout, hlayout, self._renderer.dock_width)
else:
layout.addLayout(hlayout)
# register colorbar slider representations
widgets = {key: self.widgets[key] for key in self.keys}
for name in ("fmin", "fmid", "fmax", "fminus", "fplus"):
self.callbacks[name].widgets = widgets
def _configure_dock_trace_widget(self, name):
if not self.show_traces:
return
if self.notebook:
self._configure_vertex_time_course()
return
# do not show trace mode for volumes
if (self._data.get('src', None) is not None and
self._data['src'].kind == 'volume'):
self._configure_vertex_time_course()
return
layout = self._renderer._dock_add_group_box(name)
# setup candidate annots
def _set_annot(annot):
self.clear_glyphs()
self.remove_labels()
self.remove_annotations()
self.annot = annot
if annot == 'None':
self.traces_mode = 'vertex'
self._configure_vertex_time_course()
else:
self.traces_mode = 'label'
self._configure_label_time_course()
self._update()
# setup label extraction parameters
def _set_label_mode(mode):
if self.traces_mode != 'label':
return
glyphs = copy.deepcopy(self.picked_patches)
self.label_extract_mode = mode
self.clear_glyphs()
for hemi in self._hemis:
for label_id in glyphs[hemi]:
label = self._annotation_labels[hemi][label_id]
vertex_id = label.vertices[0]
self._add_label_glyph(hemi, None, vertex_id)
self.mpl_canvas.axes.relim()
self.mpl_canvas.axes.autoscale_view()
self.mpl_canvas.update_plot()
self._update()
from ...source_estimate import _get_allowed_label_modes
from ...label import _read_annot_cands
dir_name = op.join(self._subjects_dir, self._subject_id, 'label')
cands = _read_annot_cands(dir_name, raise_error=False)
cands = cands + ['None']
self.annot = cands[0]
stc = self._data["stc"]
modes = _get_allowed_label_modes(stc)
if self._data["src"] is None:
modes = [m for m in modes if m not in
self.default_label_extract_modes["src"]]
self.label_extract_mode = modes[-1]
if self.traces_mode == 'vertex':
_set_annot('None')
else:
_set_annot(self.annot)
self.widgets["annotation"] = Widget(
widget=self._renderer._dock_add_combo_box(
name="Annotation",
value=self.annot,
rng=cands,
callback=_set_annot,
layout=layout,
),
notebook=self.notebook,
)
self.widgets["extract_mode"] = Widget(
widget=self._renderer._dock_add_combo_box(
name="Extract mode",
value=self.label_extract_mode,
rng=modes,
callback=_set_label_mode,
layout=layout,
),
notebook=self.notebook,
)
def _configure_dock(self):
self._renderer._dock_initialize()
self._configure_dock_playback_widget(name="Playback")
self._configure_dock_orientation_widget(name="Orientation")
self._configure_dock_colormap_widget(name="Color Limits")
self._configure_dock_trace_widget(name="Trace")
# Smoothing widget
self.callbacks["smoothing"] = SmartCallBack(
callback=self.set_data_smoothing,
)
self.widgets["smoothing"] = Widget(
widget=self._renderer._dock_add_spin_box(
name="Smoothing",
value=self._data['smoothing_steps'],
rng=self.default_smoothing_range,
callback=self.callbacks["smoothing"],
double=False,
),
notebook=self.notebook,
)
self.callbacks["smoothing"].widget = \
self.widgets["smoothing"]
self._renderer._dock_finalize()
def _configure_playback(self):
self.plotter.add_callback(self._play, self.refresh_rate_ms)
def _configure_mplcanvas(self):
ratio = (1 - self.interactor_fraction) / self.interactor_fraction
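        # Size the matplotlib traces canvas relative to the 3D interactor
        # according to the configured interactor fraction.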
if self.notebook:
dpi = 96
w, h = self.plotter.window_size
else:
dpi = self.window.windowHandle().screen().logicalDotsPerInch()
w = self.interactor.geometry().width()
h = self.interactor.geometry().height()
h /= ratio
# Get the fractional components for the brain and mpl
self.mpl_canvas = MplCanvas(self, w / dpi, h / dpi, dpi,
self.notebook)
xlim = [np.min(self._data['time']),
np.max(self._data['time'])]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
self.mpl_canvas.axes.set(xlim=xlim)
if not self.notebook and not self.separate_canvas:
from PyQt5.QtWidgets import QSplitter
from PyQt5.QtCore import Qt
canvas = self.mpl_canvas.canvas
vlayout = self.plotter.frame.layout()
vlayout.removeWidget(self.interactor)
self.splitter = splitter = QSplitter(
orientation=Qt.Vertical, parent=self.plotter.frame)
vlayout.addWidget(splitter)
splitter.addWidget(self.interactor)
splitter.addWidget(canvas)
self.mpl_canvas.set_color(
bg_color=self._bg_color,
fg_color=self._fg_color,
)
if not self.notebook:
self.mpl_canvas.show()
def _configure_vertex_time_course(self):
if not self.show_traces:
return
if self.mpl_canvas is None:
self._configure_mplcanvas()
else:
self.clear_glyphs()
# plot RMS of the activation
y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()
if v[0] is not None))
rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y))
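        # Equivalent to sqrt(mean(y ** 2, axis=0)): the root-mean-square
        # across the available vertices at each time sample.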
del y
self.rms, = self.mpl_canvas.axes.plot(
self._data['time'], rms,
lw=3, label='RMS', zorder=3, color=self._fg_color,
alpha=0.5, ls=':')
# now plot the time line
self.plot_time_line()
# then the picked points
for idx, hemi in enumerate(['lh', 'rh', 'vol']):
act_data = self.act_data_smooth.get(hemi, [None])[0]
if act_data is None:
continue
hemi_data = self._data[hemi]
vertices = hemi_data['vertices']
# simulate a picked renderer
if self._hemi in ('both', 'rh') or hemi == 'vol':
idx = 0
self.picked_renderer = self._renderer._all_renderers[idx]
# initialize the default point
if self._data['initial_time'] is not None:
# pick at that time
use_data = act_data[
:, [np.round(self._data['time_idx']).astype(int)]]
else:
use_data = act_data
ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),
use_data.shape)
if hemi == 'vol':
mesh = hemi_data['grid']
else:
mesh = self._layered_meshes[hemi]._polydata
vertex_id = vertices[ind[0]]
self._add_vertex_glyph(hemi, mesh, vertex_id)
def _configure_picking(self):
# get data for each hemi
from scipy import sparse
for idx, hemi in enumerate(['vol', 'lh', 'rh']):
hemi_data = self._data.get(hemi)
if hemi_data is not None:
act_data = hemi_data['array']
if act_data.ndim == 3:
act_data = np.linalg.norm(act_data, axis=1)
smooth_mat = hemi_data.get('smooth_mat')
vertices = hemi_data['vertices']
if hemi == 'vol':
assert smooth_mat is None
smooth_mat = sparse.csr_matrix(
(np.ones(len(vertices)),
(vertices, np.arange(len(vertices)))))
self.act_data_smooth[hemi] = (act_data, smooth_mat)
self._renderer._update_picking_callback(
self._on_mouse_move,
self._on_button_press,
self._on_button_release,
self._on_pick
)
def _save_movie_noname(self):
return self.save_movie(None)
def _screenshot(self):
from PIL import Image
img = self.screenshot(time_viewer=True)
def _save_image(fname, img):
Image.fromarray(img).save(fname)
if self.notebook:
fname = self._renderer.actions.get("screenshot_field").value
fname = self._renderer._get_screenshot_filename() \
if len(fname) == 0 else fname
_save_image(fname, img)
else:
try:
from pyvista.plotting.qt_plotting import FileDialog
except ImportError:
from pyvistaqt.plotting import FileDialog
FileDialog(
self.plotter.app_window,
callback=partial(_save_image, img=img)
)
def _configure_tool_bar(self):
self._renderer._tool_bar_load_icons()
self._renderer._tool_bar_initialize()
self._renderer._tool_bar_add_button(
name="screenshot",
desc="Take a screenshot",
func=self._screenshot,
)
self._renderer._tool_bar_add_text(
name="screenshot_field",
value=None,
placeholder="Type a file name",
)
self._renderer._tool_bar_add_button(
name="movie",
desc="Save movie...",
func=self._save_movie_noname,
)
self._renderer._tool_bar_add_button(
name="visibility",
desc="Toggle Visibility",
func=self.toggle_interface,
icon_name="visibility_on"
)
self._renderer._tool_bar_add_button(
name="play",
desc="Play/Pause",
func=self.toggle_playback,
)
self._renderer._tool_bar_add_button(
name="reset",
desc="Reset",
func=self.reset,
)
self._renderer._tool_bar_add_button(
name="scale",
desc="Auto-Scale",
func=self.apply_auto_scaling,
)
self._renderer._tool_bar_add_button(
name="clear",
desc="Clear traces",
func=self.clear_glyphs,
)
self._renderer._tool_bar_add_spacer()
self._renderer._tool_bar_add_button(
name="help",
desc="Help",
func=self.help,
)
self._renderer._tool_bar_finalize()
# Qt shortcuts
if not self.notebook:
self._renderer.actions["movie"].setShortcut("ctrl+shift+s")
self._renderer.actions["play"].setShortcut(" ")
self._renderer.actions["help"].setShortcut("?")
def _shift_time(self, op):
self.callbacks["time"](
value=(op(self._current_time, self.playback_speed)),
time_as_index=False,
update_widget=True,
)
def _rotate_azimuth(self, value):
azimuth = (self._renderer.figure._azimuth + value) % 360
self._renderer.set_camera(azimuth=azimuth, reset_camera=False)
def _rotate_elevation(self, value):
elevation = np.clip(
self._renderer.figure._elevation + value,
self._elevation_rng[0],
self._elevation_rng[1],
)
self._renderer.set_camera(elevation=elevation, reset_camera=False)
def _configure_shortcuts(self):
# First, we remove the default bindings:
self.plotter._key_press_event_callbacks.clear()
# Then, we add our own:
self.plotter.add_key_event("i", self.toggle_interface)
self.plotter.add_key_event("s", self.apply_auto_scaling)
self.plotter.add_key_event("r", self.restore_user_scaling)
self.plotter.add_key_event("c", self.clear_glyphs)
self.plotter.add_key_event("n", partial(self._shift_time,
op=lambda x, y: x + y))
self.plotter.add_key_event("b", partial(self._shift_time,
op=lambda x, y: x - y))
for key, func, sign in (("Left", self._rotate_azimuth, 1),
("Right", self._rotate_azimuth, -1),
("Up", self._rotate_elevation, 1),
("Down", self._rotate_elevation, -1)):
self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE))
def _configure_menu(self):
# remove default picking menu
to_remove = list()
for action in self.main_menu.actions():
if action.text() == "Tools":
to_remove.append(action)
for action in to_remove:
self.main_menu.removeAction(action)
# add help menu
menu = self.main_menu.addMenu('Help')
menu.addAction('Show MNE key bindings\t?', self.help)
def _configure_status_bar(self):
from PyQt5.QtWidgets import QLabel, QProgressBar
self.status_msg = QLabel(self.default_status_bar_msg)
self.status_progress = QProgressBar()
self.status_bar.layout().addWidget(self.status_msg, 1)
self.status_bar.layout().addWidget(self.status_progress, 0)
self.status_progress.hide()
def _on_mouse_move(self, vtk_picker, event):
if self._mouse_no_mvt:
self._mouse_no_mvt -= 1
def _on_button_press(self, vtk_picker, event):
self._mouse_no_mvt = 2
def _on_button_release(self, vtk_picker, event):
if self._mouse_no_mvt > 0:
x, y = vtk_picker.GetEventPosition()
# programmatically detect the picked renderer
self.picked_renderer = self.plotter.iren.FindPokedRenderer(x, y)
# trigger the pick
self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
self._mouse_no_mvt = 0
def _on_pick(self, vtk_picker, event):
if not self.show_traces:
return
# vtk_picker is a vtkCellPicker
cell_id = vtk_picker.GetCellId()
mesh = vtk_picker.GetDataSet()
if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
return # don't pick
# 1) Check to see if there are any spheres along the ray
if len(self._spheres):
collection = vtk_picker.GetProp3Ds()
found_sphere = None
for ii in range(collection.GetNumberOfItems()):
actor = collection.GetItemAsObject(ii)
for sphere in self._spheres:
if any(a is actor for a in sphere._actors):
found_sphere = sphere
break
if found_sphere is not None:
break
if found_sphere is not None:
assert found_sphere._is_glyph
mesh = found_sphere
        # 2) Remove the sphere if that is what was picked
if hasattr(mesh, "_is_glyph"):
self._remove_vertex_glyph(mesh)
return
# 3) Otherwise, pick the objects in the scene
try:
hemi = mesh._hemi
except AttributeError: # volume
hemi = 'vol'
else:
assert hemi in ('lh', 'rh')
if self.act_data_smooth[hemi][0] is None: # no data to add for hemi
return
pos = np.array(vtk_picker.GetPickPosition())
if hemi == 'vol':
# VTK will give us the point closest to the viewer in the vol.
# We want to pick the point with the maximum value along the
            # camera-to-click ray, which fortunately we can get "just"
# by inspecting the points that are sufficiently close to the
# ray.
grid = mesh = self._data[hemi]['grid']
vertices = self._data[hemi]['vertices']
coords = self._data[hemi]['grid_coords'][vertices]
scalars = grid.cell_arrays['values'][vertices]
spacing = np.array(grid.GetSpacing())
max_dist = np.linalg.norm(spacing) / 2.
origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()
ori = pos - origin
ori /= np.linalg.norm(ori)
# the magic formula: distance from a ray to a given point
dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)
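            # Since ``ori`` is a unit vector, |ori x (coords - pos)| is the
            # perpendicular distance of each candidate point from the ray.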
assert dists.shape == (len(coords),)
mask = dists <= max_dist
idx = np.where(mask)[0]
if len(idx) == 0:
return # weird point on edge of volume?
# useful for debugging the ray by mapping it into the volume:
# dists = dists - dists.min()
# dists = (1. - dists / dists.max()) * self._cmap_range[1]
# grid.cell_arrays['values'][vertices] = dists * mask
idx = idx[np.argmax(np.abs(scalars[idx]))]
vertex_id = vertices[idx]
# Naive way: convert pos directly to idx; i.e., apply mri_src_t
# shape = self._data[hemi]['grid_shape']
# taking into account the cell vs point difference (spacing/2)
# shift = np.array(grid.GetOrigin()) + spacing / 2.
# ijk = np.round((pos - shift) / spacing).astype(int)
# vertex_id = np.ravel_multi_index(ijk, shape, order='F')
else:
vtk_cell = mesh.GetCell(cell_id)
cell = [vtk_cell.GetPointId(point_id) for point_id
in range(vtk_cell.GetNumberOfPoints())]
vertices = mesh.points[cell]
idx = np.argmin(abs(vertices - pos), axis=0)
vertex_id = cell[idx[0]]
if self.traces_mode == 'label':
self._add_label_glyph(hemi, mesh, vertex_id)
else:
self._add_vertex_glyph(hemi, mesh, vertex_id)
def _add_label_glyph(self, hemi, mesh, vertex_id):
if hemi == 'vol':
return
label_id = self._vertex_to_label_id[hemi][vertex_id]
label = self._annotation_labels[hemi][label_id]
# remove the patch if already picked
if label_id in self.picked_patches[hemi]:
self._remove_label_glyph(hemi, label_id)
return
if hemi == label.hemi:
self.add_label(label, borders=True, reset_camera=False)
self.picked_patches[hemi].append(label_id)
def _remove_label_glyph(self, hemi, label_id):
label = self._annotation_labels[hemi][label_id]
label._line.remove()
self.color_cycle.restore(label._color)
self.mpl_canvas.update_plot()
self._layered_meshes[hemi].remove_overlay(label.name)
self.picked_patches[hemi].remove(label_id)
def _add_vertex_glyph(self, hemi, mesh, vertex_id):
if vertex_id in self.picked_points[hemi]:
return
# skip if the wrong hemi is selected
if self.act_data_smooth[hemi][0] is None:
return
color = next(self.color_cycle)
line = self.plot_time_course(hemi, vertex_id, color)
if hemi == 'vol':
ijk = np.unravel_index(
vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')
# should just be GetCentroid(center), but apparently it's VTK9+:
# center = np.empty(3)
# voxel.GetCentroid(center)
voxel = mesh.GetCell(*ijk)
pts = voxel.GetPoints()
n_pts = pts.GetNumberOfPoints()
center = np.empty((n_pts, 3))
for ii in range(pts.GetNumberOfPoints()):
pts.GetPoint(ii, center[ii])
center = np.mean(center, axis=0)
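            # The mean of the voxel's corner points approximates its centroid.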
else:
center = mesh.GetPoints().GetPoint(vertex_id)
del mesh
# from the picked renderer to the subplot coords
rindex = self._renderer._all_renderers.index(self.picked_renderer)
row, col = self._renderer._index_to_loc(rindex)
actors = list()
spheres = list()
for ri, ci, _ in self._iter_views(hemi):
self.plotter.subplot(ri, ci)
# Using _sphere() instead of renderer.sphere() for 2 reasons:
# 1) renderer.sphere() fails on Windows in a scenario where a lot
# of picking requests are done in a short span of time (could be
# mitigated with synchronization/delay?)
# 2) the glyph filter is used in renderer.sphere() but only one
# sphere is required in this function.
actor, sphere = self._renderer._sphere(
center=np.array(center),
color=color,
radius=4.0,
)
actors.append(actor)
spheres.append(sphere)
# add metadata for picking
for sphere in spheres:
sphere._is_glyph = True
sphere._hemi = hemi
sphere._line = line
sphere._actors = actors
sphere._color = color
sphere._vertex_id = vertex_id
self.picked_points[hemi].append(vertex_id)
self._spheres.extend(spheres)
self.pick_table[vertex_id] = spheres
return sphere
def _remove_vertex_glyph(self, mesh, render=True):
vertex_id = mesh._vertex_id
if vertex_id not in self.pick_table:
return
hemi = mesh._hemi
color = mesh._color
spheres = self.pick_table[vertex_id]
spheres[0]._line.remove()
self.mpl_canvas.update_plot()
self.picked_points[hemi].remove(vertex_id)
with warnings.catch_warnings(record=True):
# We intentionally ignore these in case we have traversed the
# entire color cycle
warnings.simplefilter('ignore')
self.color_cycle.restore(color)
for sphere in spheres:
# remove all actors
self.plotter.remove_actor(sphere._actors, render=render)
sphere._actors = None
self._spheres.pop(self._spheres.index(sphere))
self.pick_table.pop(vertex_id)
def clear_glyphs(self):
"""Clear the picking glyphs."""
if not self.time_viewer:
return
for sphere in list(self._spheres): # will remove itself, so copy
self._remove_vertex_glyph(sphere, render=False)
assert sum(len(v) for v in self.picked_points.values()) == 0
assert len(self.pick_table) == 0
assert len(self._spheres) == 0
for hemi in self._hemis:
for label_id in list(self.picked_patches[hemi]):
self._remove_label_glyph(hemi, label_id)
assert sum(len(v) for v in self.picked_patches.values()) == 0
if self.rms is not None:
self.rms.remove()
self.rms = None
self._update()
def plot_time_course(self, hemi, vertex_id, color):
"""Plot the vertex time course.
Parameters
----------
hemi : str
The hemisphere id of the vertex.
vertex_id : int
The vertex identifier in the mesh.
color : matplotlib color
The color of the time course.
Returns
-------
line : matplotlib object
The time line object.
"""
if self.mpl_canvas is None:
return
time = self._data['time'].copy() # avoid circular ref
mni = None
if hemi == 'vol':
hemi_str = 'V'
xfm = read_talxfm(
self._subject_id, self._subjects_dir)
if self._units == 'mm':
xfm['trans'][:3, 3] *= 1000.
ijk = np.unravel_index(
vertex_id, self._data[hemi]['grid_shape'], order='F')
src_mri_t = self._data[hemi]['grid_src_mri_t']
mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)
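            # Compose the source->MRI and MRI->MNI (Talairach) transforms so
            # the voxel ijk index can be reported in MNI coordinates below.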
else:
hemi_str = 'L' if hemi == 'lh' else 'R'
try:
mni = vertex_to_mni(
vertices=vertex_id,
hemis=0 if hemi == 'lh' else 1,
subject=self._subject_id,
subjects_dir=self._subjects_dir
)
except Exception:
mni = None
if mni is not None:
mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni)
else:
mni = ''
label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni)
act_data, smooth = self.act_data_smooth[hemi]
if smooth is not None:
act_data = smooth[vertex_id].dot(act_data)[0]
else:
act_data = act_data[vertex_id].copy()
line = self.mpl_canvas.plot(
time,
act_data,
label=label,
lw=1.,
color=color,
zorder=4,
)
return line
def plot_time_line(self):
"""Add the time line to the MPL widget."""
if self.mpl_canvas is None:
return
if isinstance(self.show_traces, bool) and self.show_traces:
# add time information
current_time = self._current_time
if not hasattr(self, "time_line"):
self.time_line = self.mpl_canvas.plot_time_line(
x=current_time,
label='time',
color=self._fg_color,
lw=1,
)
self.time_line.set_xdata(current_time)
self.mpl_canvas.update_plot()
def help(self):
"""Display the help window."""
pairs = [
('?', 'Display help window'),
('i', 'Toggle interface'),
('s', 'Apply auto-scaling'),
('r', 'Restore original clim'),
('c', 'Clear all traces'),
('n', 'Shift the time forward by the playback speed'),
('b', 'Shift the time backward by the playback speed'),
('Space', 'Start/Pause playback'),
('Up', 'Decrease camera elevation angle'),
('Down', 'Increase camera elevation angle'),
('Left', 'Decrease camera azimuth angle'),
('Right', 'Increase camera azimuth angle'),
]
text1, text2 = zip(*pairs)
text1 = '\n'.join(text1)
text2 = '\n'.join(text2)
_show_help(
col1=text1,
col2=text2,
width=5,
height=2,
)
def _clear_callbacks(self):
if not hasattr(self, 'callbacks'):
return
for callback in self.callbacks.values():
if callback is not None:
for key in ('plotter', 'brain', 'callback',
'widget', 'widgets'):
setattr(callback, key, None)
self.callbacks.clear()
def _clear_widgets(self):
if not hasattr(self, 'widgets'):
return
for widget in self.widgets.values():
if widget is not None:
for key in ('triggered', 'valueChanged'):
setattr(widget, key, None)
self.widgets.clear()
@property
def interaction(self):
"""The interaction style."""
return self._interaction
@interaction.setter
def interaction(self, interaction):
"""Set the interaction style."""
_validate_type(interaction, str, 'interaction')
_check_option('interaction', interaction, ('trackball', 'terrain'))
for ri, ci, _ in self._iter_views('vol'): # will traverse all
self._renderer.subplot(ri, ci)
self._renderer.set_interaction(interaction)
def _cortex_colormap(self, cortex):
"""Return the colormap corresponding to the cortex."""
colormap_map = dict(classic=dict(colormap="Greys",
vmin=-1, vmax=2),
high_contrast=dict(colormap="Greys",
vmin=-.1, vmax=1.3),
low_contrast=dict(colormap="Greys",
vmin=-5, vmax=5),
bone=dict(colormap="bone_r",
vmin=-.2, vmax=2),
)
return colormap_map[cortex]
@verbose
def add_data(self, array, fmin=None, fmid=None, fmax=None,
thresh=None, center=None, transparent=False, colormap="auto",
alpha=1, vertices=None, smoothing_steps=None, time=None,
time_label="auto", colorbar=True,
hemi=None, remove_existing=None, time_label_size=None,
initial_time=None, scale_factor=None, vector_alpha=None,
clim=None, src=None, volume_options=0.4, colorbar_kwargs=None,
verbose=None):
"""Display data from a numpy array on the surface or volume.
This provides a similar interface to
:meth:`surfer.Brain.add_overlay`, but it displays
it with a single colormap. It offers more flexibility over the
colormap, and provides a way to display four-dimensional data
(i.e., a timecourse) or five-dimensional data (i.e., a
vector-valued timecourse).
.. note:: ``fmin`` sets the low end of the colormap, and is separate
from thresh (this is a different convention from
:meth:`surfer.Brain.add_overlay`).
Parameters
----------
array : numpy array, shape (n_vertices[, 3][, n_times])
            Data array. For the data to be understood as vector-valued
            (3 values per vertex corresponding to X/Y/Z surface RAS),
            ``array`` must have all 3 dimensions.
If vectors with no time dimension are desired, consider using a
singleton (e.g., ``np.newaxis``) to create a "time" dimension
and pass ``time_label=None`` (vector values are not supported).
%(fmin_fmid_fmax)s
%(thresh)s
%(center)s
%(transparent)s
colormap : str, list of color, or array
Name of matplotlib colormap to use, a list of matplotlib colors,
            or a custom look up table (an n x 4 array coded with RGBA values
            between 0 and 255). The default "auto" chooses a divergent
            colormap if "center" is given (currently "icefire"), otherwise a
            sequential colormap (currently "rocket").
alpha : float in [0, 1]
Alpha level to control opacity of the overlay.
vertices : numpy array
Vertices for which the data is defined (needed if
``len(data) < nvtx``).
smoothing_steps : int or None
            Number of smoothing steps (smoothing is used if len(data) < nvtx).
            The value 'nearest' can be used too. None (default) will use as
            many as necessary to fill the surface.
time : numpy array
Time points in the data array (if data is 2D or 3D).
%(time_label)s
colorbar : bool
Whether to add a colorbar to the figure. Can also be a tuple
to give the (row, col) index of where to put the colorbar.
hemi : str | None
If None, it is assumed to belong to the hemisphere being
shown. If two hemispheres are being shown, an error will
be thrown.
remove_existing : bool
Not supported yet.
Remove surface added by previous "add_data" call. Useful for
conserving memory when displaying different data in a loop.
time_label_size : int
Font size of the time label (default 14).
initial_time : float | None
Time initially shown in the plot. ``None`` to use the first time
sample (default).
scale_factor : float | None (default)
The scale factor to use when displaying glyphs for vector-valued
data.
vector_alpha : float | None
Alpha level to control opacity of the arrows. Only used for
vector-valued data. If None (default), ``alpha`` is used.
clim : dict
Original clim arguments.
%(src_volume_options)s
colorbar_kwargs : dict | None
Options to pass to :meth:`pyvista.BasePlotter.add_scalar_bar`
(e.g., ``dict(title_font_size=10)``).
%(verbose)s
Notes
-----
If the data is defined for a subset of vertices (specified
by the "vertices" parameter), a smoothing method is used to interpolate
        the data onto the high resolution surface. If the data is defined for
        a subsampled version of the surface, smoothing_steps can be set to
        None, in which case only as many smoothing steps as necessary are
        applied to fill the whole surface with non-zero values.
Due to a Mayavi (or VTK) alpha rendering bug, ``vector_alpha`` is
clamped to be strictly < 1.
"""
_validate_type(transparent, bool, 'transparent')
_validate_type(vector_alpha, ('numeric', None), 'vector_alpha')
_validate_type(scale_factor, ('numeric', None), 'scale_factor')
# those parameters are not supported yet, only None is allowed
_check_option('thresh', thresh, [None])
_check_option('remove_existing', remove_existing, [None])
_validate_type(time_label_size, (None, 'numeric'), 'time_label_size')
if time_label_size is not None:
time_label_size = float(time_label_size)
if time_label_size < 0:
                raise ValueError('time_label_size must be non-negative, got '
f'{time_label_size}')
hemi = self._check_hemi(hemi, extras=['vol'])
stc, array, vertices = self._check_stc(hemi, array, vertices)
array = np.asarray(array)
vector_alpha = alpha if vector_alpha is None else vector_alpha
self._data['vector_alpha'] = vector_alpha
self._data['scale_factor'] = scale_factor
# Create time array and add label if > 1D
if array.ndim <= 1:
time_idx = 0
else:
# check time array
if time is None:
time = np.arange(array.shape[-1])
else:
time = np.asarray(time)
if time.shape != (array.shape[-1],):
raise ValueError('time has shape %s, but need shape %s '
'(array.shape[-1])' %
(time.shape, (array.shape[-1],)))
self._data["time"] = time
if self._n_times is None:
self._times = time
elif len(time) != self._n_times:
raise ValueError("New n_times is different from previous "
"n_times")
elif not np.array_equal(time, self._times):
raise ValueError("Not all time values are consistent with "
"previously set times.")
# initial time
if initial_time is None:
time_idx = 0
else:
time_idx = self._to_time_index(initial_time)
# time label
time_label, _ = _handle_time(time_label, 's', time)
y_txt = 0.05 + 0.1 * bool(colorbar)
if array.ndim == 3:
if array.shape[1] != 3:
raise ValueError('If array has 3 dimensions, array.shape[1] '
'must equal 3, got %s' % (array.shape[1],))
fmin, fmid, fmax = _update_limits(
fmin, fmid, fmax, center, array
)
if colormap == 'auto':
colormap = 'mne' if center is not None else 'hot'
if smoothing_steps is None:
smoothing_steps = 7
elif smoothing_steps == 'nearest':
smoothing_steps = 0
elif isinstance(smoothing_steps, int):
if smoothing_steps < 0:
                raise ValueError('Expected value of `smoothing_steps` to be'
                                 ' non-negative but {} was given.'.format(
                                     smoothing_steps))
else:
            raise TypeError('Expected type of `smoothing_steps` to be int or'
                            ' NoneType but {} was given.'.format(
                                type(smoothing_steps)))
self._data['stc'] = stc
self._data['src'] = src
self._data['smoothing_steps'] = smoothing_steps
self._data['clim'] = clim
self._data['time'] = time
self._data['initial_time'] = initial_time
self._data['time_label'] = time_label
self._data['initial_time_idx'] = time_idx
self._data['time_idx'] = time_idx
self._data['transparent'] = transparent
# data specific for a hemi
self._data[hemi] = dict()
self._data[hemi]['glyph_dataset'] = None
self._data[hemi]['glyph_mapper'] = None
self._data[hemi]['glyph_actor'] = None
self._data[hemi]['array'] = array
self._data[hemi]['vertices'] = vertices
self._data['alpha'] = alpha
self._data['colormap'] = colormap
self._data['center'] = center
self._data['fmin'] = fmin
self._data['fmid'] = fmid
self._data['fmax'] = fmax
self.update_lut()
# 1) add the surfaces first
actor = None
for ri, ci, _ in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
if hemi in ('lh', 'rh'):
actor = self._layered_meshes[hemi]._actor
else:
src_vol = src[2:] if src.kind == 'mixed' else src
actor, _ = self._add_volume_data(hemi, src_vol, volume_options)
assert actor is not None # should have added one
# 2) update time and smoothing properties
# set_data_smoothing calls "set_time_point" for us, which will set
# _current_time
self.set_time_interpolation(self.time_interpolation)
self.set_data_smoothing(self._data['smoothing_steps'])
# 3) add the other actors
if colorbar is True:
            # bottom left by default
colorbar = (self._subplot_shape[0] - 1, 0)
for ri, ci, v in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
# Add the time label to the bottommost view
do = (ri, ci) == colorbar
if not self._time_label_added and time_label is not None and do:
time_actor = self._renderer.text2d(
x_window=0.95, y_window=y_txt,
color=self._fg_color,
size=time_label_size,
text=time_label(self._current_time),
justification='right'
)
self._data['time_actor'] = time_actor
self._time_label_added = True
if colorbar and not self._colorbar_added and do:
kwargs = dict(source=actor, n_labels=8, color=self._fg_color,
bgcolor=self._brain_color[:3])
kwargs.update(colorbar_kwargs or {})
self._renderer.scalarbar(**kwargs)
self._colorbar_added = True
self._renderer.set_camera(**views_dicts[hemi][v])
# 4) update the scalar bar and opacity
self.update_lut(alpha=alpha)
def _iter_views(self, hemi):
# which rows and columns each type of visual needs to be added to
if self._hemi == 'split':
hemi_dict = dict(lh=[0], rh=[1], vol=[0, 1])
else:
hemi_dict = dict(lh=[0], rh=[0], vol=[0])
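        # e.g., for hemi='lh' with views=('lat', 'med') and a horizontal view
        # layout this yields (0, 0, 'lat') and (0, 1, 'med'); in split mode
        # 'rh' lands in row 1 instead.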
for vi, view in enumerate(self._views):
if self._hemi == 'split':
view_dict = dict(lh=[vi], rh=[vi], vol=[vi, vi])
else:
view_dict = dict(lh=[vi], rh=[vi], vol=[vi])
if self._view_layout == 'vertical':
rows = view_dict # views are rows
cols = hemi_dict # hemis are columns
else:
rows = hemi_dict # hemis are rows
cols = view_dict # views are columns
for ri, ci in zip(rows[hemi], cols[hemi]):
yield ri, ci, view
def remove_labels(self):
"""Remove all the ROI labels from the image."""
for hemi in self._hemis:
mesh = self._layered_meshes[hemi]
for label in self._labels[hemi]:
mesh.remove_overlay(label.name)
self._labels[hemi].clear()
self._update()
def remove_annotations(self):
"""Remove all annotations from the image."""
for hemi in self._hemis:
mesh = self._layered_meshes[hemi]
mesh.remove_overlay(self._annots[hemi])
self._annots[hemi].clear()
self._update()
def _add_volume_data(self, hemi, src, volume_options):
_validate_type(src, SourceSpaces, 'src')
_check_option('src.kind', src.kind, ('volume',))
_validate_type(
volume_options, (dict, 'numeric', None), 'volume_options')
assert hemi == 'vol'
if not isinstance(volume_options, dict):
volume_options = dict(
resolution=float(volume_options) if volume_options is not None
else None)
volume_options = _handle_default('volume_options', volume_options)
allowed_types = (
['resolution', (None, 'numeric')],
['blending', (str,)],
['alpha', ('numeric', None)],
['surface_alpha', (None, 'numeric')],
['silhouette_alpha', (None, 'numeric')],
['silhouette_linewidth', ('numeric',)],
)
for key, types in allowed_types:
_validate_type(volume_options[key], types,
f'volume_options[{repr(key)}]')
extra_keys = set(volume_options) - set(a[0] for a in allowed_types)
if len(extra_keys):
raise ValueError(
f'volume_options got unknown keys {sorted(extra_keys)}')
blending = _check_option('volume_options["blending"]',
volume_options['blending'],
('composite', 'mip'))
alpha = volume_options['alpha']
if alpha is None:
alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1.
alpha = np.clip(float(alpha), 0., 1.)
resolution = volume_options['resolution']
surface_alpha = volume_options['surface_alpha']
if surface_alpha is None:
surface_alpha = min(alpha / 2., 0.1)
silhouette_alpha = volume_options['silhouette_alpha']
if silhouette_alpha is None:
silhouette_alpha = surface_alpha / 4.
silhouette_linewidth = volume_options['silhouette_linewidth']
del volume_options
volume_pos = self._data[hemi].get('grid_volume_pos')
volume_neg = self._data[hemi].get('grid_volume_neg')
center = self._data['center']
if volume_pos is None:
xyz = np.meshgrid(
*[np.arange(s) for s in src[0]['shape']], indexing='ij')
dimensions = np.array(src[0]['shape'], int)
mult = 1000 if self._units == 'mm' else 1
src_mri_t = src[0]['src_mri_t']['trans'].copy()
src_mri_t[:3] *= mult
if resolution is not None:
resolution = resolution * mult / 1000. # to mm
del src, mult
coords = np.array([c.ravel(order='F') for c in xyz]).T
coords = apply_trans(src_mri_t, coords)
self.geo[hemi] = Bunch(coords=coords)
vertices = self._data[hemi]['vertices']
assert self._data[hemi]['array'].shape[0] == len(vertices)
# MNE constructs the source space on a uniform grid in MRI space,
# but mne coreg can change it to be non-uniform, so we need to
# use all three elements here
assert np.allclose(
src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3]))
spacing = np.diag(src_mri_t)[:3]
origin = src_mri_t[:3, 3] - spacing / 2.
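            # Shift by half a voxel: scalars are attached to cells (voxels),
            # so this puts the cell centers on the source-space grid points.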
scalars = np.zeros(np.prod(dimensions))
scalars[vertices] = 1. # for the outer mesh
grid, grid_mesh, volume_pos, volume_neg = \
self._renderer._volume(dimensions, origin, spacing, scalars,
surface_alpha, resolution, blending,
center)
self._data[hemi]['alpha'] = alpha # incorrectly set earlier
self._data[hemi]['grid'] = grid
self._data[hemi]['grid_mesh'] = grid_mesh
self._data[hemi]['grid_coords'] = coords
self._data[hemi]['grid_src_mri_t'] = src_mri_t
self._data[hemi]['grid_shape'] = dimensions
self._data[hemi]['grid_volume_pos'] = volume_pos
self._data[hemi]['grid_volume_neg'] = volume_neg
actor_pos, _ = self._renderer.plotter.add_actor(
volume_pos, reset_camera=False, name=None, culling=False)
if volume_neg is not None:
actor_neg, _ = self._renderer.plotter.add_actor(
volume_neg, reset_camera=False, name=None, culling=False)
else:
actor_neg = None
grid_mesh = self._data[hemi]['grid_mesh']
if grid_mesh is not None:
_, prop = self._renderer.plotter.add_actor(
grid_mesh, reset_camera=False, name=None, culling=False,
pickable=False)
prop.SetColor(*self._brain_color[:3])
prop.SetOpacity(surface_alpha)
if silhouette_alpha > 0 and silhouette_linewidth > 0:
for ri, ci, v in self._iter_views('vol'):
self._renderer.subplot(ri, ci)
self._renderer._silhouette(
mesh=grid_mesh.GetInput(),
color=self._brain_color[:3],
line_width=silhouette_linewidth,
alpha=silhouette_alpha,
)
return actor_pos, actor_neg
def add_label(self, label, color=None, alpha=1, scalar_thresh=None,
borders=False, hemi=None, subdir=None,
reset_camera=True):
"""Add an ROI label to the image.
Parameters
----------
label : str | instance of Label
Label filepath or name. Can also be an instance of
an object with attributes "hemi", "vertices", "name", and
optionally "color" and "values" (if scalar_thresh is not None).
color : matplotlib-style color | None
Anything matplotlib accepts: string, RGB, hex, etc. (default
"crimson").
alpha : float in [0, 1]
Alpha level to control opacity.
scalar_thresh : None | float
Threshold the label ids using this value in the label
file's scalar field (i.e. label only vertices with
scalar >= thresh).
borders : bool | int
Show only label borders. If int, specify the number of steps
(away from the true border) along the cortical mesh to include
as part of the border definition.
hemi : str | None
            If None, it is assumed to belong to the hemisphere being
shown.
subdir : None | str
If a label is specified as name, subdir can be used to indicate
that the label file is in a sub-directory of the subject's
label directory rather than in the label directory itself (e.g.
for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label``
``brain.add_label('cuneus', subdir='aparc')``).
reset_camera : bool
If True, reset the camera view after adding the label. Defaults
to True.
Notes
-----
To remove previously added labels, run Brain.remove_labels().
"""
from matplotlib.colors import colorConverter
from ...label import read_label
if isinstance(label, str):
if color is None:
color = "crimson"
if os.path.isfile(label):
filepath = label
label = read_label(filepath)
hemi = label.hemi
label_name = os.path.basename(filepath).split('.')[1]
else:
hemi = self._check_hemi(hemi)
label_name = label
label_fname = ".".join([hemi, label_name, 'label'])
if subdir is None:
filepath = op.join(self._subjects_dir, self._subject_id,
'label', label_fname)
else:
filepath = op.join(self._subjects_dir, self._subject_id,
'label', subdir, label_fname)
if not os.path.exists(filepath):
raise ValueError('Label file %s does not exist'
% filepath)
label = read_label(filepath)
ids = label.vertices
scalars = label.values
else:
# try to extract parameters from label instance
try:
hemi = label.hemi
ids = label.vertices
if label.name is None:
label.name = 'unnamed' + str(self._unnamed_label_id)
self._unnamed_label_id += 1
label_name = str(label.name)
if color is None:
if hasattr(label, 'color') and label.color is not None:
color = label.color
else:
color = "crimson"
if scalar_thresh is not None:
scalars = label.values
except Exception:
raise ValueError('Label was not a filename (str), and could '
'not be understood as a class. The class '
'must have attributes "hemi", "vertices", '
'"name", and (if scalar_thresh is not None)'
'"values"')
hemi = self._check_hemi(hemi)
if scalar_thresh is not None:
ids = ids[scalars >= scalar_thresh]
scalars = np.zeros(self.geo[hemi].coords.shape[0])
scalars[ids] = 1
if self.time_viewer and self.show_traces \
and self.traces_mode == 'label':
stc = self._data["stc"]
src = self._data["src"]
tc = stc.extract_label_time_course(label, src=src,
mode=self.label_extract_mode)
tc = tc[0] if tc.ndim == 2 else tc[0, 0, :]
color = next(self.color_cycle)
line = self.mpl_canvas.plot(
self._data['time'], tc, label=label_name,
color=color)
else:
line = None
orig_color = color
color = colorConverter.to_rgba(color, alpha)
cmap = np.array([(0, 0, 0, 0,), color])
ctable = np.round(cmap * 255).astype(np.uint8)
for ri, ci, v in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
if borders:
n_vertices = scalars.size
edges = mesh_edges(self.geo[hemi].faces)
edges = edges.tocoo()
border_edges = scalars[edges.row] != scalars[edges.col]
show = np.zeros(n_vertices, dtype=np.int64)
keep_idx = np.unique(edges.row[border_edges])
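                # Vertices on at least one boundary-crossing edge form the
                # border; if ``borders`` is an int, the loop below grows it by
                # that many rings of neighboring faces.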
if isinstance(borders, int):
for _ in range(borders):
keep_idx = np.in1d(
self.geo[hemi].faces.ravel(), keep_idx)
keep_idx.shape = self.geo[hemi].faces.shape
keep_idx = self.geo[hemi].faces[np.any(
keep_idx, axis=1)]
keep_idx = np.unique(keep_idx)
show[keep_idx] = 1
scalars *= show
mesh = self._layered_meshes[hemi]
mesh.add_overlay(
scalars=scalars,
colormap=ctable,
rng=[np.min(scalars), np.max(scalars)],
opacity=alpha,
name=label_name,
)
if reset_camera:
self._renderer.set_camera(**views_dicts[hemi][v])
if self.time_viewer and self.show_traces \
and self.traces_mode == 'label':
label._color = orig_color
label._line = line
self._labels[hemi].append(label)
self._update()
def add_foci(self, coords, coords_as_verts=False, map_surface=None,
scale_factor=1, color="white", alpha=1, name=None,
hemi=None, resolution=50):
"""Add spherical foci, possibly mapping to displayed surf.
The foci spheres can be displayed at the coordinates given, or
mapped through a surface geometry. In other words, coordinates
from a volume-based analysis in MNI space can be displayed on an
inflated average surface by finding the closest vertex on the
white surface and mapping to that vertex on the inflated mesh.
Parameters
----------
coords : ndarray, shape (n_coords, 3)
Coordinates in stereotaxic space (default) or array of
            vertex ids (with ``coords_as_verts=True``).
coords_as_verts : bool
Whether the coords parameter should be interpreted as vertex ids.
map_surface : None
Surface to map coordinates through, or None to use raw coords.
scale_factor : float
Controls the size of the foci spheres (relative to 1cm).
color : matplotlib color code
            HTML name, RGB tuple, or hex code.
        alpha : float in [0, 1]
            Opacity of focus glyphs.
name : str
Internal name to use.
hemi : str | None
            If None, it is assumed to belong to the hemisphere being
shown. If two hemispheres are being shown, an error will
be thrown.
resolution : int
The resolution of the spheres.
"""
from matplotlib.colors import colorConverter
hemi = self._check_hemi(hemi, extras=['vol'])
# those parameters are not supported yet, only None is allowed
_check_option('map_surface', map_surface, [None])
# Figure out how to interpret the first parameter
if coords_as_verts:
coords = self.geo[hemi].coords[coords]
# Convert the color code
if not isinstance(color, tuple):
color = colorConverter.to_rgb(color)
if self._units == 'm':
scale_factor = scale_factor / 1000.
for ri, ci, v in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
self._renderer.sphere(center=coords, color=color,
scale=(10. * scale_factor),
opacity=alpha, resolution=resolution)
self._renderer.set_camera(**views_dicts[hemi][v])
def add_text(self, x, y, text, name=None, color=None, opacity=1.0,
row=-1, col=-1, font_size=None, justification=None):
"""Add a text to the visualization.
Parameters
----------
x : float
X coordinate.
y : float
Y coordinate.
text : str
Text to add.
name : str
Name of the text (text label can be updated using update_text()).
color : tuple
Color of the text. Default is the foreground color set during
initialization (default is black or white depending on the
background color).
opacity : float
Opacity of the text (default 1.0).
row : int
Row index of which brain to use.
col : int
Column index of which brain to use.
font_size : float | None
The font size to use.
justification : str | None
The text justification.
"""
        # XXX: support for `name` should be added when update_text/remove_text
# are implemented
# _check_option('name', name, [None])
self._renderer.text2d(x_window=x, y_window=y, text=text, color=color,
size=font_size, justification=justification)
def _configure_label_time_course(self):
from ...label import read_labels_from_annot
if not self.show_traces:
return
if self.mpl_canvas is None:
self._configure_mplcanvas()
else:
self.clear_glyphs()
self.traces_mode = 'label'
self.add_annotation(self.annot, color="w", alpha=0.75)
# now plot the time line
self.plot_time_line()
self.mpl_canvas.update_plot()
for hemi in self._hemis:
labels = read_labels_from_annot(
subject=self._subject_id,
parc=self.annot,
hemi=hemi,
subjects_dir=self._subjects_dir
)
self._vertex_to_label_id[hemi] = np.full(
self.geo[hemi].coords.shape[0], -1)
self._annotation_labels[hemi] = labels
for idx, label in enumerate(labels):
self._vertex_to_label_id[hemi][label.vertices] = idx
def add_annotation(self, annot, borders=True, alpha=1, hemi=None,
remove_existing=True, color=None, **kwargs):
"""Add an annotation file.
Parameters
----------
annot : str | tuple
Either path to annotation file or annotation name. Alternatively,
the annotation can be specified as a ``(labels, ctab)`` tuple per
hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere
or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both
hemispheres. ``labels`` and ``ctab`` should be arrays as returned
by :func:`nibabel.freesurfer.io.read_annot`.
borders : bool | int
Show only label borders. If int, specify the number of steps
(away from the true border) along the cortical mesh to include
as part of the border definition.
alpha : float in [0, 1]
Alpha level to control opacity.
hemi : str | None
            If None, it is assumed to belong to the hemisphere being
shown. If two hemispheres are being shown, data must exist
for both hemispheres.
remove_existing : bool
If True (default), remove old annotations.
color : matplotlib-style color code
If used, show all annotations in the same (specified) color.
Probably useful only when showing annotation borders.
**kwargs : dict
These are passed to the underlying
``mayavi.mlab.pipeline.surface`` call.
"""
from ...label import _read_annot
hemis = self._check_hemis(hemi)
# Figure out where the data is coming from
if isinstance(annot, str):
if os.path.isfile(annot):
filepath = annot
path = os.path.split(filepath)[0]
file_hemi, annot = os.path.basename(filepath).split('.')[:2]
if len(hemis) > 1:
if annot[:2] == 'lh.':
filepaths = [filepath, op.join(path, 'rh' + annot[2:])]
elif annot[:2] == 'rh.':
                        filepaths = [op.join(path, 'lh' + annot[2:]), filepath]
else:
raise RuntimeError('To add both hemispheres '
'simultaneously, filename must '
'begin with "lh." or "rh."')
else:
filepaths = [filepath]
else:
filepaths = []
for hemi in hemis:
filepath = op.join(self._subjects_dir,
self._subject_id,
'label',
".".join([hemi, annot, 'annot']))
if not os.path.exists(filepath):
raise ValueError('Annotation file %s does not exist'
% filepath)
filepaths += [filepath]
annots = []
for hemi, filepath in zip(hemis, filepaths):
# Read in the data
labels, cmap, _ = _read_annot(filepath)
annots.append((labels, cmap))
else:
annots = [annot] if len(hemis) == 1 else annot
annot = 'annotation'
for hemi, (labels, cmap) in zip(hemis, annots):
# Maybe zero-out the non-border vertices
self._to_borders(labels, hemi, borders)
# Handle null labels properly
cmap[:, 3] = 255
bgcolor = np.round(np.array(self._brain_color) * 255).astype(int)
bgcolor[-1] = 0
cmap[cmap[:, 4] < 0, 4] += 2 ** 24 # wrap to positive
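            # Column 4 holds the packed FreeSurfer label id (typically
            # R + G * 2 ** 8 + B * 2 ** 16); ids that overflowed into negative
            # values are wrapped back before matching against ``labels``.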
cmap[cmap[:, 4] <= 0, :4] = bgcolor
if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0):
cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]])))
# Set label ids sensibly
order = np.argsort(cmap[:, -1])
cmap = cmap[order]
ids = np.searchsorted(cmap[:, -1], labels)
cmap = cmap[:, :4]
# Set the alpha level
alpha_vec = cmap[:, 3]
alpha_vec[alpha_vec > 0] = alpha * 255
# Override the cmap when a single color is used
if color is not None:
from matplotlib.colors import colorConverter
rgb = np.round(np.multiply(colorConverter.to_rgb(color), 255))
cmap[:, :3] = rgb.astype(cmap.dtype)
ctable = cmap.astype(np.float64)
for ri, ci, _ in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
mesh = self._layered_meshes[hemi]
mesh.add_overlay(
scalars=ids,
colormap=ctable,
rng=[np.min(ids), np.max(ids)],
opacity=alpha,
name=annot,
)
self._annots[hemi].append(annot)
if not self.time_viewer or self.traces_mode == 'vertex':
self._renderer._set_colormap_range(
mesh._actor, cmap.astype(np.uint8), None)
self._update()
def close(self):
"""Close all figures and cleanup data structure."""
self._closed = True
self._renderer.close()
def show(self):
"""Display the window."""
self._renderer.show()
def show_view(self, view=None, roll=None, distance=None, row=0, col=0,
hemi=None, align=True):
"""Orient camera to display view.
Parameters
----------
view : str | dict
String view, or a dict with azimuth and elevation.
roll : float | None
The roll.
distance : float | None
The distance.
row : int
The row to set.
col : int
The column to set.
hemi : str
Which hemi to use for string lookup (when in "both" mode).
align : bool
If True, consider view arguments relative to canonical MRI
directions (closest to MNI for the subject) rather than native MRI
space. This helps when MRIs are not in standard orientation (e.g.,
have large rotations).
"""
hemi = self._hemi if hemi is None else hemi
if hemi == 'split':
if (self._view_layout == 'vertical' and col == 1 or
self._view_layout == 'horizontal' and row == 1):
hemi = 'rh'
else:
hemi = 'lh'
if isinstance(view, str):
view = views_dicts[hemi].get(view)
view = view.copy()
if roll is not None:
view.update(roll=roll)
if distance is not None:
view.update(distance=distance)
self._renderer.subplot(row, col)
xfm = self._rigid if align else None
self._renderer.set_camera(**view, reset_camera=False, rigid=xfm)
self._update()
def reset_view(self):
"""Reset the camera."""
for h in self._hemis:
for ri, ci, v in self._iter_views(h):
self._renderer.subplot(ri, ci)
self._renderer.set_camera(**views_dicts[h][v],
reset_camera=False)
def save_image(self, filename, mode='rgb'):
"""Save view from all panels to disk.
Parameters
----------
filename : str
Path to new image file.
mode : str
Either 'rgb' or 'rgba' for values to return.
"""
self._renderer.screenshot(mode=mode, filename=filename)
@fill_doc
def screenshot(self, mode='rgb', time_viewer=False):
"""Generate a screenshot of current view.
Parameters
----------
mode : str
Either 'rgb' or 'rgba' for values to return.
%(brain_screenshot_time_viewer)s
Returns
-------
screenshot : array
Image pixel values.
"""
img = self._renderer.screenshot(mode)
if time_viewer and self.time_viewer and \
self.show_traces and \
not self.separate_canvas:
canvas = self.mpl_canvas.fig.canvas
canvas.draw_idle()
fig = self.mpl_canvas.fig
with BytesIO() as output:
# Need to pass dpi here so it uses the physical (HiDPI) DPI
# rather than logical DPI when saving in most cases.
# But when matplotlib uses HiDPI and VTK doesn't
# (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work,
# so let's just calculate the DPI we need to get
# the correct size output based on the widths being equal
dpi = img.shape[1] / fig.get_size_inches()[0]
fig.savefig(output, dpi=dpi, format='raw',
facecolor=self._bg_color, edgecolor='none')
output.seek(0)
trace_img = np.reshape(
np.frombuffer(output.getvalue(), dtype=np.uint8),
newshape=(-1, img.shape[1], 4))[:, :, :3]
img = concatenate_images(
[img, trace_img], bgcolor=self._brain_color[:3])
return img
@contextlib.contextmanager
def _no_lut_update(self, why):
orig = self._lut_locked
self._lut_locked = why
try:
yield
finally:
self._lut_locked = orig
@fill_doc
def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None):
"""Update color map.
Parameters
----------
%(fmin_fmid_fmax)s
alpha : float | None
Alpha to use in the update.
"""
args = f'{fmin}, {fmid}, {fmax}, {alpha}'
if self._lut_locked is not None:
logger.debug(f'LUT update postponed with {args}')
return
logger.debug(f'Updating LUT with {args}')
center = self._data['center']
colormap = self._data['colormap']
transparent = self._data['transparent']
lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')}
_update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax)
assert all(val is not None for val in lims.values())
self._data.update(lims)
self._data['ctable'] = np.round(
calculate_lut(colormap, alpha=1., center=center,
transparent=transparent, **lims) *
255).astype(np.uint8)
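        # The lookup table is an (n, 4) uint8 RGBA array built from
        # fmin/fmid/fmax (and center, if any).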
# update our values
rng = self._cmap_range
ctable = self._data['ctable']
# in testing, no plotter; if colorbar=False, no scalar_bar
scalar_bar = getattr(
getattr(self._renderer, 'plotter', None), 'scalar_bar', None)
for hemi in ['lh', 'rh', 'vol']:
hemi_data = self._data.get(hemi)
if hemi_data is not None:
if hemi in self._layered_meshes:
mesh = self._layered_meshes[hemi]
mesh.update_overlay(name='data',
colormap=self._data['ctable'],
opacity=alpha,
rng=rng)
self._renderer._set_colormap_range(
mesh._actor, ctable, scalar_bar, rng,
self._brain_color)
scalar_bar = None
grid_volume_pos = hemi_data.get('grid_volume_pos')
grid_volume_neg = hemi_data.get('grid_volume_neg')
for grid_volume in (grid_volume_pos, grid_volume_neg):
if grid_volume is not None:
self._renderer._set_volume_range(
grid_volume, ctable, hemi_data['alpha'],
scalar_bar, rng)
scalar_bar = None
glyph_actor = hemi_data.get('glyph_actor')
if glyph_actor is not None:
for glyph_actor_ in glyph_actor:
self._renderer._set_colormap_range(
glyph_actor_, ctable, scalar_bar, rng)
scalar_bar = None
if self.time_viewer:
with self._no_lut_update(f'update_lut {args}'):
for key in ('fmin', 'fmid', 'fmax'):
self.callbacks[key](lims[key])
self._update()
def set_data_smoothing(self, n_steps):
"""Set the number of smoothing steps.
Parameters
----------
n_steps : int
Number of smoothing steps.
"""
from scipy import sparse
from ...morph import _hemi_morph
for hemi in ['lh', 'rh']:
hemi_data = self._data.get(hemi)
if hemi_data is not None:
if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]:
continue
vertices = hemi_data['vertices']
if vertices is None:
raise ValueError(
'len(data) < nvtx (%s < %s): the vertices '
'parameter must not be None'
                        % (len(hemi_data['array']),
                           self.geo[hemi].x.shape[0]))
morph_n_steps = 'nearest' if n_steps == 0 else n_steps
maps = sparse.eye(len(self.geo[hemi].coords), format='csr')
with use_log_level(False):
smooth_mat = _hemi_morph(
self.geo[hemi].orig_faces,
np.arange(len(self.geo[hemi].coords)),
vertices, morph_n_steps, maps, warn=False)
self._data[hemi]['smooth_mat'] = smooth_mat
self.set_time_point(self._data['time_idx'])
self._data['smoothing_steps'] = n_steps
@property
def _n_times(self):
return len(self._times) if self._times is not None else None
@property
def time_interpolation(self):
"""The interpolation mode."""
return self._time_interpolation
@fill_doc
def set_time_interpolation(self, interpolation):
"""Set the interpolation mode.
Parameters
----------
%(brain_time_interpolation)s
"""
self._time_interpolation = _check_option(
'interpolation',
interpolation,
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic')
)
self._time_interp_funcs = dict()
self._time_interp_inv = None
if self._times is not None:
idx = np.arange(self._n_times)
for hemi in ['lh', 'rh', 'vol']:
hemi_data = self._data.get(hemi)
if hemi_data is not None:
array = hemi_data['array']
self._time_interp_funcs[hemi] = _safe_interp1d(
idx, array, self._time_interpolation, axis=-1,
assume_sorted=True)
self._time_interp_inv = _safe_interp1d(idx, self._times)
def set_time_point(self, time_idx):
"""Set the time point shown (can be a float to interpolate).
Parameters
----------
time_idx : int | float
The time index to use. Can be a float to use interpolation
between indices.
"""
self._current_act_data = dict()
time_actor = self._data.get('time_actor', None)
time_label = self._data.get('time_label', None)
for hemi in ['lh', 'rh', 'vol']:
hemi_data = self._data.get(hemi)
if hemi_data is not None:
array = hemi_data['array']
# interpolate in time
vectors = None
if array.ndim == 1:
act_data = array
self._current_time = 0
else:
act_data = self._time_interp_funcs[hemi](time_idx)
self._current_time = self._time_interp_inv(time_idx)
if array.ndim == 3:
vectors = act_data
act_data = np.linalg.norm(act_data, axis=1)
self._current_time = self._time_interp_inv(time_idx)
self._current_act_data[hemi] = act_data
if time_actor is not None and time_label is not None:
time_actor.SetInput(time_label(self._current_time))
# update the volume interpolation
grid = hemi_data.get('grid')
if grid is not None:
vertices = self._data['vol']['vertices']
values = self._current_act_data['vol']
rng = self._cmap_range
fill = 0 if self._data['center'] is not None else rng[0]
grid.cell_arrays['values'].fill(fill)
# XXX for sided data, we probably actually need two
# volumes as composite/MIP needs to look at two
# extremes... for now just use abs. Eventually we can add
# two volumes if we want.
grid.cell_arrays['values'][vertices] = values
# interpolate in space
smooth_mat = hemi_data.get('smooth_mat')
if smooth_mat is not None:
act_data = smooth_mat.dot(act_data)
# update the mesh scalar values
if hemi in self._layered_meshes:
mesh = self._layered_meshes[hemi]
if 'data' in mesh._overlays:
mesh.update_overlay(name='data', scalars=act_data)
else:
mesh.add_overlay(
scalars=act_data,
colormap=self._data['ctable'],
rng=self._cmap_range,
opacity=None,
name='data',
)
# update the glyphs
if vectors is not None:
self._update_glyphs(hemi, vectors)
self._data['time_idx'] = time_idx
self._update()
def set_time(self, time):
"""Set the time to display (in seconds).
Parameters
----------
time : float
The time to show, in seconds.
"""
if self._times is None:
raise ValueError(
'Cannot set time when brain has no defined times.')
elif min(self._times) <= time <= max(self._times):
self.set_time_point(np.interp(float(time), self._times,
np.arange(self._n_times)))
else:
raise ValueError(
f'Requested time ({time} s) is outside the range of '
f'available times ({min(self._times)}-{max(self._times)} s).')
def _update_glyphs(self, hemi, vectors):
hemi_data = self._data.get(hemi)
assert hemi_data is not None
vertices = hemi_data['vertices']
vector_alpha = self._data['vector_alpha']
scale_factor = self._data['scale_factor']
vertices = slice(None) if vertices is None else vertices
x, y, z = np.array(self.geo[hemi].coords)[vertices].T
if hemi_data['glyph_actor'] is None:
add = True
hemi_data['glyph_actor'] = list()
else:
add = False
count = 0
for ri, ci, _ in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
if hemi_data['glyph_dataset'] is None:
glyph_mapper, glyph_dataset = self._renderer.quiver3d(
x, y, z,
vectors[:, 0], vectors[:, 1], vectors[:, 2],
color=None,
mode='2darrow',
scale_mode='vector',
scale=scale_factor,
opacity=vector_alpha,
name=str(hemi) + "_glyph"
)
hemi_data['glyph_dataset'] = glyph_dataset
hemi_data['glyph_mapper'] = glyph_mapper
else:
glyph_dataset = hemi_data['glyph_dataset']
glyph_dataset.point_arrays['vec'] = vectors
glyph_mapper = hemi_data['glyph_mapper']
if add:
glyph_actor = self._renderer._actor(glyph_mapper)
prop = glyph_actor.GetProperty()
prop.SetLineWidth(2.)
prop.SetOpacity(vector_alpha)
self._renderer.plotter.add_actor(glyph_actor)
hemi_data['glyph_actor'].append(glyph_actor)
else:
glyph_actor = hemi_data['glyph_actor'][count]
count += 1
self._renderer._set_colormap_range(
actor=glyph_actor,
ctable=self._data['ctable'],
scalar_bar=None,
rng=self._cmap_range,
)
@property
def _cmap_range(self):
dt_max = self._data['fmax']
if self._data['center'] is None:
dt_min = self._data['fmin']
else:
dt_min = -1 * dt_max
rng = [dt_min, dt_max]
return rng
def _update_fscale(self, fscale):
"""Scale the colorbar points."""
fmin = self._data['fmin'] * fscale
fmid = self._data['fmid'] * fscale
fmax = self._data['fmax'] * fscale
self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _update_auto_scaling(self, restore=False):
user_clim = self._data['clim']
if user_clim is not None and 'lims' in user_clim:
allow_pos_lims = False
else:
allow_pos_lims = True
if user_clim is not None and restore:
clim = user_clim
else:
clim = 'auto'
colormap = self._data['colormap']
transparent = self._data['transparent']
mapdata = _process_clim(
clim, colormap, transparent,
np.concatenate(list(self._current_act_data.values())),
allow_pos_lims)
diverging = 'pos_lims' in mapdata['clim']
colormap = mapdata['colormap']
scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']
transparent = mapdata['transparent']
del mapdata
fmin, fmid, fmax = scale_pts
center = 0. if diverging else None
self._data['center'] = center
self._data['colormap'] = colormap
self._data['transparent'] = transparent
self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _to_time_index(self, value):
"""Return the interpolated time index of the given time value."""
time = self._data['time']
value = np.interp(value, time, np.arange(len(time)))
return value
@property
def data(self):
"""Data used by time viewer and color bar widgets."""
return self._data
@property
def labels(self):
return self._labels
@property
def views(self):
return self._views
@property
def hemis(self):
return self._hemis
def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,
framerate=24, interpolation=None, codec=None,
bitrate=None, callback=None, time_viewer=False, **kwargs):
import imageio
with self._renderer._disabled_interaction():
images = self._make_movie_frames(
time_dilation, tmin, tmax, framerate, interpolation, callback,
time_viewer)
# find imageio FFMPEG parameters
if 'fps' not in kwargs:
kwargs['fps'] = framerate
if codec is not None:
kwargs['codec'] = codec
if bitrate is not None:
kwargs['bitrate'] = bitrate
imageio.mimwrite(filename, images, **kwargs)
@fill_doc
def save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,
framerate=24, interpolation=None, codec=None,
bitrate=None, callback=None, time_viewer=False, **kwargs):
"""Save a movie (for data with a time axis).
The movie is created through the :mod:`imageio` module. The format is
determined by the extension, and additional options can be specified
through keyword arguments that depend on the format. For available
formats and corresponding parameters see the imageio documentation:
http://imageio.readthedocs.io/en/latest/formats.html#multiple-images
.. Warning::
This method assumes that time is specified in seconds when adding
data. If time is specified in milliseconds this will result in
movies 1000 times longer than expected.
Parameters
----------
filename : str
Path at which to save the movie. The extension determines the
format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio`
documentation for available formats).
time_dilation : float
Factor by which to stretch time (default 4). For example, an epoch
from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this
would result in a 2.8 s long movie.
tmin : float
First time point to include (default: all data).
tmax : float
Last time point to include (default: all data).
framerate : float
Framerate of the movie (frames per second, default 24).
%(brain_time_interpolation)s
If None, it uses the current ``brain.interpolation``,
which defaults to ``'nearest'``. Defaults to None.
codec : str | None
The codec to use.
bitrate : float | None
The bitrate to use.
callback : callable | None
A function to call on each iteration. Useful for status message
updates. It will be passed keyword arguments ``frame`` and
``n_frames``.
%(brain_screenshot_time_viewer)s
**kwargs : dict
Specify additional options for :mod:`imageio`.
Returns
-------
dialog : object
            The opened dialog is returned for testing purposes only.
"""
if self.time_viewer:
try:
from pyvista.plotting.qt_plotting import FileDialog
except ImportError:
from pyvistaqt.plotting import FileDialog
if filename is None:
self.status_msg.setText("Choose movie path ...")
self.status_msg.show()
self.status_progress.setValue(0)
def _post_setup(unused):
del unused
self.status_msg.hide()
self.status_progress.hide()
dialog = FileDialog(
self.plotter.app_window,
callback=partial(self._save_movie, **kwargs)
)
dialog.setDirectory(os.getcwd())
dialog.finished.connect(_post_setup)
return dialog
else:
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
def frame_callback(frame, n_frames):
if frame == n_frames:
# On the ImageIO step
self.status_msg.setText(
"Saving with ImageIO: %s"
% filename
)
self.status_msg.show()
self.status_progress.hide()
self.status_bar.layout().update()
else:
self.status_msg.setText(
"Rendering images (frame %d / %d) ..."
% (frame + 1, n_frames)
)
self.status_msg.show()
self.status_progress.show()
self.status_progress.setRange(0, n_frames - 1)
self.status_progress.setValue(frame)
self.status_progress.update()
self.status_progress.repaint()
self.status_msg.update()
self.status_msg.parent().update()
self.status_msg.repaint()
# set cursor to busy
default_cursor = self.interactor.cursor()
self.interactor.setCursor(QCursor(Qt.WaitCursor))
try:
self._save_movie(
filename=filename,
time_dilation=(1. / self.playback_speed),
callback=frame_callback,
**kwargs
)
except (Exception, KeyboardInterrupt):
warn('Movie saving aborted:\n' + traceback.format_exc())
finally:
self.interactor.setCursor(default_cursor)
else:
self._save_movie(filename, time_dilation, tmin, tmax,
framerate, interpolation, codec,
bitrate, callback, time_viewer, **kwargs)
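    # Illustrative usage sketch (added; `brain` and the output path are
    # hypothetical). Any extension supported by imageio should work:
    #
    #     brain.save_movie('activation.mov', time_dilation=4., framerate=24,
    #                      interpolation='linear')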
def _make_movie_frames(self, time_dilation, tmin, tmax, framerate,
interpolation, callback, time_viewer):
from math import floor
# find tmin
if tmin is None:
tmin = self._times[0]
elif tmin < self._times[0]:
raise ValueError("tmin=%r is smaller than the first time point "
"(%r)" % (tmin, self._times[0]))
# find indexes at which to create frames
if tmax is None:
tmax = self._times[-1]
elif tmax > self._times[-1]:
raise ValueError("tmax=%r is greater than the latest time point "
"(%r)" % (tmax, self._times[-1]))
n_frames = floor((tmax - tmin) * time_dilation * framerate)
times = np.arange(n_frames, dtype=float)
times /= framerate * time_dilation
times += tmin
time_idx = np.interp(times, self._times, np.arange(self._n_times))
n_times = len(time_idx)
if n_times == 0:
raise ValueError("No time points selected")
logger.debug("Save movie for time points/samples\n%s\n%s"
% (times, time_idx))
# Sometimes the first screenshot is rendered with a different
# resolution on OS X
self.screenshot(time_viewer=time_viewer)
old_mode = self.time_interpolation
if interpolation is not None:
self.set_time_interpolation(interpolation)
try:
images = [
self.screenshot(time_viewer=time_viewer)
for _ in self._iter_time(time_idx, callback)]
finally:
self.set_time_interpolation(old_mode)
if callback is not None:
callback(frame=len(time_idx), n_frames=len(time_idx))
return images
def _iter_time(self, time_idx, callback):
"""Iterate through time points, then reset to current time.
Parameters
----------
time_idx : array_like
Time point indexes through which to iterate.
callback : callable | None
Callback to call before yielding each frame.
Yields
------
idx : int | float
Current index.
Notes
-----
Used by movie and image sequence saving functions.
"""
if self.time_viewer:
func = partial(self.callbacks["time"],
update_widget=True)
else:
func = self.set_time_point
current_time_idx = self._data["time_idx"]
for ii, idx in enumerate(time_idx):
func(idx)
if callback is not None:
callback(frame=ii, n_frames=len(time_idx))
yield idx
# Restore original time index
func(current_time_idx)
def _check_stc(self, hemi, array, vertices):
from ...source_estimate import (
_BaseSourceEstimate, _BaseSurfaceSourceEstimate,
_BaseMixedSourceEstimate, _BaseVolSourceEstimate
)
if isinstance(array, _BaseSourceEstimate):
stc = array
stc_surf = stc_vol = None
if isinstance(stc, _BaseSurfaceSourceEstimate):
stc_surf = stc
elif isinstance(stc, _BaseMixedSourceEstimate):
stc_surf = stc.surface() if hemi != 'vol' else None
stc_vol = stc.volume() if hemi == 'vol' else None
elif isinstance(stc, _BaseVolSourceEstimate):
stc_vol = stc if hemi == 'vol' else None
else:
raise TypeError("stc not supported")
if stc_surf is None and stc_vol is None:
raise ValueError("No data to be added")
if stc_surf is not None:
array = getattr(stc_surf, hemi + '_data')
vertices = stc_surf.vertices[0 if hemi == 'lh' else 1]
if stc_vol is not None:
array = stc_vol.data
vertices = np.concatenate(stc_vol.vertices)
else:
stc = None
return stc, array, vertices
def _check_hemi(self, hemi, extras=()):
"""Check for safe single-hemi input, returns str."""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
raise ValueError('hemi must not be None when both '
'hemispheres are displayed')
else:
hemi = self._hemi
elif hemi not in ['lh', 'rh'] + list(extras):
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' +
extra + ", got " + str(hemi))
return hemi
def _check_hemis(self, hemi):
"""Check for safe dual or single-hemi input, returns list."""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
hemi = ['lh', 'rh']
else:
hemi = [self._hemi]
elif hemi not in ['lh', 'rh']:
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' + extra)
else:
hemi = [hemi]
return hemi
def _to_borders(self, label, hemi, borders, restrict_idx=None):
"""Convert a label/parc to borders."""
if not isinstance(borders, (bool, int)) or borders < 0:
raise ValueError('borders must be a bool or positive integer')
if borders:
n_vertices = label.size
edges = mesh_edges(self.geo[hemi].orig_faces)
edges = edges.tocoo()
border_edges = label[edges.row] != label[edges.col]
show = np.zeros(n_vertices, dtype=np.int64)
keep_idx = np.unique(edges.row[border_edges])
if isinstance(borders, int):
for _ in range(borders):
keep_idx = np.in1d(
self.geo[hemi].orig_faces.ravel(), keep_idx)
keep_idx.shape = self.geo[hemi].orig_faces.shape
keep_idx = self.geo[hemi].orig_faces[
np.any(keep_idx, axis=1)]
keep_idx = np.unique(keep_idx)
if restrict_idx is not None:
keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]
show[keep_idx] = 1
label *= show
def enable_depth_peeling(self):
"""Enable depth peeling."""
self._renderer.enable_depth_peeling()
def _update(self):
from ..backends import renderer
if renderer.get_3d_backend() in ['pyvista', 'notebook']:
if self.notebook and self._renderer.figure.display is not None:
self._renderer.figure.display.update_canvas()
else:
self._renderer.plotter.update()
def get_picked_points(self):
"""Return the vertices of the picked points.
Returns
-------
points : list of int | None
The vertices picked by the time viewer.
"""
if hasattr(self, "time_viewer"):
return self.picked_points
def __hash__(self):
"""Hash the object."""
raise NotImplementedError
def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False):
"""Work around interp1d not liking singleton dimensions."""
from scipy.interpolate import interp1d
if y.shape[axis] == 1:
def func(x):
return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis)
return func
else:
return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted)
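# Added note: _safe_interp1d exists because scipy's interp1d rejects arrays
# with a single sample along the interpolation axis. A minimal sketch with
# made-up data:
#
#     import numpy as np
#     f = _safe_interp1d(np.array([0.]), np.ones((5, 1)))      # singleton axis
#     g = _safe_interp1d(np.arange(3.), np.random.rand(5, 3))  # regular interp1d
#     f([0.2, 0.7]).shape, g(1.5).shape                        # (5, 2), (5,)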
def _update_limits(fmin, fmid, fmax, center, array):
if center is None:
if fmin is None:
fmin = array.min() if array.size > 0 else 0
if fmax is None:
fmax = array.max() if array.size > 0 else 1
else:
if fmin is None:
fmin = 0
if fmax is None:
fmax = np.abs(center - array).max() if array.size > 0 else 1
if fmid is None:
fmid = (fmin + fmax) / 2.
if fmin >= fmid:
raise RuntimeError('min must be < mid, got %0.4g >= %0.4g'
% (fmin, fmid))
if fmid >= fmax:
raise RuntimeError('mid must be < max, got %0.4g >= %0.4g'
% (fmid, fmax))
return fmin, fmid, fmax
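# Worked example (added; values are arbitrary): with center=None and data
# spanning [-2, 6], the defaults become fmin=-2, fmax=6 and fmid=(fmin+fmax)/2;
# with center=0 the maximum absolute deviation sets fmax:
#
#     import numpy as np
#     _update_limits(None, None, None, None, np.array([-2., 6.]))  # (-2.0, 2.0, 6.0)
#     _update_limits(None, None, None, 0., np.array([-2., 6.]))    # (0, 3.0, 6.0)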
def _update_monotonic(lims, fmin, fmid, fmax):
if fmin is not None:
lims['fmin'] = fmin
if lims['fmax'] < fmin:
logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmin}')
lims['fmax'] = fmin
if lims['fmid'] < fmin:
logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmin}')
lims['fmid'] = fmin
assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
if fmid is not None:
lims['fmid'] = fmid
if lims['fmin'] > fmid:
logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmid}')
lims['fmin'] = fmid
if lims['fmax'] < fmid:
logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmid}')
lims['fmax'] = fmid
assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
if fmax is not None:
lims['fmax'] = fmax
if lims['fmin'] > fmax:
logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmax}')
lims['fmin'] = fmax
if lims['fmid'] > fmax:
logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmax}')
lims['fmid'] = fmax
assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
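# Illustrative example (added; values are hypothetical): the helper keeps
# fmin <= fmid <= fmax by bumping the other limits when one is moved past them:
#
#     lims = dict(fmin=1., fmid=2., fmax=3.)
#     _update_monotonic(lims, fmin=None, fmid=None, fmax=1.5)
#     # lims is now {'fmin': 1.0, 'fmid': 1.5, 'fmax': 1.5}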
def _get_range(brain):
val = np.abs(np.concatenate(list(brain._current_act_data.values())))
return [np.min(val), np.max(val)]
class _FakeIren():
def EnterEvent(self):
pass
def MouseMoveEvent(self):
pass
def LeaveEvent(self):
pass
def SetEventInformation(self, *args, **kwargs):
pass
def CharEvent(self):
pass
def KeyPressEvent(self, *args, **kwargs):
pass
def KeyReleaseEvent(self, *args, **kwargs):
pass
| 39.876658
| 79
| 0.536901
|
f68efa741ce414163be4ac25ef7459e3707b9b36
| 37,284
|
py
|
Python
|
zeus/modules/operators/functions/tensorflow_fn.py
|
shaido987/vega
|
14d5d49fb8bdf96bd1f3fcfac201ce6b6712c3b6
|
[
"MIT"
] | 1
|
2021-05-08T07:47:44.000Z
|
2021-05-08T07:47:44.000Z
|
zeus/modules/operators/functions/tensorflow_fn.py
|
WholeG/vega
|
d1ccf1c3ce68a118bdb6775594ceed0f895911e7
|
[
"MIT"
] | null | null | null |
zeus/modules/operators/functions/tensorflow_fn.py
|
WholeG/vega
|
d1ccf1c3ce68a118bdb6775594ceed0f895911e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Custom functions of tensorflow."""
import logging
import math
import numpy as np
from collections import OrderedDict
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import state_ops
from zeus.common.config import Config
from zeus.common.class_factory import ClassType, ClassFactory
from zeus.modules.operators.functions.serializable import OperatorSerializable
from zeus.common.general import General
class Module(object):
"""Base Module to adapter tf Module."""
def __init__(self):
self.name = ''
self.data_format = General.data_format
self._modules = Config()
self._parameters = OrderedDict()
self._weights_buffer = OrderedDict()
self._init_configs()
def _init_configs(self):
self._training = True
self._trainable = True
self.weight_file = None
self.from_weight_type = None
self._is_load_pretrained = False
self._is_adaptive_weight = False
self.exclude_weight_prefix = None
def add_module(self, name, model):
"""Add models into self._models."""
setattr(self, str(name), model)
def build(self):
"""Build model or params."""
pass
def named_modules(self):
"""Return names spaces."""
self._apply_names()
_modules = []
for module in self.children():
_modules.append((module.name, module))
_modules.extend(module.named_modules())
return _modules
def named_children(self):
"""Return names children."""
return [(name, module) for name, module in self._modules.items()]
def children(self):
"""Get child models of current Module."""
for model in self._modules.values():
yield model
def load_checkpoint(self, weight_file):
"""Load weight state dict from last checkpoint file."""
if not weight_file:
return
logging.info("Load checkpoint form file ({}).".format(weight_file))
# model_file = tf.train.latest_checkpoint(weight_file)
reader = tf.train.NewCheckpointReader(weight_file)
variables = reader.get_variable_to_shape_map()
states = {v: reader.get_tensor(v) for v in variables}
self.load_checkpoint_from_numpy(states)
def load_checkpoint_from_numpy(self, states):
"""Load checkpoint from numpy."""
states = self._exclude_checkpoint_by_prefix(states)
for name, module in self.named_modules():
child_state = [(k, v) for k, v in states.items() if k.startswith(module.name + '/')]
for k, v in child_state:
module.set_weights(k, v)
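    # Illustrative usage sketch (added): `net` stands for any Module subclass
    # instance; the checkpoint path and variable names are hypothetical.
    # Weights can come from a TF checkpoint on disk or from an already-built
    # {variable_name: ndarray} mapping:
    #
    #     net.load_checkpoint('/path/to/model.ckpt')
    #     net.load_checkpoint_from_numpy({'backbone/conv1/kernel': kernel_array})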
def _exclude_checkpoint_by_prefix(self, states):
if self.exclude_weight_prefix:
if not isinstance(self.exclude_weight_prefix, list):
self.exclude_weight_prefix = [self.exclude_weight_prefix]
for prefix in self.exclude_weight_prefix:
states = {k: v for k, v in states.items() if not k.startswith(prefix)}
return states
def set_weights(self, name, value):
"""Set weights into weights buffer."""
self._weights_buffer[name] = value
@property
def training(self):
"""Get training flag."""
return self._training
@training.setter
def training(self, value):
"""Set training flag."""
self._training = value
for module in self.children():
module.training = value
@property
def is_adaptive_weight(self):
"""Get _is_adaptive_weight flag."""
return self._is_adaptive_weight
@is_adaptive_weight.setter
def is_adaptive_weight(self, value):
"""Set _is_adaptive_weight flag."""
self._is_adaptive_weight = value
for module in self.children():
module.is_adaptive_weight = value
def freeze(self):
"""Set training flag."""
self._trainable = False
for module in self.children():
module.freeze()
def __setattr__(self, key, value):
"""Set name to modules."""
super().__setattr__(key, value)
if isinstance(value, Module):
self._modules[key] = value
def set_parameters(self, name, value):
"""Set Parameters."""
self._parameters[name] = value
setattr(self, name, value)
return self.name
def get_weights(self, name=None):
"""Get weights by name."""
if self._weights_buffer:
return self._weights_buffer
return tf.get_default_graph().get_tensor_by_name('{}:0'.format(name))
def get_all_weights(self):
"""Get all weights."""
all_weights = OrderedDict()
for child in self.children():
all_weights.update(child._weights_buffer)
if isinstance(child, Module):
all_weights.update(child.get_all_weights())
return all_weights
def get_weight_ops(self, name):
"""Get weight ops."""
all_weight = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
weight_ops = [t for t in all_weight if not t.name.startswith(name)]
return weight_ops
def call(self, inputs, *args, **kwarg):
"""Call inputs."""
output = inputs
for model in self.children():
output = model(output)
return output
def adaptive_weight(self, inputs):
"""Adaptive weight."""
return {}
def _apply_names(self, parent_name=''):
"""Apply names spaces."""
for scope_name, module in self._modules.items():
scope_name = '{}.{}'.format(parent_name, scope_name) if parent_name else scope_name
module.name = module.name or scope_name + '/' + module.__class__.__name__
module._apply_names(scope_name)
def _apply_parameters(self):
"""Apply names spaces."""
for name, params in self._parameters.items():
setattr(self, name, tf.Variable(params, name='{}.{}'.format(self.name, name) if self.name else name))
def __call__(self, inputs, *args, **kwargs):
"""Call call function."""
self.build()
self._apply_parameters()
self._apply_names()
for module in self.children():
module._is_load_pretrained = True
out = self.call(inputs, *args, **kwargs)
self._apply_weights(inputs)
return out
def _apply_weights(self, inputs):
if not self._weights_buffer:
return
variables = tf.get_collection(tf.GraphKeys.VARIABLES)
if self.is_adaptive_weight:
self._weights_buffer.update(self.adaptive_weight(inputs))
values = [(var, self._weights_buffer.get(var.name.replace(':0', ''))) for var in variables if
var.name.replace(':0', '') in self._weights_buffer]
for v, weight in values:
v._initializer_op = state_ops.assign(v, weight)
self._weights_buffer.clear()
def modules(self):
"""Get the current modules."""
if self._modules.values():
return self._modules.values()
else:
return [self]
@ClassFactory.register(ClassType.NETWORK)
class QuantizeConv2d(OperatorSerializable):
"""QuantizeConv2d Module inherit nn.Module."""
def __init__(self):
"""Construct Identity class."""
OperatorSerializable.__init__(self)
def call(self, inputs, **kwargs):
"""Call QuantizeConv2d function."""
# todo
return inputs
@ClassFactory.register(ClassType.NETWORK)
class Pad(Module, OperatorSerializable):
"""Pad layer."""
def __init__(self, kernel_size):
super(Pad, self).__init__()
self.kernel_size = kernel_size
def call(self, inputs, *args, **kwargs):
"""Call padding function."""
return inputs
class HeInitial(object):
"""Initialize of Hekaiming."""
def __init__(self, scale=0.1):
self.scale = scale
def __call__(self, tensor, **kwargs):
"""Call He_initial function."""
c, h, w = get_shape(tensor)[1:]
fan_in = c * h * w
std = math.sqrt(2) / math.sqrt(fan_in)
return tf.random_normal_initializer(0, std * self.scale)
@ClassFactory.register(ClassType.NETWORK)
class Conv2d(Module, OperatorSerializable):
"""Fuse and unified conv2d args."""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, bias=True, groups=1,
dilation=1, separable=False, depthwise=False, padding_mode='same'):
super(Conv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.bias = bias
self.groups = groups
self.dilation = dilation
self.kernel_initial = tf.variance_scaling_initializer()
self.bias_initial = tf.zeros_initializer()
self._initializer = None
self.reuse = None
self.separable = separable
self.depthwise = depthwise
self.padding_mode = padding_mode
def call(self, inputs, **kwargs):
"""Call separable_conv2d function."""
if self._initializer:
self.kernel_initial = self._initializer(inputs)
if self.dilation > 1:
conv2d = tf.keras.layers.SeparableConv2D(filters=self.out_channels,
kernel_size=self.kernel_size,
strides=self.stride,
data_format=self.data_format,
dilation_rate=self.dilation,
padding=self.padding_mode,
use_bias=self.bias,
name=self.name, trainable=self._trainable)
else:
conv2d = tf.keras.layers.Conv2D(filters=self.out_channels,
kernel_size=self.kernel_size,
kernel_initializer=self.kernel_initial,
bias_initializer=self.bias_initial,
strides=self.stride,
data_format=self.data_format,
dilation_rate=self.dilation,
padding=self.padding_mode,
use_bias=self.bias,
name=self.name, trainable=self._trainable)
x = conv2d(inputs=inputs)
return x
def initial(self, kernel_mode='he', bias_mode='zero', kernel_scale=1., bias_scale=1.):
"""Initialize weight and bias."""
if kernel_mode == 'he':
self._initializer = HeInitial(kernel_scale)
def adaptive_weight(self, inputs):
"""Adaptive weight."""
res = OrderedDict()
for name, weight in self._weights_buffer.items():
in_channels = inputs.shape.as_list()[1]
w_in_shape = weight.shape[2]
if w_in_shape < in_channels:
weight = np.tile(weight, (2, 1))
elif w_in_shape > in_channels:
cut = list(range(w_in_shape)[:in_channels])
weight = weight[:, :, cut, :]
w_out_shape = weight.shape[3]
if w_out_shape < self.out_channels:
weight = np.tile(weight, (1, 2))
elif w_out_shape > self.out_channels:
cut = list(range(w_out_shape)[:self.out_channels])
weight = weight[:, :, :, cut]
res[name] = weight
return res
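    # Added note: adaptive_weight lets a buffered (pretrained) kernel be reused
    # when channel counts differ, tiling the channel axes when they are too
    # small and slicing them when they are too large. Sketch with made-up shapes:
    #
    #     import numpy as np
    #     w = np.zeros((3, 3, 16, 32))        # kh, kw, c_in, c_out
    #     np.tile(w, (2, 1)).shape            # (3, 3, 32, 32) -> grow c_in
    #     w[:, :, :, list(range(16))].shape   # (3, 3, 16, 16) -> shrink c_out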
@ClassFactory.register(ClassType.NETWORK)
class SeparableConv2d(Module, OperatorSerializable):
"""Separable Conv2d args."""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, bias=True):
super(SeparableConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.bias = bias
self.dilation = dilation
def call(self, input, **kwargs):
"""Call separable_conv2d function."""
model = tf.keras.layers.SeparableConv2D(filters=self.out_channels,
kernel_size=self.kernel_size,
strides=self.stride,
data_format=self.data_format,
dilation_rate=self.dilation,
depthwise_initializer=tf.variance_scaling_initializer(),
pointwise_initializer=tf.variance_scaling_initializer(),
padding='SAME', use_bias=self.bias,
name=self.name,
                                            trainable=self._trainable)
return model(inputs=input)
@ClassFactory.register(ClassType.NETWORK)
class MaxPool2d(Module, OperatorSerializable):
"""Fuse and unified MaxPool2d args."""
def __init__(self, kernel_size, stride, padding=0):
super(MaxPool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
def call(self, input, **kwargs):
"""Call MaxPooling2D function."""
model = tf.layers.MaxPooling2D(pool_size=self.kernel_size, strides=self.stride,
data_format=self.data_format, padding='SAME', name=self.name,
trainable=self._trainable)
x = model(inputs=input)
return x
@ClassFactory.register(ClassType.NETWORK)
class Zero(Module, OperatorSerializable):
"""Class of Zero operation."""
def __init__(self, stride):
"""Init Zero."""
super(Zero, self).__init__()
self.stride = stride
def call(self, x, **kwargs):
"""Forward Function fo Zero."""
if self.stride == 1:
return tf.zeros_like(x)
if self.data_format == 'channels_first':
return tf.zeros_like(x)[:, :, ::self.stride, ::self.stride]
else:
return tf.zeros_like(x)[:, ::self.stride, ::self.stride, :]
@ClassFactory.register(ClassType.NETWORK)
class View(Module, OperatorSerializable):
"""Call squeeze."""
def __init__(self, size=None):
super(View, self).__init__()
self.size = size
def call(self, inputs, **kwargs):
"""Call squeeze function."""
if not self.size:
total_shape = 1
for _shape in inputs.get_shape()[1:]:
total_shape *= _shape
return tf.reshape(inputs, [-1, total_shape])
else:
self.size = list(self.size)
return tf.reshape(inputs, self.size)
@ClassFactory.register(ClassType.NETWORK)
class Relu(Module, OperatorSerializable):
"""Call relu."""
def __init__(self, inplace=False):
super(Relu, self).__init__()
self.inplace = inplace
def call(self, input, **kwargs):
"""Call relu function."""
return tf.nn.relu(input)
@ClassFactory.register(ClassType.NETWORK)
class Relu6(Module, OperatorSerializable):
"""Call relu6."""
def __init__(self, inplace=False):
super(Relu6, self).__init__()
self.inplace = inplace
def call(self, input, **kwargs):
"""Call relu6 function."""
return tf.nn.relu6(input)
@ClassFactory.register(ClassType.NETWORK)
class Hswish(Module, OperatorSerializable):
"""Call Hswish."""
def __init__(self, inplace=False):
super(Hswish, self).__init__()
self.inplace = inplace
def call(self, input, **kwargs):
"""Call Hswish function."""
return input * tf.nn.relu6(input + 3.) / 6.
@ClassFactory.register(ClassType.NETWORK)
class Hsigmoid(Module, OperatorSerializable):
"""Call Hsigmoid."""
def __init__(self, inplace=False):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def call(self, input, **kwargs):
"""Call Hsigmoid function."""
return tf.nn.relu6(input + 3.) / 6.
@ClassFactory.register(ClassType.NETWORK)
class AdaptiveAvgPool2d(Module, OperatorSerializable):
"""Call reduce_mean."""
def __init__(self, output_size=(1, 1)):
super(AdaptiveAvgPool2d, self).__init__()
self.output_size = output_size
def call(self, input, **kwargs):
"""Call reduce_mean function."""
axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
return tf.reduce_mean(input, axes, keepdims=True)
@ClassFactory.register(ClassType.NETWORK)
class Linear(Module, OperatorSerializable):
"""Call dense."""
def __init__(self, in_features=None, out_features=None, use_bias=True, activation=None):
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.use_bias = use_bias
self.activation = activation
def call(self, input, **kwargs):
"""Call dense function."""
fc = tf.keras.layers.Dense(units=self.out_features, use_bias=self.use_bias, name=self.name,
activation=self.activation)
out = fc(inputs=input)
return out
def adaptive_weight(self, inputs):
"""Adaptive weight."""
self.in_features = inputs.shape.as_list()[1]
res = OrderedDict()
for name, weight in self.get_weights().items():
if 'kernel' in name:
if weight.shape[0] < self.in_features:
res[name] = np.tile(weight, (2, 1))
elif weight.shape[0] > self.in_features:
idx = list(range(weight.shape[0])[:self.in_features])
res[name] = weight[idx, :]
if weight.shape[1] < self.out_features:
res[name] = np.tile(weight, (1, 2))
elif weight.shape[1] > self.out_features:
idx = list(range(weight.shape[1])[:self.out_features])
res[name] = weight[:, idx]
elif 'bias' in name:
if weight.shape[0] < self.out_features:
res[name] = np.tile(weight, 2)
elif weight.shape[0] > self.out_features:
idx = list(range(weight.shape[0])[:self.out_features])
res[name] = weight[idx]
return res
@ClassFactory.register(ClassType.NETWORK)
class AvgPool2d(Module, OperatorSerializable):
"""Call average_pooling2d."""
def __init__(self, kernel_size, stride, padding=0, count_include_pad=True):
super(AvgPool2d, self).__init__()
if not stride:
stride = kernel_size
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.count_include_pad = count_include_pad
def call(self, input, **kwargs):
"""Call average_pooling2d function."""
return tf.keras.layers.AveragePooling2D(pool_size=self.kernel_size,
strides=self.stride,
data_format=self.data_format,
padding='SAME',
name=self.name, trainable=self._trainable)(input)
@ClassFactory.register(ClassType.NETWORK)
class BatchNorm2d(Module, OperatorSerializable):
"""Call batch_normalization."""
def __init__(self, num_features=None, eps=1e-05, momentum=0.997, affine=None):
super(BatchNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.training = affine if affine is not None else self.training
self.affine = affine
def call(self, input, **kwargs):
"""Call batch_normalization function."""
bn = tf.keras.layers.BatchNormalization(momentum=self.momentum,
axis=1 if self.data_format == 'channels_first' else 3,
epsilon=self.eps,
center=True, scale=True, fused=True,
name=self.name, trainable=self._trainable)
if self._is_load_pretrained:
self.training = True
out = bn(inputs=input, training=self.training)
# update moving average
if self._trainable:
for item in bn.updates:
tf.add_to_collections(tf.GraphKeys.UPDATE_OPS, item)
return out
def adaptive_weight(self, input):
"""Adaptive weight."""
self.num_features = input.shape.as_list()[1]
res = OrderedDict()
for name, weight in self.get_weights().items():
if weight.shape[0] < self.num_features:
res[name] = np.tile(weight, 2)
elif weight.shape[0] > self.num_features:
idx = list(range(weight.shape[0])[:self.num_features])
res[name] = weight[idx]
return res
@ClassFactory.register(ClassType.NETWORK)
class Identity(Module, OperatorSerializable):
"""Class of Identity operation."""
def __init__(self):
"""Init Identity."""
super(Identity, self).__init__()
def call(self, x, **kwargs):
"""Forward function of Identity."""
return tf.identity(x)
@ClassFactory.register(ClassType.NETWORK)
class Dropout(Module, OperatorSerializable):
"""Class of Dropout."""
def __init__(self, prob=0.5, inplace=False):
"""Construct Dropout class."""
        super(Dropout, self).__init__()
self.dropout = tf.keras.layers.Dropout(prob)
def call(self, x, **kwargs):
"""Call Dropout function."""
out = self.dropout(x)
return out
@ClassFactory.register(ClassType.NETWORK)
class Tanh(Module, OperatorSerializable):
"""Class of Dropout."""
def call(self, x, **kwargs):
"""Forward Tanh."""
return super(Tanh, self).forward(x)
@ClassFactory.register(ClassType.NETWORK)
class Embedding(Module, OperatorSerializable):
"""Class of Embedding."""
def __init__(self, num_embeddings, embedding_dim):
super(Embedding, self).__init__()
self.embedding = tf.keras.layers.Embedding(num_embeddings, embedding_dim, )
def call(self, x, **kwargs):
"""Call embedding."""
return self.embedding(x)
@ClassFactory.register(ClassType.NETWORK)
class PixelShuffle(Module, OperatorSerializable):
"""Class of PixelShuffle."""
def __init__(self, upscale):
super(PixelShuffle, self).__init__()
self.upscale = upscale
def call(self, inputs, **kwargs):
"""Forward function of PixelShuffle."""
inputs = tf.cast(inputs, tf.float16)
if self.data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 2, 3, 1])
outputs = tf.nn.depth_to_space(inputs, self.upscale, data_format='NHWC')
if self.data_format == 'channels_first':
outputs = tf.transpose(outputs, [0, 3, 1, 2])
outputs = tf.cast(outputs, tf.float32)
return outputs
@ClassFactory.register(ClassType.NETWORK)
class Split(Module, OperatorSerializable):
"""Class of Split."""
def __init__(self, size=None, dim=0):
super(Split, self).__init__()
self.size = size
self.dim = dim
def call(self, inputs, **kwargs):
"""Forward function of Split."""
length = inputs.shape[self.dim]
number = length // self.size
return tf.split(inputs, number, self.dim)
@ClassFactory.register(ClassType.NETWORK)
class Squeeze(Module, OperatorSerializable):
"""Class of Squeeze."""
def __init__(self, dim=0):
self.dim = dim
super(Squeeze, self).__init__()
def call(self, inputs, **kwargs):
"""Forward function of squeeze."""
return tf.squeeze(inputs, [self.dim])
@ClassFactory.register(ClassType.NETWORK)
class Permute(Module, OperatorSerializable):
"""Class of Permute."""
def __init__(self, size=None):
super(Permute, self).__init__()
self.size = size
def call(self, inputs, **kwargs):
"""Forward function of Permute."""
return tf.transpose(inputs, self.size)
@ClassFactory.register(ClassType.NETWORK)
class Stack(Module, OperatorSerializable):
"""Class of Stack."""
def __init__(self, dim=0):
super(Stack, self).__init__()
self.dim = dim
def call(self, inputs, **kwargs):
"""Forward function of Stack."""
return tf.stack(inputs, self.dim)
@ClassFactory.register(ClassType.NETWORK)
class Transpose(Module, OperatorSerializable):
"""Class of Transpose."""
def __init__(self, dim1=0, dim2=1):
super(Transpose, self).__init__()
self.dim1, self.dim2 = dim1, dim2
def call(self, inputs, **kwargs):
"""Call Transpose."""
new_dim = [i for i in range(len(inputs.shape))]
new_dim[self.dim1], new_dim[self.dim2] = new_dim[self.dim2], new_dim[self.dim1]
return tf.transpose(inputs, new_dim)
@ClassFactory.register(ClassType.NETWORK)
class LeakyReLU(Module, OperatorSerializable):
"""Class of LeakyReLU."""
def __init__(self, inplace=False, negative_slope=0.01):
super(LeakyReLU, self).__init__()
self.inplace = inplace
self.alpha = negative_slope
def call(self, input, **kwargs):
"""Call LeakyReLU."""
return tf.nn.leaky_relu(input, self.alpha)
@ClassFactory.register(ClassType.NETWORK)
class InterpolateScale(Module, OperatorSerializable):
"""Upsample of torch with scale_factor."""
def __init__(self, scale_factor=None, size=None, mode='bilinear', align_corners=False):
super(InterpolateScale, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
self.size = size
def call(self, inputs, **kwargs):
"""Call InterpolateScale."""
inputs = tf.transpose(inputs, [0, 2, 3, 1])
if self.size is not None:
if isinstance(self.size, int):
self.size = (self.size, self.size)
output = tf.image.resize(inputs, size=self.size, method=self.mode, align_corners=self.align_corners)
else:
output = tf.image.resize_images(inputs, [inputs.shape[1] * self.scale_factor,
inputs.shape[2] * self.scale_factor], method=self.mode,
align_corners=self.align_corners)
return tf.transpose(output, [0, 3, 1, 2])
@ClassFactory.register(ClassType.NETWORK)
class MeanShift(Module, OperatorSerializable):
"""Subtract or add rgb_mean to the image."""
def __init__(self, rgb_range, rgb_mean, rgb_std=(1.0, 1.0, 1.0), sign=-1):
"""Construct the class MeanShift.
:param rgb_range: range of tensor, usually 1.0 or 255.0
:param rgb_mean: mean of rgb value
:param rgb_std: std of rgb value
:param sign: -1 for subtract, 1 for add
"""
super(MeanShift, self).__init__()
self.rgb_std = rgb_std
self.rgb_mean = rgb_mean
self.sign = sign
self.rgb_range = rgb_range
def call(self, inputs, *args, **kwargs):
"""Call MeanShift."""
std = tf.convert_to_tensor(self.rgb_std, dtype=tf.float32)
self.weight = tf.convert_to_tensor(np.eye(3).astype(np.float32)) # tf.eye(3)
self.weight = tf.div(self.weight, std)
self.bias = self.sign * self.rgb_range * tf.convert_to_tensor(self.rgb_mean, dtype=tf.float32)
self.bias = tf.div(self.bias, std)
res = tf.einsum('ij, njhw->nihw', self.weight, inputs)
res = tf.transpose(res, [0, 2, 3, 1])
res = tf.nn.bias_add(res, self.bias)
res = tf.transpose(res, [0, 3, 1, 2])
return res
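    # Illustrative usage sketch (added; the mean values are placeholders, not
    # taken from any particular dataset):
    #
    #     sub_mean = MeanShift(rgb_range=255., rgb_mean=(0.4, 0.4, 0.4), sign=-1)
    #     add_mean = MeanShift(rgb_range=255., rgb_mean=(0.4, 0.4, 0.4), sign=1)
    #     x = sub_mean(images)   # normalize network inputs (NCHW)
    #     y = add_mean(outputs)  # undo the shift on network outputs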
@ClassFactory.register(ClassType.NETWORK)
class GlobalMaxPool1d(Module):
"""Construct the class GlobalMaxPool1d."""
def __init__(self):
super(GlobalMaxPool1d, self).__init__()
def call(self, inputs, *args, **kwargs):
"""Call max_pool1d function."""
return tf.layers.MaxPooling1D(pool_size=get_shape(inputs)[2])(inputs)
@ClassFactory.register(ClassType.NETWORK)
class MoudleList(Module, OperatorSerializable):
"""Class of LeakyReLU."""
def __init__(self):
super(MoudleList, self).__init__()
self.moudle_list = []
def append(self, moudle):
"""Append new moudle."""
index = len(self.moudle_list)
self.add_module('moudle_list_' + str(index), moudle)
self.moudle_list.append(moudle)
return self
def __getitem__(self, idx):
"""Get item by idx."""
return list(self.children())[idx]
def concat(inputs, dim=1):
"""Call concat according to backends."""
if dim != 1:
return tf.concat(inputs, axis=dim)
if General.data_format == "channels_first":
dim = 1
elif General.data_format == "channels_last":
dim = 3
return tf.concat(inputs, axis=dim)
def mul(a, b):
"""Call mul according to backends."""
return tf.multiply(a, b)
def matmul(a, b):
"""Call matmul according to backends."""
return tf.matmul(a, b)
def random_normal(*size):
"""Apply random values from a normal distribution."""
return tf.random.normal(size)
def softmax(input, dim=None):
"""Apply a softmax function."""
return tf.nn.softmax(input, dim)
def gumbel_softmax_sample(input, temperature, eps=1e-20):
"""Draw a sample from the Gumbel-Softmax distribution."""
shape = tf.shape(input)
U = tf.random_uniform(shape, minval=0, maxval=1)
U = -tf.log(-tf.log(U + eps) + eps)
y = input + U
return tf.nn.softmax(y / temperature)
def gumbel_softmax(input, dim=-1, tau=1, hard=True, eps=1e-20):
"""Apply a gumbel-softmax function."""
# keep_dims = True if dim == -1 else False
y = gumbel_softmax_sample(input, tau, eps)
if hard:
y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
y = tf.stop_gradient(y_hard - y) + y
return y
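# Illustrative usage sketch (added; `logits` is a hypothetical [batch, classes]
# tensor). With hard=True the forward pass is one-hot while gradients flow
# through the soft sample (straight-through estimator):
#
#     logits = tf.constant([[1.0, 2.0, 0.5]])
#     sample = gumbel_softmax(logits, tau=1.0, hard=True)  # shape [1, 3]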
def to_numpy(input):
"""Apply numpy function."""
return input
def mean(input):
"""Apply mean function."""
return tf.reduce_mean(input, [-2, -1], keepdims=True)
def pad(inputs, position):
"""Apply pad function."""
len_dim = len(get_shape(inputs))
pos = [[0, 0] for i in range(len_dim)]
for i in range(len(position)):
if i % 2 == 0:
pos[(-(i // 2) - 1)][0] = position[i]
else:
pos[(-(i // 2) - 1)][1] = position[i]
return tf.pad(inputs, pos)
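# Added note: `position` follows the torch-style (last_dim_left, last_dim_right,
# second_last_left, ...) convention, which is converted into the per-dimension
# [[before, after], ...] list expected by tf.pad. With a hypothetical NCHW input:
#
#     x = tf.zeros([1, 3, 8, 8])
#     y = pad(x, (1, 1, 2, 2))  # W padded by 1/1, H by 2/2 -> shape [1, 3, 12, 10]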
def tensor_abs(inputs):
"""Apply abs function."""
return tf.abs(inputs)
def mean_all(inputs):
"""Apply mean_all function."""
return tf.math.reduce_mean(inputs)
def interpolate(input, size, mode='bilinear', align_corners=False):
"""Apply interpolate function."""
x = tf.image.resize(tf.transpose(input, [0, 2, 3, 1]),
size=size, method=mode, align_corners=align_corners)
x = tf.transpose(x, [0, 3, 1, 2])
return x
def add_n(input):
"""Apply sum function."""
return tf.add_n(list(input))
def get_shape(inputs):
"""Get shape."""
return inputs.get_shape().as_list()
def drop_path(x, prob):
"""Drop path operation.
:param x: input feature map
:type x: torch tensor
:param prob: dropout probability
:type prob: float
:return: output feature map after dropout
:rtype: torch tensor
"""
if prob <= 0.:
return x
keep = 1. - prob
bernoulli_random = tf.random.uniform([int(x.get_shape()[0]), 1, 1, 1])
mask = tf.cast(bernoulli_random < keep, tf.float32)
x = tf.div(x, keep)
x = tf.multiply(x, mask)
return x
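# Illustrative usage sketch (added; the input is hypothetical and needs a static
# batch size). Each sample is zeroed with probability `prob`, and survivors are
# rescaled by 1/keep so the expected value is unchanged:
#
#     features = tf.ones([8, 16, 32, 32])
#     out = drop_path(features, prob=0.2)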
def zeros(shape):
"""Create zeros like shape."""
res = tf.zeros(shape)
res = tf.cast(res, tf.float32)
return res
def maximum(arg1, arg2):
"""Get max item."""
return tf.maximum(arg1, arg2)
def minimum(arg1, arg2):
"""Get min item."""
return tf.minimum(arg1, arg2)
def new_constant(tensor, size, value, dtype='long'):
"""Return new tensor with shape."""
if dtype == 'long':
dtype = tf.float32
elif dtype == 'uint8':
dtype = tf.int32
else:
dtype = None
if not isinstance(size, list):
size = list(size)
return tf.constant(value=value, dtype=dtype, shape=size)
def argmax(tensor, dim):
"""Get max and ind from dim."""
return tf.argmax(tensor, axis=dim)
def clamp(x, min=float("-inf"), max=float("inf")):
"""Cet value after clamp."""
return tf.clip_by_value(x, min=min, max=max)
def where(cond):
"""Return index by condition."""
return tf.where(cond)
def unique(inputs):
"""Return the unique elements of the input tensor."""
return tf.unique(inputs)
def log(inputs):
"""Return the log of the input tensor."""
return tf.math.log(inputs)
def convert_to_tensor(narray, device):
"""Convert numpy to tensor."""
return tf.convert_to_tensor(narray, tf.float32)
def new_ones(tensor, size, dtype=None):
"""Return new tensor with shape."""
if dtype == 'long':
dtype = tf.float32
elif dtype == 'uint8':
dtype = tf.int32
else:
dtype = None
    return tf.constant(value=1, dtype=dtype, shape=size)
def arange(left, right, dtype, device):
"""Rreange from left to right."""
if dtype == 'long':
dtype = tf.float32
elif dtype == 'uint8':
dtype = tf.int32
else:
dtype = None
return tf.range(left, right, dtype=dtype)
def compare_where(cond, x, y):
"""Return item by condition."""
return tf.where(cond, x, y)
def unsqueeze(inputs, dim):
"""Expand in dim."""
return tf.expand_dims(inputs, dim)
def expand_as(inputs, tensor):
"""Expand as tensor."""
return tf.broadcast_to(inputs, tensor.get_shape())
def exp(tensor):
"""Return exp(tensor)."""
return tf.math.exp(tensor)
def pow(input, exponent, out=None):
"""Calculate the exponent value of the input by element and returns the result tensor."""
return tf.pow(input)
def ones(input_size, out=None):
    """Return a tensor of all 1s with the shape defined by input_size."""
    return tf.ones(input_size)
def one_hot(inputs, num_classes):
"""Take LongTensor with index values of shape."""
return tf.one_hot(inputs, num_classes)
def ones_like(out):
    """Return a tensor of all 1s with the same shape as the input tensor."""
    return tf.ones_like(out)
def zeros_like(out):
    """Return a tensor of all 0s with the same shape as the input tensor."""
    return tf.zeros_like(out)
def to(input, dtype):
"""Convert input to dtype."""
if dtype == 'long':
        dtype = tf.int64
elif dtype == 'uint8':
dtype = tf.uint8
elif dtype == 'float32':
dtype = tf.float32
return tf.cast(input, dtype=dtype)
def reduce_sum(input, dim=0, dtype=None):
"""Apply sum function."""
out = tf.reduce_sum(input, axis=dim)
if dtype is not None:
out = to(out, dtype)
return out
def gelu(x):
"""Apply gelu function."""
return x * 0.5 * (1.0 + tf.erf(x / math.sqrt(2.0)))
def swish(x):
"""Apply swish function."""
return x * tf.sigmoid(x)
def relu(x):
"""Apply relu function."""
return tf.nn.relu(x)
def sqrt(x):
"""Apply sqrt function."""
return tf.sqrt(x)
@ClassFactory.register(ClassType.NETWORK)
class LayerNorm(Module, OperatorSerializable):
"""Layer Norm module."""
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root)."""
super(LayerNorm, self).__init__()
self.weight = self.set_parameters('gamma', ones(hidden_size))
self.bias = self.set_parameters('beta', zeros(hidden_size))
self.variance_epsilon = eps
def call(self, x):
"""Call LayerNorm."""
        u = tf.reduce_mean(x, axis=-1, keepdims=True)
        s = tf.reduce_mean(tf.square(x - u), axis=-1, keepdims=True)
        x = (x - u) / sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
| 32.647986
| 113
| 0.601143
|
ddd492a69cc89725e049fe010ffc0c18f834b74d
| 1,929
|
py
|
Python
|
c_nutra/config/settings/local.py
|
usbdevlab/C-Nutra
|
d20addafd343595a599231acb82a4af2fdb43c7f
|
[
"MIT"
] | null | null | null |
c_nutra/config/settings/local.py
|
usbdevlab/C-Nutra
|
d20addafd343595a599231acb82a4af2fdb43c7f
|
[
"MIT"
] | null | null | null |
c_nutra/config/settings/local.py
|
usbdevlab/C-Nutra
|
d20addafd343595a599231acb82a4af2fdb43c7f
|
[
"MIT"
] | null | null | null |
"""Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
import os
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# END EMAIL CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'default.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# END DATABASE CONFIGURATION
# CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# END CACHE CONFIGURATION
# TOOLBAR CONFIGURATION
# See:
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'password_reset',
)
MIDDLEWARE_CLASSES += (
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
# END TOOLBAR CONFIGURATION
AUTH_PROFILE_MODULE = 'users.UserProfile'
support_email = "survey@coliving.org"
| 27.557143
| 108
| 0.675998
|
a1ddc8d6cb28fe84547c92baeaca8da80ac58c35
| 2,886
|
py
|
Python
|
azure-batch/azure/batch/models/pool_get_options.py
|
CharaD7/azure-sdk-for-python
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
[
"MIT"
] | null | null | null |
azure-batch/azure/batch/models/pool_get_options.py
|
CharaD7/azure-sdk-for-python
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
[
"MIT"
] | null | null | null |
azure-batch/azure/batch/models/pool_get_options.py
|
CharaD7/azure-sdk-for-python
|
9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolGetOptions(Model):
"""Additional parameters for the Pool_Get operation.
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id identifier in the response.
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. If not specified, this
header will be automatically populated with the current system clock
time.
:type ocp_date: datetime
    :param if_match: An ETag value. Specify this header to perform the
     operation only if the resource's current ETag exactly matches the
     specified value.
    :type if_match: str
    :param if_none_match: An ETag value. Specify this header to perform the
     operation only if the resource's current ETag does not match the
     specified value.
:type if_none_match: str
:param if_modified_since: Specify this header to perform the operation
only if the resource has been modified since the specified date/time.
:type if_modified_since: datetime
:param if_unmodified_since: Specify this header to perform the operation
only if the resource has not been modified since the specified date/time.
:type if_unmodified_since: datetime
"""
def __init__(self, select=None, expand=None, timeout=30, client_request_id=None, return_client_request_id=None, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.select = select
self.expand = expand
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
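# Illustrative usage sketch (added; identifiers and the client call are shown
# for orientation only, all arguments are optional):
#
#     options = PoolGetOptions(select='id,state', timeout=60)
#     # pool = batch_client.pool.get('mypool', pool_get_options=options)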
| 46.548387
| 216
| 0.703396
|
05b1433d757485fbc5db001edfedd18b005c99f2
| 445
|
py
|
Python
|
python/isbn-verifier/isbn_verifier.py
|
gdantaas/Exercism-Python
|
3a11f5010a1f740b73be458d9802ec074d6569a0
|
[
"MIT"
] | null | null | null |
python/isbn-verifier/isbn_verifier.py
|
gdantaas/Exercism-Python
|
3a11f5010a1f740b73be458d9802ec074d6569a0
|
[
"MIT"
] | null | null | null |
python/isbn-verifier/isbn_verifier.py
|
gdantaas/Exercism-Python
|
3a11f5010a1f740b73be458d9802ec074d6569a0
|
[
"MIT"
] | null | null | null |
def is_valid(isbn):
    import string
    isbn = isbn.replace('-', '')
    if len(isbn) != 10 or not isbn[:-1].isnumeric() or isbn[-1] not in string.digits + 'X':
        return False
    weights = range(10, 0, -1)
    digits = [10 if dig == 'X' else int(dig) for dig in isbn]
    return sum(d * w for d, w in zip(digits, weights)) % 11 == 0
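# Worked example (added for clarity): for "3-598-21508-8" the digits are
# 3 5 9 8 2 1 5 0 8 8 and the weights run 10..1, so the checksum is
# 3*10 + 5*9 + 9*8 + 8*7 + 2*6 + 1*5 + 5*4 + 0*3 + 8*2 + 8*1 = 264 = 24 * 11,
# hence is_valid("3-598-21508-8") returns True.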
| 40.454545
| 99
| 0.550562
|
9eca10719ff8baea606f3a54dcdab6d7d28ee272
| 447
|
py
|
Python
|
data/scripts/templates/object/mobile/shared_space_comm_station_lok.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/mobile/shared_space_comm_station_lok.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/mobile/shared_space_comm_station_lok.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_space_comm_station_lok.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.294118
| 68
| 0.731544
|
f6cb14d3fb50b057a3ee2485ee2dbedd2a4e3115
| 16,007
|
py
|
Python
|
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/umgr/staging_input/default.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | 1
|
2019-05-24T02:19:29.000Z
|
2019-05-24T02:19:29.000Z
|
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/umgr/staging_input/default.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | null | null | null |
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/umgr/staging_input/default.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | null | null | null |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import tempfile
import threading as mt
import tarfile
import radical.saga as rs
import radical.utils as ru
from ... import states as rps
from ... import constants as rpc
from ... import utils as rpu
from .base import UMGRStagingInputComponent
from ...staging_directives import complete_url
# if we receive more than a certain number of units in a bulk, we create the
# unit sandboxes in a remote bulk op. That limit is defined here, along with
# the definition of the bulk mechanism used to create the sandboxes:
# saga: use SAGA bulk ops
# tar : unpack a locally created tar which contains all sandboxes
UNIT_BULK_MKDIR_THRESHOLD = 16
UNIT_BULK_MKDIR_MECHANISM = 'tar'
# ==============================================================================
#
class Default(UMGRStagingInputComponent):
"""
This component performs all umgr side input staging directives for compute
units. It gets units from the umgr_staging_input_queue, in
UMGR_STAGING_INPUT_PENDING state, will advance them to UMGR_STAGING_INPUT
state while performing the staging, and then moves then to the
AGENT_SCHEDULING_PENDING state, passing control to the agent.
"""
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
UMGRStagingInputComponent.__init__(self, cfg, session)
# --------------------------------------------------------------------------
#
def initialize_child(self):
# we keep a cache of SAGA dir handles
self._fs_cache = dict()
self._js_cache = dict()
self._pilots = dict()
self._pilots_lock = mt.RLock()
self.register_input(rps.UMGR_STAGING_INPUT_PENDING,
rpc.UMGR_STAGING_INPUT_QUEUE, self.work)
# FIXME: this queue is inaccessible, needs routing via mongodb
self.register_output(rps.AGENT_STAGING_INPUT_PENDING, None)
# we subscribe to the command channel to learn about pilots being added
# to this unit manager.
self.register_subscriber(rpc.CONTROL_PUBSUB, self._base_command_cb)
# --------------------------------------------------------------------------
#
def finalize_child(self):
self.unregister_subscriber(rpc.STATE_PUBSUB, self._base_command_cb)
try:
[fs.close() for fs in self._fs_cache.values()]
[js.close() for js in self._js_cache.values()]
except:
pass
# --------------------------------------------------------------------------
#
def _base_command_cb(self, topic, msg):
# keep track of `add_pilots` commands and update self._pilots
# accordingly.
cmd = msg.get('cmd')
arg = msg.get('arg')
if cmd not in ['add_pilots']:
self._log.debug('skip cmd %s', cmd)
return True
pilots = arg.get('pilots', [])
if not isinstance(pilots, list):
pilots = [pilots]
with self._pilots_lock:
for pilot in pilots:
pid = pilot['uid']
self._log.debug('add pilot %s', pid)
if pid not in self._pilots:
self._pilots[pid] = pilot
return True
# --------------------------------------------------------------------------
#
def work(self, units):
if not isinstance(units, list):
units = [units]
self.advance(units, rps.UMGR_STAGING_INPUT, publish=True, push=False)
# we first filter out any units which don't need any input staging, and
# advance them again as a bulk. We work over the others one by one, and
# advance them individually, to avoid stalling from slow staging ops.
no_staging_units = list()
staging_units = list()
for unit in units:
# no matter if we perform any staging or not, we will push the full
# unit info to the DB on the next advance, and will pass control to
# the agent.
unit['$all'] = True
unit['control'] = 'agent_pending'
# check if we have any staging directives to be enacted in this
# component
actionables = list()
for sd in unit['description'].get('input_staging', []):
if sd['action'] in [rpc.TRANSFER, rpc.TARBALL]:
actionables.append(sd)
if actionables:
staging_units.append([unit, actionables])
else:
no_staging_units.append(unit)
# Optimization: if we obtained a large bulk of units, we at this point
# attempt a bulk mkdir for the unit sandboxes, to free the agent of
# performing that operation. That implies that the agent needs to check
# sandbox existence before attempting to create them now.
#
# Note that this relies on the umgr scheduler to assign the sandbox
# to the unit.
#
# Note further that we need to make sure that all units are actually
# pointing into the same target file system, so we need to cluster by
# filesystem before checking the bulk size. For simplicity we actually
# cluster by pilot ID, which is sub-optimal for unit bulks which go to
# different pilots on the same resource (think OSG).
#
# Note further that we skip the bulk-op for all units for which we
# actually need to stage data, since the mkdir will then implicitly be
# done anyways.
#
# Caveat: we can actually only (reasonably) do this if we know some
# details about the pilot, because otherwise we'd have too much guessing
# to do about the pilot configuration (sandbox, access schema, etc), so
# we only attempt this optimization for units scheduled to pilots for
# which we learned those details.
unit_sboxes_by_pid = dict()
for unit in no_staging_units:
sbox = unit['unit_sandbox']
pid = unit['pilot']
if pid not in unit_sboxes_by_pid:
unit_sboxes_by_pid[pid] = list()
unit_sboxes_by_pid[pid].append(sbox)
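# For illustration (hypothetical IDs and paths), the structure built above
# looks like:
#   unit_sboxes_by_pid = {'pilot.0000': ['.../pilot.0000/unit.000001', '.../pilot.0000/unit.000002'],
#                         'pilot.0001': ['.../pilot.0001/unit.000003']}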
# now trigger the bulk mkdir for all filesystems which have more than
# a certain number of units to handle in this bulk:
for pid in unit_sboxes_by_pid:
with self._pilots_lock:
pilot = self._pilots.get(pid)
if not pilot:
# we don't feel inclined to optimize for unknown pilots
self._log.debug('pid unknown - skip optimization for %s', pid)
continue
session_sbox = self._session._get_session_sandbox(pilot)
unit_sboxes = unit_sboxes_by_pid[pid]
if len(unit_sboxes) >= UNIT_BULK_MKDIR_THRESHOLD:
self._log.debug('=== tar %d sboxes', len(unit_sboxes))
# no matter the bulk mechanism, we need a SAGA handle to the
# remote FS
sbox_fs = ru.Url(session_sbox) # deep copy
sbox_fs.path = '/'
sbox_fs_str = str(sbox_fs)
if sbox_fs_str not in self._fs_cache:
self._fs_cache[sbox_fs_str] = rs.filesystem.Directory(sbox_fs,
session=self._session)
saga_dir = self._fs_cache[sbox_fs_str]
# we have two options for a bulk mkdir:
# 1) ask SAGA to create the sandboxes in a bulk op
# 2) create a tarball with all unit sandboxes, push it over, and
# untar it (one untar op then creates all dirs). We implement
# both
if UNIT_BULK_MKDIR_MECHANISM == 'saga':
tc = rs.task.Container()
for sbox in unit_sboxes:
tc.add(saga_dir.make_dir(sbox, ttype=rs.TASK))
tc.run()
tc.wait()
elif UNIT_BULK_MKDIR_MECHANISM == 'tar':
tmp_path = tempfile.mkdtemp(prefix='rp_agent_tar_dir')
tmp_dir = os.path.abspath(tmp_path)
tar_name = '%s.%s.tar' % (self._session.uid, self.uid)
tar_tgt = '%s/%s' % (tmp_dir, tar_name)
tar_url = ru.Url('file://localhost/%s' % tar_tgt)
# we want pathnames which are relative to the session
# sandbox. Ignore all other sandboxes - the agent will have
# to create those.
root = str(session_sbox)
rlen = len(root)
rels = list()
for path in unit_sboxes:
if path.startswith(root):
rels.append(path[rlen+1:])
rpu.create_tar(tar_tgt, rels)
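# Example with hypothetical values: for
#   root = 'sftp://host/scratch/session.0000'
#   path = 'sftp://host/scratch/session.0000/pilot.0000/unit.000042'
# the entry appended to rels above is 'pilot.0000/unit.000042', so the tarball
# only contains paths relative to the session sandbox.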
tar_rem_path = "%s/%s" % (str(session_sbox), tar_name)
self._log.debug('sbox: %s [%s]', session_sbox, type(session_sbox))
self._log.debug('copy: %s -> %s', tar_url, tar_rem_path)
saga_dir.copy(tar_url, tar_rem_path, flags=rs.filesystem.CREATE_PARENTS)
# get a job service handle to the target resource and run
# the untar command. Use the hop to skip the batch system
js_url = pilot['js_hop']
self._log.debug('js : %s', js_url)
if js_url in self._js_cache:
js_tmp = self._js_cache[js_url]
else:
js_tmp = rs.job.Service(js_url, session=self._session)
self._js_cache[js_url] = js_tmp
cmd = "tar xvf %s/%s -C %s" % (session_sbox.path, tar_name,
session_sbox.path)
j = js_tmp.run_job(cmd)
j.wait()
self._log.debug('untar : %s', cmd)
self._log.debug('untar : %s\n---\n%s\n---\n%s',
j.get_stdout_string(), j.get_stderr_string(),
j.exit_code)
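# With the hypothetical values above, the submitted command would look like
#   tar xvf /scratch/session.0000/SESSION_UID.COMPONENT_UID.tar -C /scratch/session.0000
# i.e. a single remote job recreates all unit sandboxes below the session sandbox.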
if no_staging_units:
# nothing to stage, push to the agent
self.advance(no_staging_units, rps.AGENT_STAGING_INPUT_PENDING,
publish=True, push=True)
for unit,actionables in staging_units:
self._handle_unit(unit, actionables)
# --------------------------------------------------------------------------
#
def _handle_unit(self, unit, actionables):
# FIXME: we should create unit sandboxes in a bulk
uid = unit['uid']
self._prof.prof("create_sandbox_start", uid=uid)
src_context = {'pwd' : os.getcwd(), # !!!
'unit' : unit['unit_sandbox'],
'pilot' : unit['pilot_sandbox'],
'resource' : unit['resource_sandbox']}
tgt_context = {'pwd' : unit['unit_sandbox'], # !!!
'unit' : unit['unit_sandbox'],
'pilot' : unit['pilot_sandbox'],
'resource' : unit['resource_sandbox']}
# we have actionable staging directives, and thus we need a unit
# sandbox.
sandbox = rs.Url(unit["unit_sandbox"])
tmp = rs.Url(unit["unit_sandbox"])
# url used for cache (sandbox url w/o path)
tmp.path = '/'
key = str(tmp)
self._log.debug('key %s / %s', key, tmp)
if key not in self._fs_cache:
self._fs_cache[key] = rs.filesystem.Directory(tmp,
session=self._session)
saga_dir = self._fs_cache[key]
saga_dir.make_dir(sandbox, flags=rs.filesystem.CREATE_PARENTS)
self._prof.prof("create_sandbox_stop", uid=uid)
# Loop over all transfer directives and filter out tarball staging
# directives. Those files are added into a tarball, and a single
# actionable to stage that tarball replaces the original actionables.
# create a new actionable list during the filtering
new_actionables = list()
tar_file = None
for sd in actionables:
# don't touch non-tar SDs
if sd['action'] != rpc.TARBALL:
new_actionables.append(sd)
else:
action = sd['action']
flags = sd['flags'] # NOTE: we don't use those
did = sd['uid']
src = sd['source']
tgt = sd['target']
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
self._prof.prof('staging_in_tar_start', uid=uid, msg=did)
# create a tarfile on the first match, and register for transfer
if not tar_file:
tmp_file = tempfile.NamedTemporaryFile(
prefix='rp_usi_%s.' % uid,
suffix='.tar',
delete=False)
tar_path = tmp_file.name
tar_file = tarfile.open(fileobj=tmp_file, mode='w')
tar_src = ru.Url('file://localhost/%s' % tar_path)
tar_tgt = ru.Url('unit:////%s.tar' % uid)
tar_did = ru.generate_id('sd')
tar_sd = {'action' : rpc.TRANSFER,
'flags' : rpc.DEFAULT_FLAGS,
'uid' : tar_did,
'source' : str(tar_src),
'target' : str(tar_tgt),
}
new_actionables.append(tar_sd)
# add the src file
tar_file.add(src.path, arcname=tgt.path)
self._prof.prof('staging_in_tar_stop', uid=uid, msg=did)
# make sure tarball is flushed to disk
if tar_file:
tar_file.close()
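# For illustration (hypothetical uid and temp path), the single directive
# synthesized above looks roughly like:
#   {'action': rpc.TRANSFER,
#    'flags' : rpc.DEFAULT_FLAGS,
#    'uid'   : 'sd.0042',
#    'source': 'file://localhost//tmp/rp_usi_unit.000001.abcd.tar',
#    'target': 'unit:////unit.000001.tar'}
# and it replaces all individual TARBALL directives folded into the tarball.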
# work on the filtered TRANSFER actionables
for sd in new_actionables:
action = sd['action']
flags = sd['flags']
did = sd['uid']
src = sd['source']
tgt = sd['target']
if action == rpc.TRANSFER:
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
# Check if the src is a folder, if true
# add recursive flag if not already specified
if os.path.isdir(src.path):
flags |= rs.filesystem.RECURSIVE
# Always set CREATE_PARENTS
flags |= rs.filesystem.CREATE_PARENTS
self._prof.prof('staging_in_start', uid=uid, msg=did)
saga_dir.copy(src, tgt, flags=flags)
self._prof.prof('staging_in_stop', uid=uid, msg=did)
if tar_file:
# some tarball staging was done. Add a staging directive for the
# agent to untar the tarball, and clean up.
tar_sd['action'] = rpc.TARBALL
unit['description']['input_staging'].append(tar_sd)
os.remove(tar_path)
# staging is done, we can advance the unit at last
self.advance(unit, rps.AGENT_STAGING_INPUT_PENDING, publish=True, push=True)
# ------------------------------------------------------------------------------
| 38.202864
| 92
| 0.526207
|
b2110a5d25ed7e320ce30cfd180e57e879d0bf19
| 2,712
|
py
|
Python
|
app.py
|
brentajones/quotable
|
3aeb12901eff0875bff9865c591b0e09d3c0463c
|
[
"MIT"
] | 1
|
2020-05-24T18:31:19.000Z
|
2020-05-24T18:31:19.000Z
|
app.py
|
brentajones/quotable
|
3aeb12901eff0875bff9865c591b0e09d3c0463c
|
[
"MIT"
] | 1
|
2015-01-09T00:33:14.000Z
|
2015-01-09T00:33:14.000Z
|
app.py
|
registerguard/quotable
|
8d30f8da5bd5c5914ec3d41e810dbbbf70dbc2dc
|
[
"MIT"
] | 1
|
2021-02-18T11:07:52.000Z
|
2021-02-18T11:07:52.000Z
|
#!/usr/bin/env python
import json
from mimetypes import guess_type
import urllib
import envoy
from flask import Flask, Markup, abort, render_template
import app_config
import copytext
from render_utils import flatten_app_config, make_context
app = Flask(app_config.PROJECT_NAME)
# Example application views
@app.route('/')
def index():
"""
Example view demonstrating rendering a simple HTML page.
"""
return render_template('index.html', **make_context())
@app.route('/widget.html')
def widget():
"""
Embeddable widget example page.
"""
return render_template('widget.html', **make_context())
@app.route('/test_widget.html')
def test_widget():
"""
Example page displaying widget at different embed sizes.
"""
return render_template('test_widget.html', **make_context())
@app.route('/test/test.html')
def test_dir():
return render_template('index.html', **make_context())
# Render LESS files on-demand
@app.route('/less/<string:filename>')
def _less(filename):
try:
with open('less/%s' % filename) as f:
less = f.read()
except IOError:
abort(404)
r = envoy.run('node_modules/bin/lessc -', data=less)
return r.std_out, 200, { 'Content-Type': 'text/css' }
# Render JST templates on-demand
@app.route('/js/templates.js')
def _templates_js():
r = envoy.run('node_modules/bin/jst --template underscore jst')
return r.std_out, 200, { 'Content-Type': 'application/javascript' }
# Render application configuration
@app.route('/js/app_config.js')
def _app_config_js():
config = flatten_app_config()
js = 'window.APP_CONFIG = ' + json.dumps(config)
return js, 200, { 'Content-Type': 'application/javascript' }
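# For illustration: if flatten_app_config() returned the (hypothetical) dict
# {'PROJECT_NAME': 'quotable', 'DEBUG': True}, the response body would be
#   window.APP_CONFIG = {"PROJECT_NAME": "quotable", "DEBUG": true}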
# Render copytext
@app.route('/js/copy.js')
def _copy_js():
copy = 'window.COPY = ' + copytext.Copy().json()
return copy, 200, { 'Content-Type': 'application/javascript' }
# Serve arbitrary static files on-demand
@app.route('/<path:path>')
def _static(path):
try:
with open('www/%s' % path) as f:
return f.read(), 200, { 'Content-Type': guess_type(path)[0] }
except IOError:
abort(404)
@app.template_filter('urlencode')
def urlencode_filter(s):
"""
Filter to urlencode strings.
"""
if isinstance(s, Markup):
s = s.unescape()
s = s.encode('utf8')
s = urllib.quote_plus(s)
return Markup(s)
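# Example template usage (illustrative): {{ "Mark Twain & Co."|urlencode }}
# renders as Mark+Twain+%26+Co.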
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port')
args = parser.parse_args()
server_port = 8000
if args.port:
server_port = int(args.port)
app.run(host='0.0.0.0', port=server_port, debug=app_config.DEBUG)
| 24.432432
| 73
| 0.659661
|
0920451f4ef9fdc9769d13a5e4bb7deddb3b95d6
| 23,325
|
py
|
Python
|
musicProfile/spotify.py
|
samkovaly/SpotifyAPIMusicProfile
|
0c3a82d65713bb2ad67d5b165fc027bbec83740e
|
[
"MIT"
] | 1
|
2020-02-06T23:43:47.000Z
|
2020-02-06T23:43:47.000Z
|
musicProfile/spotify.py
|
samkovaly/SpotifyAPIMusicProfile
|
0c3a82d65713bb2ad67d5b165fc027bbec83740e
|
[
"MIT"
] | 6
|
2020-07-12T23:22:08.000Z
|
2021-09-22T18:33:21.000Z
|
musicProfile/spotify.py
|
samkovaly/SpotifyAPIMusicProfile
|
0c3a82d65713bb2ad67d5b165fc027bbec83740e
|
[
"MIT"
] | null | null | null |
from django.http import JsonResponse
import requests
import asyncio
import aiohttp
import numpy as np
import pandas as pd
from pandas import json_normalize
import json
from functools import reduce
import unidecode
from random import randint
from time import sleep
import traceback
import sys
import random
import logging
def get_spotify_music_profile(request):
spotifyAPI = SpotifyAPI(request)
try:
music_profile = spotifyAPI.get_music_profile()
return music_profile
except Exception as e:
# traceback.format_exc()
print('GLOBAL EXCEPTION - BAD. RETURNING ERROR TO FRONT END')
logging.exception("music profile refresh exception")
error_report = {
'error': {
'message': str(e),
'status': 500,
}
}
return error_report
class SpotifyAPI:
REQUEST_EXCEPTION_MSG = "Spotify API Request Exception while fetching "
SAVE_PROFILE_AS_CSV = False
USER_PLAYLISTS_ONLY = True # don't change unless you want playlists a user follows to also be included
def __init__(self, access_token):
self.header = {'Authorization' : "Bearer "+access_token}
self.user_id = self.fetch_user_id()
self.artist_columns = []
self.track_columns = []
self.artists_dataframes = []
self.tracks_dataframes = []
def get_music_profile(self):
asyncio.run(self.collect_artists_and_tracks_dataframes())
print("converting dataframes to JSON...")
print(f'returning { self.artists_df.shape[0] } artists and { self.tracks_df.shape[0] } tracks')
if self.SAVE_PROFILE_AS_CSV:
self.artists_df.to_csv('artists_df.csv')
self.tracks_df.to_csv('tracks_df.csv')
artists_json = self.get_artists_json(self.artists_df)
tracks_json = self.get_tracks_json(self.tracks_df)
music_profile = {
"artists" : artists_json,
"tracks" : tracks_json,
}
return music_profile
def get_artists_json(self, artists_df):
return artists_df.to_json(orient='records')
def get_tracks_json(self, tracks_df):
return tracks_df.to_json(orient='records')
async def collect_artists_and_tracks_dataframes(self):
# fetch artists and tracks together, due to how the Spotify API returns both
print("collect_artists_and_tracks_dataframes()...")
tasks = [self.fetch_top_artists("long_term"), self.fetch_top_artists("medium_term"), self.fetch_top_artists("short_term")
, self.fetch_top_tracks("long_term"), self.fetch_top_tracks("medium_term"), self.fetch_top_tracks("short_term")
, self.fetch_followed_artists(), self.fetch_saved_tracks(), self.get_all_playlists()]
await asyncio.gather(*tasks)
print("initial tasks (fetches) have finished gathering..")
print("initiating get_artists_master_df(), where full artist objects will be fetched..")
self.artists_df = await self.get_artists_master_df()
print("finished fetching full objects.")
self.tracks_df = self.get_tracks_master_df()
async def get_artists_master_df(self):
if self.artists_dataframes == []:
return pd.DataFrame()
artists_df = None
if len(self.artists_dataframes) > 1:
artists_df = reduce(lambda left, right: pd.merge(left, right, how="outer"), self.artists_dataframes)
else:
artists_df = self.artists_dataframes[0]
artists_df = artists_df.drop_duplicates()
if 'id' not in artists_df:
return pd.DataFrame()
# add all columns needed if we don't have them yet
for col in self.artist_columns:
if col not in artists_df:
artists_df[col] = np.NaN
if 'track.id' not in artists_df:
artists_df['track.id'] = np.NaN
# here, I fill in missing values
# with a second gather operation
if 'image' in artists_df:
artists_missing = artists_df[artists_df['image'].isnull()]
else:
artists_missing = artists_df
missing_ids = artists_missing['id'].tolist()
missing_ids = list(set(missing_ids))
if len(missing_ids) > 0:
artists_full_df = await self.get_full_artist_dataframes(missing_ids)
artists_df = pd.merge(artists_df, artists_full_df, how="outer")
artists_df = artists_df.drop_duplicates()
artists_df['smallImage'] = artists_df['image']
artists_df['bigImage'] = artists_df['image']
artists_df = artists_df.drop('image', axis = 1)
artists_df_transform = {}
for column in self.artist_columns:
artists_df_transform[column] = 'max'
artists_df_transform['bigImage'] = 'first'
artists_df_transform['smallImage'] = 'last'
artists_df_transform['uri'] = 'first'
def agg_track_list(tracks): # set to remove duplicates
track_list = [x for x in list(set(tracks)) if str(x) != 'nan']
return track_list
artists_df_transform['track.id'] = agg_track_list
def agg_genres_list(genres):
genre_list = [x for x in list(set(genres)) if str(x) != 'nan']
return genre_list
artists_df_transform['genres'] = agg_genres_list
artists_df = artists_df.groupby(['id', 'name']).agg(artists_df_transform)
artists_df.rename(columns = {'track.id': 'tracks'}, inplace = True)
artists_df[self.artist_columns] = artists_df[self.artist_columns].fillna(value=False)
artists_df.reset_index(level=['id', 'name'], inplace = True)
# add artist's tracks_length
def get_tracks_len(row):
return len(list(row['tracks']))
artists_df['tracks_length'] = artists_df.apply(get_tracks_len, axis=1)
# add artist's genres_length
def get_genres_len(row):
return len(list(row['genres']))
artists_df['genres_length'] = artists_df.apply(get_genres_len, axis=1)
def get_ascii_artist_name(row):
return unidecode.unidecode(row['name'])
artists_df['name_ascii'] = artists_df.apply(get_ascii_artist_name, axis=1)
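# e.g. unidecode.unidecode('Beyoncé') -> 'Beyonce', keeping name matching ASCII-safe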
return artists_df
def get_tracks_master_df(self):
if self.tracks_dataframes == []:
return pd.DataFrame()
tracks_df = reduce(lambda left, right: pd.merge(left, right, how="outer"), self.tracks_dataframes)
tracks_df = tracks_df.drop_duplicates()
if 'id' not in tracks_df:
return pd.DataFrame()
tracks_df[self.track_columns] = tracks_df[self.track_columns].fillna(value=False)
tracks_df_transform = {}
tracks_df_transform['image_size'] = 'min'
tracks_df_transform['image_url'] = 'first'
#tracks_df_transform['top_tracks_short_term'] = 'first'
#tracks_df_transform['saved_tracks'] = 'first'
#tracks_df_transform['top_tracks_medium_term'] = 'first'
#tracks_df_transform['top_tracks_long_term'] = 'first'
#tracks_df_transform['playlist'] = 'first'
tracks_df = tracks_df.groupby(['id', 'name', 'uri']).agg(tracks_df_transform)
tracks_df.reset_index(level=['id', 'name', 'uri'], inplace = True)
return tracks_df
async def fetch_top_artists(self, time_range):
print('fetching top artists... ', time_range)
self.artist_columns.append("top_artists_" + time_range)
self.artist_columns.append("top_artists_" + time_range + "_ranking")
offsets = [0, 49]
top_artists = []
for offset in offsets:
URL = "https://api.spotify.com/v1/me/top/artists?limit=50&offset="+str(offset)+"&time_range="+time_range
resp_dict = await self.fetch_json_from_URL(URL = URL, name = "top artists({}):".format(time_range))
# so if users don't listen to enough artists in the short term,
# then less than 100 short term artists are returned
# in which case ['items'] equals [] and so we must check for this
# and just simply do nothing when it happens
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
artists_df = self.extract_full_artist_from_json(resp_dict['items'])
artists_df["top_artists_"+time_range] = True
top_artists.append(artists_df)
if len(top_artists) > 0:
artists_df = pd.concat(top_artists)
if 'id' in artists_df:
current_ranking = 0
rankings = []
seen_id = set()
for index, row in artists_df.iterrows():
if row['id'] not in seen_id:
current_ranking += 1
seen_id.add(row['id'])
rankings.append(current_ranking)
artists_df["top_artists_" + time_range + "_ranking"] = rankings
artists_df = artists_df[artists_df['id'].notnull()]
self.artists_dataframes.append(artists_df)
async def fetch_top_tracks(self, time_range):
print('fetching top tracks... ', time_range)
#self.track_columns.append("top_tracks_" + time_range)
offsets = [0, 49]
all_artists = []
all_tracks = []
for offset in offsets:
URL = "https://api.spotify.com/v1/me/top/tracks?limit=50&offset="+str(offset)+"&time_range="+time_range
resp_dict = await self.fetch_json_from_URL(URL = URL, name = "artists from top tracks({})".format(time_range))
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
artists_df = json_normalize(data = resp_dict['items'], record_path=['artists'], meta=['id'], meta_prefix='track.')
artists_df = artists_df[['id', 'name', 'track.id']]
all_artists.append(artists_df)
tracks_df = json_normalize(data = resp_dict['items'], record_path=['album', 'images'], meta=['id', 'name', 'uri'], meta_prefix='track.')
tracks_df = self.cleanup_tracks_df(tracks_df)
tracks_df["top_tracks_"+time_range] = True
all_tracks.append(tracks_df)
if len(all_artists) > 0:
all_artists_df = pd.concat(all_artists)
if 'id' in all_artists_df:
all_artists_df = all_artists_df[all_artists_df['id'].notnull()]
self.artists_dataframes.append(all_artists_df)
if len(all_tracks) > 0:
all_tracks_df = pd.concat(all_tracks)
if 'id' in all_tracks_df:
all_tracks_df = all_tracks_df[all_tracks_df['id'].notnull()]
self.tracks_dataframes.append(all_tracks_df)
async def fetch_followed_artists(self):
print('fetching followed artists... ')
self.artist_columns.append("followed_artist")
next = "https://api.spotify.com/v1/me/following?type=artist&limit=50&offset=0"
followed_artists = []
while next:
resp_dict = await self.fetch_json_from_URL(URL = next, name = "followed artists")
if resp_dict and resp_dict['artists'] and resp_dict['artists']['total'] > 0 and len(resp_dict['artists']['items']) > 0:
next = resp_dict['artists']['next']
artists_df = self.extract_full_artist_from_json(resp_dict['artists']['items'])
artists_df['followed_artist'] = True
followed_artists.append(artists_df)
else:
break
if len(followed_artists) > 0:
followed_artists_df = pd.concat(followed_artists)
if 'id' in followed_artists_df:
followed_artists_df = followed_artists_df[followed_artists_df['id'].notnull()]
self.artists_dataframes.append(followed_artists_df)
async def fetch_saved_tracks(self):
print('fetching saved tracks... ')
#self.track_columns.append("saved_tracks")
next = "https://api.spotify.com/v1/me/tracks?limit=50&offset=0"
all_artists = []
all_tracks = []
while next:
resp_dict = await self.fetch_json_from_URL(URL = next, name = "saved tracks")
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
next = resp_dict['next']
artists_df = json_normalize(data = resp_dict['items'], record_path=['track', 'artists'], meta=[['track', 'id']])
artists_df = artists_df[['id', 'name', 'track.id']]
all_artists.append(artists_df)
tracks_df = json_normalize(data = resp_dict['items'], record_path=['track', 'album', 'images'], meta=[['track', 'name'], ['track', 'id'], ['track', 'uri']])
tracks_df = self.cleanup_tracks_df(tracks_df)
tracks_df["saved_tracks"] = True
all_tracks.append(tracks_df)
else:
break
if len(all_artists) > 0:
all_artists_df = pd.concat(all_artists)
if 'id' in all_artists_df:
all_artists_df = all_artists_df[all_artists_df['id'].notnull()]
self.artists_dataframes.append(all_artists_df)
if len(all_tracks) > 0:
all_tracks_df = pd.concat(all_tracks)
if 'id' in all_tracks_df:
all_tracks_df = all_tracks_df[all_tracks_df['id'].notnull()]
self.tracks_dataframes.append(all_tracks_df)
async def fetch_playlists(self):
print('fetch_playlists...')
playlists_all = []
next = "https://api.spotify.com/v1/me/playlists?limit=50&offset=0"
while next:
resp_dict = await self.fetch_json_from_URL(URL = next, name = "playlists")
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
next = resp_dict['next']
playlists_full = json_normalize(resp_dict['items'])
playlists = playlists_full[['id', 'owner.id']]
if self.USER_PLAYLISTS_ONLY:
playlists = playlists[playlists['owner.id'] == self.user_id]
playlists.drop('owner.id', axis=1, inplace=True)
playlists_all.append(playlists)
else:
break
if len(playlists_all) > 0:
return pd.concat(playlists_all)
return pd.DataFrame()
async def get_all_playlists(self):
playlists = await self.fetch_playlists()
self.artist_columns.append("playlist")
if playlists.empty or 'id' not in playlists:
return
tracks = []
artists = []
print('fetching', len(playlists), 'playlists...')
tasks = [self.fetch_playlist(playlistID) for playlistID in playlists['id']]
playlistDatas = await asyncio.gather(*tasks)
for playlistData in playlistDatas:
if not playlistData[0].empty:
artists.append(playlistData[0])
if not playlistData[1].empty:
tracks.append(playlistData[1])
if artists and len(artists) > 0:
self.artists_dataframes.append(pd.concat(artists))
if tracks and len(tracks) > 0:
self.tracks_dataframes.append(pd.concat(tracks))
async def fetch_playlist(self, ID):
next = "https://api.spotify.com/v1/playlists/"+ID+"/tracks?limit=100&offset=0"
all_artists = []
all_tracks = []
while next:
resp_dict = await self.fetch_json_from_URL(URL = next, name = "tracks from playlist")
if resp_dict and resp_dict['total'] > 0 and len(resp_dict['items']) > 0:
next = resp_dict['next']
artists_df = json_normalize(data = resp_dict['items'], record_path=['track', 'artists'], meta=[['track', 'id']])
artists_df = artists_df[['id', 'name', 'track.id']]
artists_df['playlist'] = True
all_artists.append(artists_df)
tracks_df = json_normalize(data = resp_dict['items'], record_path=['track', 'album', 'images'], meta=[['track', 'name'], ['track', 'id'], ['track', 'uri']])
tracks_df = self.cleanup_tracks_df(tracks_df)
tracks_df["playlist"] = True
all_tracks.append(tracks_df)
else:
break
all_artists_df = pd.DataFrame()
all_tracks_df = pd.DataFrame()
if len(all_artists) > 0:
all_artists_df = pd.concat(all_artists)
if 'id' in all_artists_df:
all_artists_df = all_artists_df[all_artists_df['id'].notnull()]
if len(all_tracks) > 0:
all_tracks_df = pd.concat(all_tracks)
if 'id' in all_tracks_df:
all_tracks_df = all_tracks_df[all_tracks_df['id'].notnull()]
return all_artists_df, all_tracks_df
''' takes a list of artist IDs, fetches the full artist objects from spotify using these IDs (50 at a time max),
calls extract_full_artist_from_json on the returns and returns a dataframe with all the columns needed
for the mobile app '''
async def get_full_artist_dataframes(self, all_IDs):
print(f"get_all_details_on({len(all_IDs)})_artists...")
ID_segments = self.split_into_N(all_IDs, 50)
tasks = [self.fetch_full_artists(IDs) for IDs in ID_segments]
artist_dataframes = await asyncio.gather(*tasks)
return pd.concat(artist_dataframes)
''' IDs should be of length 50 or less '''
async def fetch_full_artists(self, IDs):
URL = "https://api.spotify.com/v1/artists"
resp_dict = await self.fetch_json_from_URL(
URL = URL,
params = [('ids', ",".join(IDs))],
name = "full artist objects")
if resp_dict and resp_dict['artists']:
try:
artist_df = self.extract_full_artist_from_json(resp_dict['artists'])
except Exception:
with open('errorArtists.json', 'w') as outfile:
json.dump(resp_dict['artists'], outfile)
return pd.DataFrame()
if artist_df.empty:
return pd.DataFrame()
if 'id' in artist_df:
artist_df = artist_df[artist_df['id'].notnull()]
return artist_df
return pd.DataFrame()
def split_into_N(self, _list, N):
return [_list[i * N:(i + 1) * N] for i in range((len(_list) + N - 1) // N )]
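# e.g. split_into_N(list(range(7)), 3) -> [[0, 1, 2], [3, 4, 5], [6]]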
''' json_data must be a JSON array of full artist objects. Returns a dataframe of all the objects with
columns: id, name, genres, image, image_size'''
def extract_full_artist_from_json(self, json_data):
json_data_no_none = []
for val in json_data:
if val is not None:
json_data_no_none.append(val)
artists_genres = json_normalize(data = json_data_no_none, record_path='genres', meta=['id', 'name', 'uri'])
artists_images = json_normalize(data = json_data_no_none, record_path='images', meta=['id', 'name', 'uri'])
if artists_genres.empty or artists_images.empty:
print('artists_genres.empty', artists_genres.empty)
print('artists_images.empty', artists_images.empty)
return pd.DataFrame()
artists_df = pd.merge(artists_genres, artists_images, how="outer")
# filter out other sizes that we don't want
# don't need height and width, only size since they are the same
artists_df = artists_df.drop(['height'], axis=1)
artists_df = artists_df.drop(['width'], axis=1)
# genres columns defaults to '0' since we are extracting an array in the record_path ('genres'),
# an array of strings, not objects
artists_df = artists_df.rename(columns={0: 'genres', 'url': 'image'})
return artists_df
'''
track: {
name
id
uri
album: {
images: [{}]
}
artists: [{}]
}
'''
def cleanup_tracks_df(self, tracks_df):
# id name uri height width url
tracks_df = tracks_df.rename(columns={'track.id': 'id', 'track.name': 'name', 'track.uri': 'uri', 'url': 'image_url', 'width': 'image_size'})
tracks_df = tracks_df.drop(['height'], axis=1)
return tracks_df
''' fetch user id is implemented with requests library instead of asyncio '''
def fetch_user_id(self):
URL = "https://api.spotify.com/v1/me"
try:
r = requests.get(URL, headers = self.header)
r.raise_for_status()
respDict = json.loads(r.text)
user_id = respDict['id']
return user_id
except Exception as e:
print(self.REQUEST_EXCEPTION_MSG + "user id:", e)
''' basic fetch-json-from-URL helper implemented with aiohttp (async; call via asyncio.gather). '''
async def fetch_json_from_URL(self, URL, params = None, name = "", depth = 0):
r = None
async with aiohttp.ClientSession(raise_for_status=False) as session:
try:
r = await session.get(URL, params = params, headers = self.header)
# can try again after waiting for a bit, not really an error
if r.status == 429:
if depth > 3:
print('Error: recursion depth is 4')
return None
if 'Retry-After' in r.headers:
sleepFor = int(r.headers['Retry-After']) + 1
else:
sleepFor = 5
print("status is 429, Too many requests... recursively trying again, in ", sleepFor, ', depth = ', depth)
await asyncio.sleep(sleepFor)
return await self.fetch_json_from_URL(URL, params, name, depth + 1)
if r.status == 404:
# happens sometimes, try again
if depth > 3:
print('Error: recursion depth is 4')
return None
print('404... recursively trying again, depth = ', depth)
return await self.fetch_json_from_URL(URL, params, name, depth + 1)
if r.status != 200:
print('ERROR: spotify return status: ', r.status)
print(r.status, ".. recursively trying again, depth = ", depth)
await asyncio.sleep(1)
return await self.fetch_json_from_URL(URL, params, name, depth + 1)
resp_dict = json.loads(await r.text())
return resp_dict
except aiohttp.ClientConnectorError as e:
print('fetch_json_from_URL error')
print('name: ', name)
print('URL: ', URL)
print('error msg: ', str(e))
print('=========')
return None
| 37.200957
| 172
| 0.593055
|
00eb4caa9c0a968f54c038495d46d899e38f052e
| 551
|
py
|
Python
|
mundo3/parte4/parteB/ex102.py
|
fcdennis/CursoPython
|
485ef7e706af74eae9ee336714ddd8b493bd8e5d
|
[
"MIT"
] | null | null | null |
mundo3/parte4/parteB/ex102.py
|
fcdennis/CursoPython
|
485ef7e706af74eae9ee336714ddd8b493bd8e5d
|
[
"MIT"
] | null | null | null |
mundo3/parte4/parteB/ex102.py
|
fcdennis/CursoPython
|
485ef7e706af74eae9ee336714ddd8b493bd8e5d
|
[
"MIT"
] | null | null | null |
def fatorial(n = 0, show = False):
"""
-> Calculates the factorial of an integer.
:param n: integer to be calculated, supplied by the user;
:param show: print the steps performed to compute the factorial of n.
:return: the factorial of the integer n supplied by the user.
"""
if show:
for c in range(n, 0, -1):
print (f'{c} =' if c == 1 else f'{c} X', end=' ')
if n == 0:
return 1
else:
return n * fatorial(n - 1)
help(fatorial)
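# Illustrative usage (not part of the original script): fatorial(5, show=True)
# prints "5 X 4 X 3 X 2 X 1 = " and then returns 120.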
| 30.611111
| 88
| 0.586207
|
1b917810e3beed59dc653c80b64b384169d39434
| 326
|
py
|
Python
|
players/random_player.py
|
Kappeh/Alpha-Zero-General
|
344a2253bf48a3d12163258d27b1e81bfeb3b4f8
|
[
"MIT"
] | null | null | null |
players/random_player.py
|
Kappeh/Alpha-Zero-General
|
344a2253bf48a3d12163258d27b1e81bfeb3b4f8
|
[
"MIT"
] | null | null | null |
players/random_player.py
|
Kappeh/Alpha-Zero-General
|
344a2253bf48a3d12163258d27b1e81bfeb3b4f8
|
[
"MIT"
] | null | null | null |
from random import choice
from player import Player
class Random_Player(Player):
def __init__(self, game, name = 'Random Player'):
super(Random_Player, self).__init__(game, name)
def get_action(self, state_history):
actions = self.game.legal_actions(state_history)
return choice(actions)
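# Minimal usage sketch (assumes a `game` object providing legal_actions(state_history),
# as used above):
#   player = Random_Player(game)
#   action = player.get_action(state_history)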
| 27.166667
| 56
| 0.705521
|
7d4eb2f65c7f193fb2227d493aeb996a1e6a6767
| 7,003
|
py
|
Python
|
simulate.py
|
jlombs/COVID19_ABS
|
fb258ac86acf3b65be923fd9106a202d778b1acd
|
[
"MIT"
] | 2
|
2020-10-19T12:49:26.000Z
|
2021-06-03T09:03:15.000Z
|
simulate.py
|
jlombs/COVID19_ABS
|
fb258ac86acf3b65be923fd9106a202d778b1acd
|
[
"MIT"
] | null | null | null |
simulate.py
|
jlombs/COVID19_ABS
|
fb258ac86acf3b65be923fd9106a202d778b1acd
|
[
"MIT"
] | null | null | null |
# Script to execute the simulation
from ABClasses import agent, environment
from matplotlib import pyplot as plt
import numpy as np
# User Interface
##########################################
# Establish the population: see the ABClasses.py documentation for details
populationSize = 250 # Must be >= 10, expect slowdowns for large social distancing on large populations
population = [agent(ageBias=-1, cleanlinessBias=.2*.5*np.random.rand(), travelerBias=1.0+2.5*np.random.rand(), socialDistanceBias=.2+0*np.random.rand()) for _ in range(populationSize)]
# Example of strong virus spread with no actions taken
# populationSize = 250
# population = [agent(ageBias=-1, cleanlinessBias=0*.5*np.random.rand(), travelerBias=2.0+2.5*np.random.rand(), socialDistanceBias=0+0*np.random.rand()) for _ in range(populationSize)]
# Example of good social distancing
# populationSize = 250
# population = [agent(ageBias=-1, cleanlinessBias=0*.5*np.random.rand(), travelerBias=.0+2.5*np.random.rand(), socialDistanceBias=1+0*np.random.rand()) for _ in range(populationSize)]
# Example of cleanliness even in the face of higher travel propensity
# populationSize = 250
# population = [agent(ageBias=-1, cleanlinessBias=1*.5*np.random.rand(), travelerBias=1.0+2.5*np.random.rand(), socialDistanceBias=.5+0*np.random.rand()) for _ in range(populationSize)]
# Establish the simulation environment: see the ABClasses.py documentation for details
landscape = environment(AOE=5, attenuation=.4)
# Logical to show or hide the viral load in the dynamical plots (will increase computation time in the plotting)
showTrails = True
##########################################
# Method to help with plotting the simulation frames
# Population is a list of agent objects
# Landscape is an environment object
# statsStorage is a list of tuples, where each element is the triple (#healthy, #infected, #dead)
# ax is the axis handle
# showTrails is a logical to plot the viral environment, will increase computation / draw time
def populationPlotter(population, landscape, statsStorage, ax, showTrails=False):
if showTrails:
# Get coordinates where the viral load is larger than .1, for plotting
trailsX, trailsY = np.where(landscape.viralMap > .1)
downScale = 10 ** (-population[0].locationGranularity)
tx = trailsX*downScale
ty = trailsY*downScale
# Set the alpha of the viral trail based on viral load
alphas = []
for i, j in zip(trailsX, trailsY):
alphas.append(landscape.viralMap[i, j])
# Set the color array: for red the first column needs to be one, alphas are the 4th column
rgba_colors = np.zeros((len(tx), 4))
rgba_colors[:, 0] = 1.0
rgba_colors[:, 3] = alphas
# Scatter the viral trails
ax[1].scatter(tx, ty, color=rgba_colors, marker='*')
# Create arrays for the population stats
statsX = range(len(statsStorage))
numInfected = np.array([ss[1] for ss in statsStorage])/populationSize
numDead = np.array([ss[2] for ss in statsStorage])/populationSize
# Create stacked color plot based on population proportions
ax[0].fill_between(statsX, numDead, facecolor='k', alpha=.5, label='Dead')
ax[0].fill_between(statsX, numDead, numInfected+numDead, facecolor='r', alpha=1, label='Infected')
ax[0].fill_between(statsX, numInfected+numDead, 1, facecolor='g', alpha=1, label='Healthy')
ax[0].legend(loc='upper right')
# Separate colors and locations for the agents
x = [p.location[0] for p in population]
y = [p.location[1] for p in population]
c = [('k' if not p.alive else ('r' if p.infected else 'g')) for p in population]
# Scatter the agents
ax[1].scatter(x, y, c=c)
# Fix the axis limits and title it
ax[1].set_xlim((0, 1))
ax[1].set_ylim(0, 1)
ax[0].set_title('Infection Statistics')
# Seed the infection randomly in the population
patient0 = np.random.randint(0, populationSize)
population[patient0].infected = True
population[patient0].cleanlinessBias = 0
population[patient0].location = np.array([.5, .5])
numInfected = 1
numDead = 0
# Initialize the population outcome statistics list of tuples
statsStorage = [(populationSize-numInfected-numDead, numInfected, numDead)]
# Update environment with the population
landscape.update(population)
# Establish the dynamical plots and perform the first plot
plt.ion()
fig, ax = plt.subplots(2, 1)
populationPlotter(population, landscape, statsStorage, ax, showTrails=showTrails)
plt.show()
plt.pause(.05)
ax[0].clear()
ax[1].clear()
# extraSteps lets the simulation run for n extra days past the elimination of the infection
extraSteps = 10
maxInfected = 0
while numInfected or extraSteps:
# Update the population agent by agent
for a in range(populationSize):
population[a].update(landscape, population)
# Update the landscape based on the new population
landscape.update(population)
# Stats update
numInfected = sum([p.alive and p.infected for p in population])
if numInfected > maxInfected:
maxInfected = numInfected
numDead = sum([not p.alive for p in population])
statsStorage.append((populationSize-numInfected-numDead, numInfected, numDead))
# Plot
populationPlotter(population, landscape, statsStorage, ax, showTrails=showTrails)
plt.show()
plt.pause(.05)
# Lagged exit condition
if numInfected == 0:
extraSteps -= 1
if not extraSteps:
break
# Clear plots (not executed on the last step in order to keep the final plot around for viewing)
ax[0].clear()
ax[1].clear()
plt.ioff()
# Static plots
fig2, ax2 = plt.subplots(2, 1)
# Plot age histogram
ages = [a.age for a in population]
ax2[0].hist(ages, density=True)
# Compute the percentage of people with pre-existing conditions in each age group, or who died in each age group
ageHists, ageBins = np.histogram(ages)
PCvals = np.zeros(len(ageHists), dtype=float)
deathVals = PCvals.copy()
for a in population:
for bin in range(1, len(ageBins)):
if a.age < ageBins[bin]:
if a.preexistingCondition:
PCvals[bin-1] += 1/float(ageHists[bin-1])
if not a.alive:
deathVals[bin-1] += 1/float(ageHists[bin-1])
break
# Plot the associated graphs
ax2[1].bar(ageBins[:-1], PCvals, width=[(ageBins[i+1] - ageBins[i]) for i in range(len(ageBins)-1)], align='edge')
ax2[0].set_title('Ages')
ax2[1].set_title('Pre-existing Conditions\nLong Illnesses: {0:.0f}%'.format(100*sum([1 if a.infectionTime > 14 else 0 for a in population])/populationSize))
fig2.suptitle('Initial Distributions')
fig3, ax3 = plt.subplots()
ax3.bar(ageBins[:-1], deathVals, width=[(ageBins[i+1] - ageBins[i]) for i in range(len(ageBins)-1)], align='edge')
ax3.set_title('Deaths Given Age: Overall Death rate: {0:.0f}%'.format(100*numDead/maxInfected))
fig3.suptitle('Death Statistics')
plt.show()
| 36.664921
| 185
| 0.697558
|
718156a8f8df1fc12e965b5b800d714356098d3e
| 946
|
py
|
Python
|
setup.py
|
dcmartin/Auto_TS
|
398a009be4814709c53ca644a0d3e228350ad5d9
|
[
"Apache-2.0"
] | 1
|
2021-01-25T01:06:18.000Z
|
2021-01-25T01:06:18.000Z
|
setup.py
|
dcmartin/Auto_TS
|
398a009be4814709c53ca644a0d3e228350ad5d9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dcmartin/Auto_TS
|
398a009be4814709c53ca644a0d3e228350ad5d9
|
[
"Apache-2.0"
] | null | null | null |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="auto_ts",
version="0.0.28",
author="Ram Seshadri",
# author_email="author@example.com",
description="Automatically Build Multiple Time Series models fast - now with Facebook Prophet!",
long_description=long_description,
long_description_content_type="text/markdown",
license='Apache License 2.0',
url="https://github.com/AutoViML/Auto_TS",
packages=setuptools.find_packages(exclude=("tests",)),
install_requires=[
"ipython",
"jupyter",
"pmdarima",
"pandas",
"matplotlib",
"seaborn",
"scikit-learn==0.22.2",
"fbprophet",
"statsmodels",
"prettytable",
"tscv"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| 27.028571
| 100
| 0.610994
|
1769c8bdd6b60e366f0519add3f81d5d940c93f4
| 4,180
|
py
|
Python
|
Tests/GUI/DMachineSetup/test_PWSlot29.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
Tests/GUI/DMachineSetup/test_PWSlot29.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
Tests/GUI/DMachineSetup/test_PWSlot29.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@date Created on Wed Jan 20 14:10:24 2016
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
"""
import sys
from unittest import TestCase
from PyQt5 import QtWidgets
from PyQt5.QtTest import QTest
from pyleecan.Classes.LamSlotWind import LamSlotWind
from pyleecan.Classes.SlotW29 import SlotW29
from pyleecan.GUI.Dialog.DMachineSetup.SWSlot.PWSlot29.PWSlot29 import PWSlot29
class test_PWSlot29(TestCase):
"""Test that the widget PWSlot29 behave like it should"""
def setUp(self):
"""Run at the beginning of every test to set up the GUI"""
self.test_obj = LamSlotWind(Rint=0.1, Rext=0.2)
self.test_obj.slot = SlotW29(
H0=0.10, H1=0.11, H2=0.12, W0=0.13, W1=0.14, W2=0.15
)
self.widget = PWSlot29(self.test_obj)
@classmethod
def setUpClass(cls):
"""Start the app for the test"""
print("\nStart Test PWSlot29")
cls.app = QtWidgets.QApplication(sys.argv)
@classmethod
def tearDownClass(cls):
"""Exit the app after the test"""
cls.app.quit()
def test_init(self):
"""Check that the Widget spinboxes initialise to the lamination values"""
self.assertEqual(self.widget.lf_H0.value(), 0.10)
self.assertEqual(self.widget.lf_H1.value(), 0.11)
self.assertEqual(self.widget.lf_H2.value(), 0.12)
self.assertEqual(self.widget.lf_W0.value(), 0.13)
self.assertEqual(self.widget.lf_W1.value(), 0.14)
self.assertEqual(self.widget.lf_W2.value(), 0.15)
def test_set_W0(self):
"""Check that the Widget allows updating W0"""
self.widget.lf_W0.clear() # Clear the field before writing
QTest.keyClicks(self.widget.lf_W0, "0.31")
self.widget.lf_W0.editingFinished.emit() # To trigger the slot
self.assertEqual(self.widget.slot.W0, 0.31)
self.assertEqual(self.test_obj.slot.W0, 0.31)
def test_set_W1(self):
"""Check that the Widget allows updating W1"""
self.widget.lf_W1.clear()
QTest.keyClicks(self.widget.lf_W1, "0.32")
self.widget.lf_W1.editingFinished.emit() # To trigger the slot
self.assertEqual(self.widget.slot.W1, 0.32)
self.assertEqual(self.test_obj.slot.W1, 0.32)
def test_set_W2(self):
"""Check that the Widget allows updating W2"""
self.widget.lf_W2.clear()
QTest.keyClicks(self.widget.lf_W2, "0.33")
self.widget.lf_W2.editingFinished.emit() # To trigger the slot
self.assertEqual(self.widget.slot.W2, 0.33)
self.assertEqual(self.test_obj.slot.W2, 0.33)
def test_set_H0(self):
"""Check that the Widget allows updating H0"""
self.widget.lf_H0.clear()
QTest.keyClicks(self.widget.lf_H0, "0.34")
self.widget.lf_H0.editingFinished.emit() # To trigger the slot
self.assertEqual(self.widget.slot.H0, 0.34)
self.assertEqual(self.test_obj.slot.H0, 0.34)
def test_set_H1(self):
"""Check that the Widget allows updating H1"""
self.widget.lf_H1.clear()
QTest.keyClicks(self.widget.lf_H1, "0.35")
self.widget.lf_H1.editingFinished.emit() # To trigger the slot
self.assertEqual(self.widget.slot.H1, 0.35)
self.assertEqual(self.test_obj.slot.H1, 0.35)
def test_set_H2(self):
"""Check that the Widget allows updating H2"""
self.widget.lf_H2.clear()
QTest.keyClicks(self.widget.lf_H2, "0.36")
self.widget.lf_H2.editingFinished.emit() # To trigger the slot
self.assertEqual(self.widget.slot.H2, 0.36)
self.assertEqual(self.test_obj.slot.H2, 0.36)
def test_output_txt(self):
"""Check that the Output text is computed and correct
"""
self.test_obj = LamSlotWind(
Rint=0.1, Rext=0.5, is_internal=True, is_stator=False, L1=0.9, Nrvd=2
)
self.test_obj.slot = SlotW29(
Zs=6, W0=0.05, H0=0.05, H1=0.1, W1=0.1, H2=0.2, W2=0.15
)
self.widget = PWSlot29(self.test_obj)
self.assertEqual(
self.widget.w_out.out_slot_height.text(), "Slot height: 0.3506 m"
)
| 35.12605
| 81
| 0.642105
|
f9ce48baa3aa5591af4f44f77e4dd9bc0aae86c4
| 4,540
|
py
|
Python
|
tests/sfmutils/test_find_warcs.py
|
NGTmeaty/sfm-utils
|
47ac6b8a894f5b02d947d76c74aa61d59cb5d48d
|
[
"MIT"
] | 2
|
2016-05-08T06:44:13.000Z
|
2016-05-16T15:07:22.000Z
|
tests/sfmutils/test_find_warcs.py
|
NGTmeaty/sfm-utils
|
47ac6b8a894f5b02d947d76c74aa61d59cb5d48d
|
[
"MIT"
] | 13
|
2015-12-02T22:00:22.000Z
|
2021-10-29T21:01:01.000Z
|
tests/sfmutils/test_find_warcs.py
|
NGTmeaty/sfm-utils
|
47ac6b8a894f5b02d947d76c74aa61d59cb5d48d
|
[
"MIT"
] | 4
|
2020-05-27T05:05:05.000Z
|
2021-02-12T22:28:47.000Z
|
import tests
from mock import patch, MagicMock, call
from sfmutils.find_warcs import main
from sfmutils.api_client import ApiClient
class TestFindWarcs(tests.TestCase):
@patch("sfmutils.find_warcs.sys")
@patch("sfmutils.find_warcs.ApiClient", autospec=True)
def test_find_warcs(self, mock_api_client_cls, mock_sys):
mock_api_client = MagicMock(spec=ApiClient)
mock_api_client_cls.side_effect = [mock_api_client]
mock_api_client.collections.side_effect = [[{"collection_id": "abc123"}], [{"collection_id": "def456"}]]
mock_api_client.warcs.side_effect = [[{"path": "/sfm-data/abc123"}],
[{"path": "/sfm-data/def456"}, {"path": "/sfm-data/def789"}]]
self.assertEqual("/sfm-data/abc123 /sfm-data/def456 /sfm-data/def789",
main("find_warcs.py --debug=True abc def".split(" ")))
self.assertEqual([call(collection_id_startswith='abc'), call(collection_id_startswith='def')],
mock_api_client.collections.call_args_list)
self.assertEqual(
[call(harvest_date_end=None, harvest_date_start=None, created_date_start=None,
created_date_end=None, collection_id='abc123'),
call(harvest_date_end=None, harvest_date_start=None, created_date_start=None,
created_date_end=None, collection_id='def456')],
mock_api_client.warcs.call_args_list)
mock_sys.exit.assert_not_called()
@patch("sfmutils.find_warcs.sys")
@patch("sfmutils.find_warcs.ApiClient", autospec=True)
def test_find_warcs_with_args(self, mock_api_client_cls, mock_sys):
mock_api_client = MagicMock(spec=ApiClient)
mock_api_client_cls.side_effect = [mock_api_client]
mock_api_client.collections.side_effect = [[{"collection_id": "def456"}]]
mock_api_client.warcs.side_effect = [[{"path": "/sfm-data/abc123"}],
[{"path": "/sfm-data/def456"}, {"path": "/sfm-data/def789"}]]
self.assertEqual("/sfm-data/abc123 /sfm-data/def456 /sfm-data/def789",
main("find_warcs.py --debug=True --harvest-start 2015-02-22T14:49:07Z --harvest-end "
"2016-02-22T14:49:07Z --warc-end 2014-02-22T14:49:07Z --warc-start "
"2013-02-22T14:49:07Z abcdefghijklmnopqrstuvwxyz012345 def".split(" ")))
self.assertEqual([call(collection_id_startswith='def')],
mock_api_client.collections.call_args_list)
self.assertEqual(
[call(harvest_date_end='2016-02-22T14:49:07Z', harvest_date_start='2015-02-22T14:49:07Z',
collection_id='abcdefghijklmnopqrstuvwxyz012345', created_date_end="2014-02-22T14:49:07Z",
created_date_start="2013-02-22T14:49:07Z"),
call(harvest_date_end='2016-02-22T14:49:07Z', harvest_date_start='2015-02-22T14:49:07Z',
collection_id='def456', created_date_end="2014-02-22T14:49:07Z",
created_date_start="2013-02-22T14:49:07Z")],
mock_api_client.warcs.call_args_list)
mock_sys.exit.assert_not_called()
@patch("sfmutils.find_warcs.sys")
@patch("sfmutils.find_warcs.ApiClient", autospec=True)
def test_find_warcs_no_matches(self, mock_api_client_cls, mock_sys):
mock_api_client = MagicMock(spec=ApiClient)
mock_api_client_cls.side_effect = [mock_api_client]
mock_api_client.collections.side_effect = [[]]
main("find_warcs.py --debug=True abc".split(" "))
self.assertEqual([call(collection_id_startswith='abc')],
mock_api_client.collections.call_args_list)
mock_api_client.warcs.assert_not_called()
mock_sys.exit.assert_called_once_with(1)
@patch("sfmutils.find_warcs.sys")
@patch("sfmutils.find_warcs.ApiClient", autospec=True)
def test_find_warcs_multiple_matches(self, mock_api_client_cls, mock_sys):
mock_api_client = MagicMock(spec=ApiClient)
mock_api_client_cls.side_effect = [mock_api_client]
mock_api_client.collections.side_effect = [[{"collection_id": "abc123"}, {"collection_id": "abc456"}]]
main("find_warcs.py --debug=True abc".split(" "))
self.assertEqual([call(collection_id_startswith='abc')],
mock_api_client.collections.call_args_list)
mock_api_client.warcs.assert_not_called()
mock_sys.exit.assert_called_once_with(1)
| 57.468354
| 112
| 0.659912
|
e8fd660323f01244ff7da634a5387e0ec3f63e2b
| 2,905
|
py
|
Python
|
main.py
|
coeragames/img-anal
|
ea28d91a416dd2ab2393f573e8cb83d6bb78dbfa
|
[
"Apache-2.0"
] | 1
|
2021-02-07T08:51:03.000Z
|
2021-02-07T08:51:03.000Z
|
main.py
|
coeragames/img-anal
|
ea28d91a416dd2ab2393f573e8cb83d6bb78dbfa
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
coeragames/img-anal
|
ea28d91a416dd2ab2393f573e8cb83d6bb78dbfa
|
[
"Apache-2.0"
] | null | null | null |
from PIL import Image
from PIL import ImageFilter
import wget
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
url = -1
analyse = -1
analyse = url = input("Enter the path of the image to analyse: ")
if url != "text" :
wget.download(url, 'img')
img_name = -1
img_name = input(" Great! What was the name of the image? ")
if img_name != "text" :
img_type = -1
img_type = input("Perfect! Tell me what I should do with " + img_name + ": ")
if img_type == "find-edges" :
image = Image.open("img/" + img_name)
image = image.filter(ImageFilter.FIND_EDGES)
image = image.save("end/" + img_name)
os.remove("img/" + img_name)
print("Done! Your image is stored in end/" + img_name)
if img_type == "contour" :
image = Image.open("img/" + img_name)
image = image.filter(ImageFilter.CONTOUR)
image = image.save("end/" + img_name)
os.remove("img/" + img_name)
print("Done! Your image is stored in end/" + img_name)
if img_type == "blur" :
image = Image.open("img/" + img_name)
image = image.filter(ImageFilter.BLUR)
image = image.save("end/" + img_name)
os.remove("img/" + img_name)
print("Done! Your image is stored in end/" + img_name)
if img_type == "maxi-blur" :
image = Image.open("img/" + img_name)
image = image.filter(ImageFilter.GaussianBlur(20))
image = image.save("end/" + img_name)
os.remove("img/" + img_name)
print("Done! Your image is stored in end/" + img_name)
if img_type == "enhance-max" :
image = Image.open("img/" + img_name)
image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
image = image.save("end/" + img_name)
os.remove("img/" + img_name)
print("Done! Your image is stored in end/" + img_name)
if img_type == "enhance" :
image = Image.open("img/" + img_name)
image = image.filter(ImageFilter.EDGE_ENHANCE)
image = image.save("end/" + img_name)
os.remove("img/" + img_name)
print("Done! Your image is stored in end/" + img_name)
if img_type == "blur-faces" :
def plotImages(img):
plt.imshow(img, cmap="gray")
plt.axis('off')
plt.style.use('seaborn')
plt.show()
image = cv2.imread('img/' + img_name)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plotImages(image)
face_detect = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
face_data = face_detect.detectMultiScale(image, 1.3, 5)
for (x, y, w, h) in face_data:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
roi = image[y:y+h, x:x+w]
roi = cv2.GaussianBlur(roi, (23, 23), 30)
image[y:y+roi.shape[0], x:x+roi.shape[1]] = roi
plotImages(image)
| 33.77907
| 86
| 0.598967
|
899eb1809b219a45912d3a3b45f8936257d5b266
| 1,870
|
py
|
Python
|
jobsubmit.py
|
tarasowski/machine-learning-plagiarism-detector
|
d499883af3a8968e1f96d730dfd3b4dfe1d1d50f
|
[
"MIT"
] | 3
|
2020-04-13T07:59:25.000Z
|
2021-01-28T12:57:25.000Z
|
jobsubmit.py
|
tarasowski/machine-learning-plagiarism-detector
|
d499883af3a8968e1f96d730dfd3b4dfe1d1d50f
|
[
"MIT"
] | 1
|
2021-03-31T19:33:28.000Z
|
2021-03-31T19:33:28.000Z
|
jobsubmit.py
|
tarasowski/machine-learning-plagiarism-detector
|
d499883af3a8968e1f96d730dfd3b4dfe1d1d50f
|
[
"MIT"
] | 1
|
2021-01-31T03:32:53.000Z
|
2021-01-31T03:32:53.000Z
|
import sagemaker
from sagemaker.sklearn.estimator import SKLearn
from sklearn.metrics import accuracy_score
import boto3
import pandas as pd
import os
role = 'SageMakerRole'
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()
train_key = './models/train.csv'
test_key = './models/test.csv'
data_dir = './models'
prefix = 'sagemaker/plagiarism'
train_path = sagemaker_session.upload_data(train_key, bucket=bucket, key_prefix=prefix)
test_path = sagemaker_session.upload_data(test_key, bucket=bucket, key_prefix=prefix)
def local():
sklearn = SKLearn(
entry_point='train.py',
source_dir='./src/',
role=role,
train_instance_count=1,
train_instance_type='local',
hyperparameters={
'max_depth': 5,
'n_estimators': 10
})
sklearn.fit({'train': 'file://models/train.csv'})
predictor = sklearn.deploy(initial_instance_count=1, instance_type='local')
test_data = pd.read_csv('./models/test.csv', header=None, names=None)
test_y = test_data.iloc[:, 0]
test_x = test_data.iloc[:, 1:]
test_y_preds = predictor.predict(test_x)
accuracy = accuracy_score(test_y, test_y_preds)
print('The current accuracy score for the prediction', accuracy)
def cloud():
sklearn = SKLearn(
entry_point='train.py',
source_dir='./src/',
role=role,
train_instance_count=1,
train_instance_type='ml.c4.xlarge',
sagemaker_session=sagemaker_session,
hyperparameters={
'max_depth': 5,
'n_estimators': 10
})
sklearn.fit({'train': train_path})
if __name__ == '__main__':
mode = os.environ.get('MODE')
local() if mode == 'local' else cloud()
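# Illustrative invocations (assuming AWS credentials and the 'SageMakerRole' exist):
#   MODE=local python jobsubmit.py   # train and deploy with the local SageMaker mode
#   MODE=cloud python jobsubmit.py   # train on an ml.c4.xlarge instance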
| 31.166667
| 87
| 0.629947
|
a0c8d9fe55a3dd14f4f12b8230d0caf29236abdd
| 3,554
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/alicyclobacillusacidocaldariuslaa1.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/alicyclobacillusacidocaldariuslaa1.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/alicyclobacillusacidocaldariuslaa1.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Alicyclobacillus acidocaldarius LAA1.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def AlicyclobacillusAcidocaldariusLaa1(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Alicyclobacillus acidocaldarius LAA1 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default False.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Alicyclobacillus acidocaldarius LAA1 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="AlicyclobacillusAcidocaldariusLaa1",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
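# Example usage (assumes the ensmallen package is installed and the STRING data can be downloaded):
#     graph = AlicyclobacillusAcidocaldariusLaa1(directed=False, version="links.v11.5")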
| 33.847619
| 223
| 0.684299
|
ab064ba35d3d1d4f2041be96a30708fb3eb21cb6
| 12,768
|
py
|
Python
|
test/python/transpiler/test_template_matching.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | 1
|
2021-09-25T18:49:52.000Z
|
2021-09-25T18:49:52.000Z
|
test/python/transpiler/test_template_matching.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | 6
|
2019-02-14T04:42:41.000Z
|
2021-06-09T05:50:56.000Z
|
test/python/transpiler/test_template_matching.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | 1
|
2020-04-15T07:23:23.000Z
|
2020-04-15T07:23:23.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-member
"""Test the TemplateOptimization pass."""
import unittest
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.circuit import Parameter, Gate
from qiskit.extensions import UnitaryGate
from qiskit.quantum_info import Operator
from qiskit.circuit.library.templates import template_nct_2a_2, template_nct_5a_3
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.converters.circuit_to_dagdependency import circuit_to_dagdependency
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import TemplateOptimization
from qiskit.test import QiskitTestCase
from qiskit.transpiler.exceptions import TranspilerError
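# A template is a short circuit that composes to the identity; TemplateOptimization searches the
# input circuit for maximal matches of each template and substitutes the cheaper complement.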
class TestTemplateMatching(QiskitTestCase):
"""Test the TemplateOptimization pass."""
def test_pass_cx_cancellation_no_template_given(self):
"""
        Check the cancellation of CX gates using the three basic templates
        x-x, cx-cx and ccx-ccx when no template list is given.
"""
qr = QuantumRegister(3)
circuit_in = QuantumCircuit(qr)
circuit_in.h(qr[0])
circuit_in.h(qr[0])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[1], qr[0])
circuit_in.cx(qr[1], qr[0])
pass_manager = PassManager()
pass_manager.append(TemplateOptimization())
circuit_in_opt = pass_manager.run(circuit_in)
circuit_out = QuantumCircuit(qr)
circuit_out.h(qr[0])
circuit_out.h(qr[0])
self.assertEqual(circuit_in_opt, circuit_out)
def test_pass_cx_cancellation_own_template(self):
"""
        Check the cancellation of CX gates using a self-made cx-cx template.
"""
qr = QuantumRegister(2, "qr")
circuit_in = QuantumCircuit(qr)
circuit_in.h(qr[0])
circuit_in.h(qr[0])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[1], qr[0])
circuit_in.cx(qr[1], qr[0])
dag_in = circuit_to_dag(circuit_in)
qrt = QuantumRegister(2, "qrc")
qct = QuantumCircuit(qrt)
qct.cx(0, 1)
qct.cx(0, 1)
template_list = [qct]
pass_ = TemplateOptimization(template_list)
dag_opt = pass_.run(dag_in)
circuit_expected = QuantumCircuit(qr)
circuit_expected.h(qr[0])
circuit_expected.h(qr[0])
dag_expected = circuit_to_dag(circuit_expected)
self.assertEqual(dag_opt, dag_expected)
def test_pass_cx_cancellation_template_from_library(self):
"""
        Check the cancellation of CX gates using the library template cx-cx (2a_2).
"""
qr = QuantumRegister(2, "qr")
circuit_in = QuantumCircuit(qr)
circuit_in.h(qr[0])
circuit_in.h(qr[0])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[1], qr[0])
circuit_in.cx(qr[1], qr[0])
dag_in = circuit_to_dag(circuit_in)
template_list = [template_nct_2a_2()]
pass_ = TemplateOptimization(template_list)
dag_opt = pass_.run(dag_in)
circuit_expected = QuantumCircuit(qr)
circuit_expected.h(qr[0])
circuit_expected.h(qr[0])
dag_expected = circuit_to_dag(circuit_expected)
self.assertEqual(dag_opt, dag_expected)
def test_pass_template_nct_5a(self):
"""
Verify the result of template matching and substitution with the template 5a_3.
q_0: ───────■─────────■────■──
┌─┴─┐ ┌─┴─┐ │
q_1: ──■──┤ X ├──■──┤ X ├──┼──
┌─┴─┐└───┘┌─┴─┐└───┘┌─┴─┐
q_2: ┤ X ├─────┤ X ├─────┤ X ├
└───┘ └───┘ └───┘
The circuit before optimization is:
┌───┐ ┌───┐
qr_0: ┤ X ├───────────────┤ X ├─────
└─┬─┘ ┌───┐┌───┐└─┬─┘
qr_1: ──┼────■──┤ X ├┤ Z ├──┼────■──
│ │ └─┬─┘└───┘ │ │
qr_2: ──┼────┼────■────■────■────┼──
│ │ ┌───┐┌─┴─┐ │ │
qr_3: ──■────┼──┤ H ├┤ X ├──■────┼──
│ ┌─┴─┐└───┘└───┘ ┌─┴─┐
qr_4: ──■──┤ X ├───────────────┤ X ├
└───┘ └───┘
The match is given by [0,1][1,2][2,7], after substitution the circuit becomes:
┌───┐ ┌───┐
qr_0: ┤ X ├───────────────┤ X ├
└─┬─┘ ┌───┐┌───┐└─┬─┘
qr_1: ──┼───────┤ X ├┤ Z ├──┼──
│ └─┬─┘└───┘ │
qr_2: ──┼────■────■────■────■──
│ │ ┌───┐┌─┴─┐ │
qr_3: ──■────┼──┤ H ├┤ X ├──■──
│ ┌─┴─┐└───┘└───┘
qr_4: ──■──┤ X ├───────────────
└───┘
"""
qr = QuantumRegister(5, "qr")
circuit_in = QuantumCircuit(qr)
circuit_in.ccx(qr[3], qr[4], qr[0])
circuit_in.cx(qr[1], qr[4])
circuit_in.cx(qr[2], qr[1])
circuit_in.h(qr[3])
circuit_in.z(qr[1])
circuit_in.cx(qr[2], qr[3])
circuit_in.ccx(qr[2], qr[3], qr[0])
circuit_in.cx(qr[1], qr[4])
dag_in = circuit_to_dag(circuit_in)
template_list = [template_nct_5a_3()]
pass_ = TemplateOptimization(template_list)
dag_opt = pass_.run(dag_in)
circuit_expected = QuantumCircuit(qr)
circuit_expected.ccx(qr[3], qr[4], qr[0])
circuit_expected.cx(qr[2], qr[4])
circuit_expected.cx(qr[2], qr[1])
circuit_expected.z(qr[1])
circuit_expected.h(qr[3])
circuit_expected.cx(qr[2], qr[3])
circuit_expected.ccx(qr[2], qr[3], qr[0])
dag_expected = circuit_to_dag(circuit_expected)
self.assertEqual(dag_opt, dag_expected)
def test_pass_template_wrong_type(self):
"""
If a template is not equivalent to the identity, it raises an error.
"""
qr = QuantumRegister(2, "qr")
circuit_in = QuantumCircuit(qr)
circuit_in.h(qr[0])
circuit_in.h(qr[0])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[0], qr[1])
circuit_in.cx(qr[1], qr[0])
circuit_in.cx(qr[1], qr[0])
dag_in = circuit_to_dag(circuit_in)
qrt = QuantumRegister(2, "qrc")
qct = QuantumCircuit(qrt)
qct.cx(0, 1)
qct.x(0)
qct.h(1)
template_list = [qct]
pass_ = TemplateOptimization(template_list)
self.assertRaises(TranspilerError, pass_.run, dag_in)
def test_accept_dagdependency(self):
"""
Check that users can supply DAGDependency in the template list.
"""
circuit_in = QuantumCircuit(2)
circuit_in.cnot(0, 1)
circuit_in.cnot(0, 1)
templates = [circuit_to_dagdependency(circuit_in)]
pass_ = TemplateOptimization(template_list=templates)
circuit_out = PassManager(pass_).run(circuit_in)
self.assertEqual(circuit_out.count_ops().get("cx", 0), 0)
def test_parametric_template(self):
"""
Check matching where template has parameters.
┌───────────┐ ┌────────┐
q_0: ┤ P(-1.0*β) ├──■────────────■──┤0 ├
├───────────┤┌─┴─┐┌──────┐┌─┴─┐│ CZ(β) │
q_1: ┤ P(-1.0*β) ├┤ X ├┤ P(β) ├┤ X ├┤1 ├
└───────────┘└───┘└──────┘└───┘└────────┘
        The first test tries to match on
┌───────┐
q_0: ┤ P(-2) ├──■────────────■─────────────────────────────
├───────┤┌─┴─┐┌──────┐┌─┴─┐┌───────┐
q_1: ┤ P(-2) ├┤ X ├┤ P(2) ├┤ X ├┤ P(-3) ├──■────────────■──
├───────┤└───┘└──────┘└───┘└───────┘┌─┴─┐┌──────┐┌─┴─┐
q_2: ┤ P(-3) ├───────────────────────────┤ X ├┤ P(3) ├┤ X ├
└───────┘ └───┘└──────┘└───┘
        The second test tries to match on
┌───────┐
q_0: ┤ P(-2) ├──■────────────■────────────────────────────
├───────┤┌─┴─┐┌──────┐┌─┴─┐┌──────┐
q_1: ┤ P(-2) ├┤ X ├┤ P(2) ├┤ X ├┤ P(3) ├──■────────────■──
└┬──────┤└───┘└──────┘└───┘└──────┘┌─┴─┐┌──────┐┌─┴─┐
q_2: ─┤ P(3) ├──────────────────────────┤ X ├┤ P(3) ├┤ X ├
└──────┘ └───┘└──────┘└───┘
"""
class CZp(Gate):
"""CZ gates used for the test."""
def __init__(self, num_qubits, params):
super().__init__("cz", num_qubits, params)
def inverse(self):
inverse = UnitaryGate(np.diag([1.0, 1.0, 1.0, np.exp(-2.0j * self.params[0])]))
inverse.name = "icz"
return inverse
def template_czp2():
beta = Parameter("β")
qc = QuantumCircuit(2)
qc.p(-beta, 0)
qc.p(-beta, 1)
qc.cx(0, 1)
qc.p(beta, 1)
qc.cx(0, 1)
qc.append(CZp(2, [beta]), [0, 1])
return qc
def count_cx(qc):
"""Counts the number of CX gates for testing."""
return qc.count_ops().get("cx", 0)
circuit_in = QuantumCircuit(3)
circuit_in.p(-2, 0)
circuit_in.p(-2, 1)
circuit_in.cx(0, 1)
circuit_in.p(2, 1)
circuit_in.cx(0, 1)
circuit_in.p(-3, 1)
circuit_in.p(-3, 2)
circuit_in.cx(1, 2)
circuit_in.p(3, 2)
circuit_in.cx(1, 2)
pass_ = TemplateOptimization(template_list=[template_czp2()])
circuit_out = PassManager(pass_).run(circuit_in)
np.testing.assert_almost_equal(Operator(circuit_out).data[3, 3], np.exp(-4.0j))
np.testing.assert_almost_equal(Operator(circuit_out).data[7, 7], np.exp(-10.0j))
self.assertEqual(count_cx(circuit_out), 0) # Two matches => no CX gates.
np.testing.assert_almost_equal(Operator(circuit_in).data, Operator(circuit_out).data)
circuit_in = QuantumCircuit(3)
circuit_in.p(-2, 0)
circuit_in.p(-2, 1)
circuit_in.cx(0, 1)
circuit_in.p(2, 1)
circuit_in.cx(0, 1)
circuit_in.p(3, 1)
circuit_in.p(3, 2)
circuit_in.cx(1, 2)
circuit_in.p(3, 2)
circuit_in.cx(1, 2)
pass_ = TemplateOptimization(template_list=[template_czp2()])
circuit_out = PassManager(pass_).run(circuit_in)
self.assertEqual(count_cx(circuit_out), 2) # One match => two CX gates.
np.testing.assert_almost_equal(Operator(circuit_in).data, Operator(circuit_out).data)
def test_unbound_parameters(self):
"""
Test that partial matches with parameters will not raise errors.
This tests that if parameters are still in the temporary template after
_attempt_bind then they will not be used.
"""
class PhaseSwap(Gate):
"""CZ gates used for the test."""
def __init__(self, num_qubits, params):
super().__init__("p", num_qubits, params)
def inverse(self):
inverse = UnitaryGate(
np.diag(
[1.0, 1.0, np.exp(-1.0j * self.params[0]), np.exp(-1.0j * self.params[0])]
)
)
inverse.name = "p"
return inverse
def template():
beta = Parameter("β")
qc = QuantumCircuit(2)
qc.cx(1, 0)
qc.cx(1, 0)
qc.p(beta, 1)
qc.append(PhaseSwap(2, [beta]), [0, 1])
return qc
circuit_in = QuantumCircuit(2)
circuit_in.cx(1, 0)
circuit_in.cx(1, 0)
pass_ = TemplateOptimization(template_list=[template()])
circuit_out = PassManager(pass_).run(circuit_in)
# This template will not fully match as long as gates with parameters do not
# commute with any other gates in the DAG dependency.
self.assertEqual(circuit_out.count_ops().get("cx", 0), 2)
if __name__ == "__main__":
unittest.main()
| 34.980822
| 98
| 0.512296
|
d246356b4ec75a96162d0b37d4d1cbfab9493440
| 31,626
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_reduce_op.py
|
DevilCarp/Paddle
|
04325d2cbefb029a4478bdc069d3279cd566ac6a
|
[
"Apache-2.0"
] | 2
|
2022-03-30T09:55:45.000Z
|
2022-03-30T09:55:49.000Z
|
python/paddle/fluid/tests/unittests/test_reduce_op.py
|
DevilCarp/Paddle
|
04325d2cbefb029a4478bdc069d3279cd566ac6a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_reduce_op.py
|
DevilCarp/Paddle
|
04325d2cbefb029a4478bdc069d3279cd566ac6a
|
[
"Apache-2.0"
] | 1
|
2022-03-02T11:36:03.000Z
|
2022-03-02T11:36:03.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
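# Each case below subclasses OpTest: setUp() declares op_type, inputs, attrs and the expected
# numpy outputs, and test_check_output()/test_check_grad() compare the operator against them.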
class TestSumOp(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestSumOp_fp16(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
}
self.attrs = {'dim': [0, 1, 2]}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
self.gradient = self.calc_gradient()
def test_check_output(self):
self.check_output()
def calc_gradient(self):
x = self.inputs["X"]
grad = np.ones(x.shape, dtype=x.dtype)
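        # The trailing comma below makes this a one-element tuple: one gradient per input.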
return grad,
def test_check_grad(self):
self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSumOp_bf16(OpTest):
def setUp(self):
np.random.seed(100)
self.op_type = "reduce_sum"
self.dtype = np.uint16
self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
self.attrs = {'dim': [0, 1, 2]}
self.out = self.x.sum(axis=tuple(self.attrs['dim']))
self.gradient = self.calc_gradient()
self.inputs = {'X': convert_float_to_uint16(self.x)}
self.outputs = {'Out': convert_float_to_uint16(self.out)}
self.gradient = self.calc_gradient()
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X'], 'Out', user_defined_grads=self.gradient)
def calc_gradient(self):
x = self.x
grad = np.ones(x.shape, dtype=x.dtype)
return [grad]
class TestSumOp_fp16_withInt(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
# ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
            # Integer values between 0 and 2048 can be represented exactly in float16.
'X': np.random.randint(0, 30, (10, 10)).astype("float16")
}
self.attrs = {'dim': [0, 1]}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
self.gradient = self.calc_gradient()
def test_check_output(self):
self.check_output()
def calc_gradient(self):
x = self.inputs["X"]
grad = np.ones(x.shape, dtype=x.dtype)
return grad,
def test_check_grad(self):
self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
class TestSumOp5D(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestSumOp6D(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestSumOp8D(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
}
self.attrs = {'dim': (0, 3)}
self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
@skip_check_grad_ci(
reason="reduce_max is discontinuous non-derivable function,"
" its gradient check is not supported by unittest framework.")
class TestMaxOp(OpTest):
"""Remove Max with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_max"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-1]}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
@skip_check_grad_ci(
reason="reduce_min is discontinuous non-derivable function,"
" its gradient check is not supported by unittest framework.")
class TestMinOp(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [2]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestMin6DOp(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
}
self.attrs = {'dim': [2, 4]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestMin8DOp(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestProdOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.init_data_type()
self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
self.outputs = {'Out': self.inputs['X'].prod(axis=0)}
def init_data_type(self):
self.data_type = "float32" if core.is_compiled_with_rocm(
) else "float64"
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestProd6DOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.init_data_type()
self.inputs = {
'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
}
def init_data_type(self):
self.data_type = "float32" if core.is_compiled_with_rocm(
) else "float64"
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestProd8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.init_data_type()
self.inputs = {
'X': np.random.random(
(2, 5, 3, 2, 2, 3, 4, 2)).astype(self.data_type)
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
}
def init_data_type(self):
self.data_type = "float32" if core.is_compiled_with_rocm(
) else "float64"
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestAllOp(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].all()}
self.attrs = {'reduce_all': True}
def test_check_output(self):
self.check_output()
class TestAll8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAllOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, )}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAll8DOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1, 3, 4)}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAllOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].all(axis=1), axis=1)
}
def test_check_output(self):
self.check_output()
class TestAll8DOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (5, ), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].all(axis=self.attrs['dim']), axis=5)
}
def test_check_output(self):
self.check_output()
class TestAllOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of reduce_all_op must be Variable.
input1 = 12
self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
# The input dtype of reduce_all_op must be bool.
input2 = fluid.layers.data(
name='input2', shape=[12, 10], dtype="int32")
self.assertRaises(TypeError, fluid.layers.reduce_all, input2)
class TestAnyOp(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].any()}
self.attrs = {'reduce_all': True}
def test_check_output(self):
self.check_output()
class TestAny8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAnyOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1]}
self.outputs = {'Out': self.inputs['X'].any(axis=1)}
def test_check_output(self):
self.check_output()
class TestAny8DOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (3, 6)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAnyOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, ), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def test_check_output(self):
self.check_output()
class TestAny8DOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1, ), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def test_check_output(self):
self.check_output()
class TestAnyOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of reduce_any_op must be Variable.
input1 = 12
self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
# The input dtype of reduce_any_op must be bool.
input2 = fluid.layers.data(
name='input2', shape=[12, 10], dtype="int32")
self.assertRaises(TypeError, fluid.layers.reduce_any, input2)
class Test1DReduce(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random(120).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class Test2DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [0]}
self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
class Test2DReduce1(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1]}
self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce1(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce2(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [-2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce3(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1, 2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test8DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': (4, 2, 3)}
self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class TestKeepDimReduce(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=self.attrs['keep_dim'])
}
class TestKeepDim8DReduce(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
}
self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=self.attrs['keep_dim'])
}
@skip_check_grad_ci(
reason="reduce_max is discontinuous non-derivable function,"
" its gradient check is not supported by unittest framework.")
class TestReduceMaxOpMultiAxises(OpTest):
"""Remove Max with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_max"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1]}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
@skip_check_grad_ci(
reason="reduce_min is discontinuous non-derivable function,"
" its gradient check is not supported by unittest framework.")
class TestReduceMinOpMultiAxises(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1, 2]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestKeepDimReduceSumMultiAxises(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1], 'keep_dim': True}
self.outputs = {
'Out':
self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumWithDimOne(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
self.attrs = {'dim': [1, 2], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=True)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumWithNumelOne(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': False}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=False)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceAll(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
self.attrs = {'reduce_all': True, 'keep_dim': False}
self.outputs = {'Out': self.inputs['X'].sum()}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class Test1DReduceWithAxes1(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random(100).astype("float64")}
self.attrs = {'dim': [0], 'keep_dim': False}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceWithDtype(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
self.attrs = {'reduce_all': True}
self.attrs.update({
'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
})
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceWithDtype1(TestReduceWithDtype):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
self.attrs = {'dim': [1]}
self.attrs.update({
'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
})
class TestReduceWithDtype2(TestReduceWithDtype):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
self.attrs = {'dim': [1], 'keep_dim': True}
self.attrs.update({
'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
})
class TestReduceSumOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of reduce_sum_op must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
# The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)
class API_TestSumOp(unittest.TestCase):
def run_static(self,
shape,
x_dtype,
attr_axis,
attr_dtype=None,
np_axis=None):
if np_axis is None:
np_axis = attr_axis
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=shape, dtype=x_dtype)
result_sum = paddle.sum(x=data,
axis=attr_axis,
dtype=attr_dtype)
exe = fluid.Executor(place)
input_data = np.random.rand(*shape).astype(x_dtype)
res, = exe.run(feed={"data": input_data},
fetch_list=[result_sum])
self.assertTrue(
np.allclose(
res, np.sum(input_data.astype(attr_dtype), axis=np_axis)))
def test_static(self):
shape = [10, 10]
axis = 1
self.run_static(shape, "bool", axis, attr_dtype=None)
self.run_static(shape, "bool", axis, attr_dtype="int32")
self.run_static(shape, "bool", axis, attr_dtype="int64")
self.run_static(shape, "bool", axis, attr_dtype="float16")
self.run_static(shape, "int32", axis, attr_dtype=None)
self.run_static(shape, "int32", axis, attr_dtype="int32")
self.run_static(shape, "int32", axis, attr_dtype="int64")
self.run_static(shape, "int32", axis, attr_dtype="float64")
self.run_static(shape, "int64", axis, attr_dtype=None)
self.run_static(shape, "int64", axis, attr_dtype="int64")
self.run_static(shape, "int64", axis, attr_dtype="int32")
self.run_static(shape, "float32", axis, attr_dtype=None)
self.run_static(shape, "float32", axis, attr_dtype="float32")
self.run_static(shape, "float32", axis, attr_dtype="float64")
self.run_static(shape, "float32", axis, attr_dtype="int64")
self.run_static(shape, "float64", axis, attr_dtype=None)
self.run_static(shape, "float64", axis, attr_dtype="float32")
self.run_static(shape, "float64", axis, attr_dtype="float64")
shape = [5, 5, 5]
self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
self.run_static(
shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2))
def test_dygraph(self):
np_x = np.random.random([2, 3, 4]).astype('int32')
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np_x)
out0 = paddle.sum(x).numpy()
out1 = paddle.sum(x, axis=0).numpy()
out2 = paddle.sum(x, axis=(0, 1)).numpy()
out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()
self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())
class TestAllAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
paddle.enable_static()
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="bool")
result = paddle.all(x=input)
input_np = np.random.randint(0, 2, [4, 4]).astype("bool")
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input": input_np},
fetch_list=[result])
self.assertTrue(np.allclose(fetches[0], np.all(input_np)))
def test_static(self):
for place in self.places:
self.check_static_result(place=place)
def test_dygraph(self):
paddle.disable_static()
for place in self.places:
with fluid.dygraph.guard(place):
np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
x = fluid.layers.assign(np_x)
x = fluid.layers.cast(x, 'bool')
out1 = paddle.all(x)
np_out1 = out1.numpy()
expect_res1 = np.all(np_x)
self.assertTrue((np_out1 == expect_res1).all())
out2 = paddle.all(x, axis=0)
np_out2 = out2.numpy()
expect_res2 = np.all(np_x, axis=0)
self.assertTrue((np_out2 == expect_res2).all())
out3 = paddle.all(x, axis=-1)
np_out3 = out3.numpy()
expect_res3 = np.all(np_x, axis=-1)
self.assertTrue((np_out3 == expect_res3).all())
out4 = paddle.all(x, axis=1, keepdim=True)
np_out4 = out4.numpy()
expect_res4 = np.all(np_x, axis=1, keepdims=True)
self.assertTrue((np_out4 == expect_res4).all())
paddle.enable_static()
class TestAnyAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
paddle.enable_static()
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="bool")
result = paddle.any(x=input)
input_np = np.random.randint(0, 2, [4, 4]).astype("bool")
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input": input_np},
fetch_list=[result])
self.assertTrue(np.allclose(fetches[0], np.any(input_np)))
def test_static(self):
for place in self.places:
self.check_static_result(place=place)
def test_dygraph(self):
paddle.disable_static()
for place in self.places:
with fluid.dygraph.guard(place):
np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
x = fluid.layers.assign(np_x)
x = fluid.layers.cast(x, 'bool')
out1 = paddle.any(x)
np_out1 = out1.numpy()
expect_res1 = np.any(np_x)
self.assertTrue((np_out1 == expect_res1).all())
out2 = paddle.any(x, axis=0)
np_out2 = out2.numpy()
expect_res2 = np.any(np_x, axis=0)
self.assertTrue((np_out2 == expect_res2).all())
out3 = paddle.any(x, axis=-1)
np_out3 = out3.numpy()
expect_res3 = np.any(np_x, axis=-1)
self.assertTrue((np_out3 == expect_res3).all())
out4 = paddle.any(x, axis=1, keepdim=True)
np_out4 = out4.numpy()
expect_res4 = np.any(np_x, axis=1, keepdims=True)
self.assertTrue((np_out4 == expect_res4).all())
paddle.enable_static()
if __name__ == '__main__':
import paddle
paddle.enable_static()
unittest.main()
| 33.150943
| 99
| 0.567413
|
7d795b8e6a0f3602692577a7243a8678a11f0360
| 9,211
|
py
|
Python
|
pygame_menu/examples/game_selector.py
|
arpruss/pygame-menu
|
25cefb5cfc60383544d704b83a32d43dfc621c23
|
[
"MIT"
] | null | null | null |
pygame_menu/examples/game_selector.py
|
arpruss/pygame-menu
|
25cefb5cfc60383544d704b83a32d43dfc621c23
|
[
"MIT"
] | null | null | null |
pygame_menu/examples/game_selector.py
|
arpruss/pygame-menu
|
25cefb5cfc60383544d704b83a32d43dfc621c23
|
[
"MIT"
] | null | null | null |
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - GAME SELECTOR
Game with 3 difficulty options.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = ['main']
import pygame
import pygame_menu
from pygame_menu.examples import create_example_window
from random import randrange
from typing import Tuple, Any, Optional, List
# -----------------------------------------------------------------------------
# Constants and global variables
# -----------------------------------------------------------------------------
ABOUT = ['pygame-menu {0}'.format(pygame_menu.__version__),
'Author: @{0}'.format(pygame_menu.__author__),
'Email: {0}'.format(pygame_menu.__email__)]
DIFFICULTY = ['EASY']
FPS = 60
WINDOW_SIZE = (640, 480)
clock: Optional['pygame.time.Clock'] = None
main_menu: Optional['pygame_menu.Menu'] = None
surface: Optional['pygame.Surface'] = None
# -----------------------------------------------------------------------------
# Methods
# -----------------------------------------------------------------------------
def change_difficulty(value: Tuple[Any, int], difficulty: str) -> None:
"""
Change difficulty of the game.
:param value: Tuple containing the data of the selected object
:param difficulty: Optional parameter passed as argument to add_selector
"""
selected, index = value
print('Selected difficulty: "{0}" ({1}) at index {2}'
''.format(selected, difficulty, index))
DIFFICULTY[0] = difficulty
def random_color() -> Tuple[int, int, int]:
"""
Return a random color.
:return: Color tuple
"""
return randrange(0, 255), randrange(0, 255), randrange(0, 255)
def play_function(difficulty: List, font: 'pygame.font.Font', test: bool = False) -> None:
"""
Main game function.
:param difficulty: Difficulty of the game
:param font: Pygame font
:param test: Test method, if ``True`` only one loop is allowed
:return: None
"""
assert isinstance(difficulty, list)
difficulty = difficulty[0]
assert isinstance(difficulty, str)
# Define globals
global main_menu
global clock
if difficulty == 'EASY':
f = font.render('Playing as a baby (easy)', True, (255, 255, 255))
elif difficulty == 'MEDIUM':
f = font.render('Playing as a kid (medium)', True, (255, 255, 255))
elif difficulty == 'HARD':
f = font.render('Playing as a champion (hard)', True, (255, 255, 255))
else:
raise Exception('unknown difficulty {0}'.format(difficulty))
# Draw random color and text
bg_color = random_color()
f_width = f.get_size()[0]
# Reset main menu and disable
# You also can set another menu, like a 'pause menu', or just use the same
# main_menu as the menu that will check all your input.
main_menu.disable()
main_menu.full_reset()
while True:
# noinspection PyUnresolvedReferences
clock.tick(60)
# Application events
events = pygame.event.get()
for e in events:
if e.type == pygame.QUIT:
exit()
elif e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
main_menu.enable()
                    # Quit this function and fall back to the main-menu loop below
return
# Pass events to main_menu
if main_menu.is_enabled():
main_menu.update(events)
# Continue playing
surface.fill(bg_color)
surface.blit(f, ((WINDOW_SIZE[0] - f_width) / 2, WINDOW_SIZE[1] / 2))
pygame.display.flip()
        # In test mode, run only a single loop
if test:
break
def main_background() -> None:
"""
Function used by menus, draw on background while menu is active.
:return: None
"""
global surface
surface.fill((128, 0, 128))
def main(test: bool = False) -> None:
"""
Main program.
:param test: Indicate function is being tested
:return: None
"""
# -------------------------------------------------------------------------
# Globals
# -------------------------------------------------------------------------
global clock
global main_menu
global surface
# -------------------------------------------------------------------------
# Create window
# -------------------------------------------------------------------------
surface = create_example_window('Example - Game Selector', WINDOW_SIZE)
clock = pygame.time.Clock()
# -------------------------------------------------------------------------
# Create menus: Play Menu
# -------------------------------------------------------------------------
play_menu = pygame_menu.Menu(
height=WINDOW_SIZE[1] * 0.7,
title='Play Menu',
width=WINDOW_SIZE[0] * 0.75
)
submenu_theme = pygame_menu.themes.THEME_DEFAULT.copy()
submenu_theme.widget_font_size = 15
play_submenu = pygame_menu.Menu(
height=WINDOW_SIZE[1] * 0.5,
theme=submenu_theme,
title='Submenu',
width=WINDOW_SIZE[0] * 0.7
)
for i in range(30):
play_submenu.add.button('Back {0}'.format(i), pygame_menu.events.BACK)
play_submenu.add.button('Return to main menu', pygame_menu.events.RESET)
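    # Positional arguments passed after the callback (DIFFICULTY and the font) are forwarded
    # to play_function when the 'Start' button is pressed.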
play_menu.add.button('Start', # When pressing return -> play(DIFFICULTY[0], font)
play_function,
DIFFICULTY,
pygame.font.Font(pygame_menu.font.FONT_FRANCHISE, 30))
play_menu.add.selector('Select difficulty ',
[('1 - Easy', 'EASY'),
('2 - Medium', 'MEDIUM'),
('3 - Hard', 'HARD')],
onchange=change_difficulty,
selector_id='select_difficulty')
play_menu.add.button('Another menu', play_submenu)
play_menu.add.button('Return to main menu', pygame_menu.events.BACK)
# -------------------------------------------------------------------------
    # Create menus: About
# -------------------------------------------------------------------------
about_theme = pygame_menu.themes.THEME_DEFAULT.copy()
about_theme.widget_margin = (0, 0)
about_menu = pygame_menu.Menu(
height=WINDOW_SIZE[1] * 0.6,
theme=about_theme,
title='About',
width=WINDOW_SIZE[0] * 0.6
)
for m in ABOUT:
about_menu.add.label(m, align=pygame_menu.locals.ALIGN_LEFT, font_size=20)
about_menu.add.vertical_margin(30)
about_menu.add.button('Return to menu', pygame_menu.events.BACK)
# -------------------------------------------------------------------------
# Create menus: Main
# -------------------------------------------------------------------------
main_theme = pygame_menu.themes.THEME_DEFAULT.copy()
main_menu = pygame_menu.Menu(
height=WINDOW_SIZE[1] * 0.6,
theme=main_theme,
title='Main Menu',
width=WINDOW_SIZE[0] * 0.6
)
main_menu.add.button('Play', play_menu)
main_menu.add.button('About', about_menu)
main_menu.add.button('Quit', pygame_menu.events.EXIT)
# -------------------------------------------------------------------------
# Main loop
# -------------------------------------------------------------------------
while True:
# Tick
clock.tick(FPS)
# Paint background
main_background()
# Application events
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
exit()
# Main menu
if main_menu.is_enabled():
main_menu.mainloop(surface, main_background, disable_loop=test, fps_limit=FPS)
# Flip surface
pygame.display.flip()
        # In test mode, stop after the first loop
if test:
break
if __name__ == '__main__':
main()
| 33.133094
| 90
| 0.538921
|
13afde7ce4be755730ecbf48fe1774ab9444f9ff
| 1,793
|
py
|
Python
|
Attention_recurrent_models/main.py
|
kishormishra3/DeepLearn
|
bc0dfad7b4694aa5d872b5bdddd6e3a17d139d7d
|
[
"MIT"
] | 1,756
|
2017-05-24T12:46:44.000Z
|
2022-03-30T15:23:26.000Z
|
Attention_recurrent_models/main.py
|
kishormishra3/DeepLearn
|
bc0dfad7b4694aa5d872b5bdddd6e3a17d139d7d
|
[
"MIT"
] | 20
|
2017-05-23T15:23:39.000Z
|
2019-04-12T18:07:04.000Z
|
Attention_recurrent_models/main.py
|
kishormishra3/DeepLearn
|
bc0dfad7b4694aa5d872b5bdddd6e3a17d139d7d
|
[
"MIT"
] | 355
|
2017-05-29T12:37:19.000Z
|
2022-01-25T15:23:50.000Z
|
"""
** deeplean-ai.com **
** dl-lab **
created by :: GauravBh1010tt
"""
import sys
sys.path.append("..\_deeplearn_utils")
import model_WALSTM as model
import wiki_utils as wk
from dl_text.metrics import eval_metric
from dl_text import dl
glove_fname = 'K:/workspace/neural network/Trec_QA-master/glove.6B.50d.txt'
################### DEFINING MODEL AND PREDICTION FILE ###################
lrmodel = model.WA_LSTM
model_name = lrmodel.func_name
################### DEFINING HYPERPARAMETERS ###################
dimx = 50
dimy = 50
dimft = 44
batch_size = 70
vocab_size = 8000
embedding_dim = 50
LSTM_neurons = 64
depth = 1
nb_epoch = 3
shared = 1
opt_params = [0.001,'adam']
ques, ans, label_train, train_len, test_len,\
wordVec_model, res_fname, pred_fname, feat_train, feat_test = wk.load_wiki(model_name, glove_fname)
data_l , data_r, embedding_matrix = dl.process_data(ques, ans,
wordVec_model,dimx=dimx,
dimy=dimy,vocab_size=vocab_size,
embedding_dim=embedding_dim)
X_train_l,X_test_l,X_dev_l,X_train_r,X_test_r,X_dev_r = wk.prepare_train_test(data_l,data_r,
train_len,test_len)
lrmodel = lrmodel(embedding_matrix, dimx=dimx, dimy=dimy, LSTM_neurons = LSTM_neurons, embedding_dim = embedding_dim,
depth = depth, shared = shared,opt_params = opt_params)
print '\n', model_name,'model built \n'
lrmodel.fit([X_train_l, X_train_r],label_train,batch_size=batch_size,nb_epoch=nb_epoch,verbose=2)
map_val, mrr_val = eval_metric(lrmodel, X_test_l, X_test_r, res_fname, pred_fname)
print 'MAP : ',map_val,' MRR : ',mrr_val
| 32.6
| 118
| 0.629671
|
fb0e26d3ed89dca74ad0662fe87650833b879ba3
| 2,808
|
py
|
Python
|
directories.py
|
carlocarfora/staticfolio
|
6b03041e0d323c865ba1eb696d69ce4f06b7f9a5
|
[
"Unlicense"
] | null | null | null |
directories.py
|
carlocarfora/staticfolio
|
6b03041e0d323c865ba1eb696d69ce4f06b7f9a5
|
[
"Unlicense"
] | null | null | null |
directories.py
|
carlocarfora/staticfolio
|
6b03041e0d323c865ba1eb696d69ce4f06b7f9a5
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import os
import shutil
def setup_template_dirs(destination=None):
""" Copies template directories to output folder for deployment.
Copies the folders plus contents inside the template directory so
that the generated website has the relevant folders and scripts
needed to function.
Args:
destination: the output folder, defaults to a dir called output
"""
if destination is None:
destination = "output"
try:
os.mkdir(destination)
except OSError:
print(destination + " already exists, can't create!")
for root, dirs, files in os.walk("template"):
for folder in dirs:
            # try:
            #     os.makedirs(os.path.join(destination, folder))
            # except FileExistsError:
            #     print("Can't make path, already exists!")
            source = os.path.join(root, folder)
output = os.path.join(destination, folder)
try:
shutil.copytree(source, output)
except OSError:
"Can't copy to output, template folders already exist"
print(destination + " has finished.")
def setup_project_dirs(src=None, dest=None):
""" Creates project directories from walking through content folder
and creating the same name directories.
Walks through the content folder, takes the name of each project
folder and creates the same folder in the output directory. Also
creates the image folder in each project folder, set up for
resized photos.
Args:
src: the folder to search for project directories
dest: the output folder for the new directories
"""
if src is None:
src = "content"
if dest is None:
dest = "output"
folders = []
for items in os.listdir(src):
if os.path.isdir(os.path.join(src, items)):
folders.append(items)
print("Found folders: {}".format(folders))
for folder in folders:
if not os.path.exists(os.path.join(dest, folder)):
os.mkdir(os.path.join(dest, folder))
os.mkdir(os.path.join(dest, folder, "images"))
print("Created folder: {}".format(folder))
else:
print("Skipped folder {}, already exists!".format(folder))
def create_blog_folder(blog_dir=None):
""" Creates blog folder in output folder
Args:
blog_dir: set this to be the blog post output folder
"""
if blog_dir is None:
blog_dir = "output/blog"
try:
os.mkdir(blog_dir)
except OSError:
print("blog folder already exists, can't create!")
| 28.653061
| 73
| 0.5901
|
0b114626314bfba1580c5a8f84cd0fec162b4b88
| 860
|
py
|
Python
|
tests/big_map_diff/onu6qg9yCUeFRRjKR9RWVp23TEKuHGk7Ew8sivZ2thPveidn5yu/test_big_map_onu6qg.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-08-11T02:31:24.000Z
|
2020-08-11T02:31:24.000Z
|
tests/big_map_diff/onu6qg9yCUeFRRjKR9RWVp23TEKuHGk7Ew8sivZ2thPveidn5yu/test_big_map_onu6qg.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/big_map_diff/onu6qg9yCUeFRRjKR9RWVp23TEKuHGk7Ew8sivZ2thPveidn5yu/test_big_map_onu6qg.py
|
tqtezos/pytezos
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.contract import ContractStorage
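# Round-trip test: decode the recorded big_map_diff into Python values, re-encode it,
# and check that the result matches the original diff.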
class BigMapCodingTestonu6qg(TestCase):
def setUp(self):
self.maxDiff = None
def test_big_map_onu6qg(self):
section = get_data(
path='big_map_diff/onu6qg9yCUeFRRjKR9RWVp23TEKuHGk7Ew8sivZ2thPveidn5yu/storage_section.json')
storage = ContractStorage(section)
big_map_diff = get_data(
path='big_map_diff/onu6qg9yCUeFRRjKR9RWVp23TEKuHGk7Ew8sivZ2thPveidn5yu/big_map_diff.json')
expected = [
dict(key=item['key'], value=item.get('value'))
for item in big_map_diff
]
big_map = storage.big_map_diff_decode(expected)
actual = storage.big_map_diff_encode(big_map)
self.assertEqual(expected, actual)
| 31.851852
| 105
| 0.683721
|
a47bc3b92734c10b2e8cae084fd7c5d534397894
| 16,536
|
py
|
Python
|
airflow/jobs/triggerer_job.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 15,947
|
2019-01-05T13:51:02.000Z
|
2022-03-31T23:33:16.000Z
|
airflow/jobs/triggerer_job.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 14,603
|
2019-01-05T09:43:19.000Z
|
2022-03-31T23:11:59.000Z
|
airflow/jobs/triggerer_job.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 8,429
|
2019-01-05T19:45:47.000Z
|
2022-03-31T22:13:01.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import asyncio
import os
import signal
import sys
import threading
import time
from collections import deque
from typing import Deque, Dict, Set, Tuple, Type
from sqlalchemy import func
from airflow.compat.asyncio import create_task
from airflow.configuration import conf
from airflow.jobs.base_job import BaseJob
from airflow.models.trigger import Trigger
from airflow.stats import Stats
from airflow.triggers.base import BaseTrigger, TriggerEvent
from airflow.typing_compat import TypedDict
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.module_loading import import_string
from airflow.utils.session import provide_session
class TriggererJob(BaseJob):
"""
TriggererJob continuously runs active triggers in asyncio, watching
for them to fire off their events and then dispatching that information
to their dependent tasks/DAGs.
It runs as two threads:
- The main thread does DB calls/checkins
- A subthread runs all the async code
"""
__mapper_args__ = {'polymorphic_identity': 'TriggererJob'}
def __init__(self, capacity=None, *args, **kwargs):
# Call superclass
super().__init__(*args, **kwargs)
if capacity is None:
self.capacity = conf.getint('triggerer', 'default_capacity', fallback=1000)
elif isinstance(capacity, int) and capacity > 0:
self.capacity = capacity
else:
raise ValueError(f"Capacity number {capacity} is invalid")
# Set up runner async thread
self.runner = TriggerRunner()
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
@classmethod
@provide_session
def is_needed(cls, session) -> bool:
"""
Tests if the triggerer job needs to be run (i.e., if there are triggers
in the trigger table).
This is used for the warning boxes in the UI.
"""
return session.query(func.count(Trigger.id)).scalar() > 0
def on_kill(self):
"""
Called when there is an external kill command (via the heartbeat
mechanism, for example)
"""
self.runner.stop = True
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
# The first time, try to exit nicely
if not self.runner.stop:
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.runner.stop = True
else:
self.log.warning("Forcing exit due to second exit signal %s", signum)
sys.exit(os.EX_SOFTWARE)
def _execute(self) -> None:
self.log.info("Starting the triggerer")
try:
# Kick off runner thread
self.runner.start()
# Start our own DB loop in the main thread
self._run_trigger_loop()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing TriggererJob._run_trigger_loop")
raise
finally:
self.log.info("Waiting for triggers to clean up")
# Tell the subthread to stop and then wait for it.
# If the user interrupts/terms again, _graceful_exit will allow them
# to force-kill here.
self.runner.stop = True
self.runner.join(30)
self.log.info("Exited trigger loop")
def _run_trigger_loop(self) -> None:
"""
The main-thread trigger loop.
This runs synchronously and handles all database reads/writes.
"""
while not self.runner.stop:
# Clean out unused triggers
Trigger.clean_unused()
# Load/delete triggers
self.load_triggers()
# Handle events
self.handle_events()
# Handle failed triggers
self.handle_failed_triggers()
# Handle heartbeat
self.heartbeat(only_if_necessary=True)
# Collect stats
self.emit_metrics()
# Idle sleep
time.sleep(1)
def load_triggers(self):
"""
Queries the database to get the triggers we're supposed to be running,
adds them to our runner, and then removes ones from it we no longer
need.
"""
Trigger.assign_unassigned(self.id, self.capacity)
ids = Trigger.ids_for_triggerer(self.id)
self.runner.update_triggers(set(ids))
def handle_events(self):
"""
Handles outbound events from triggers - dispatching them into the Trigger
model where they are then pushed into the relevant task instances.
"""
while self.runner.events:
# Get the event and its trigger ID
trigger_id, event = self.runner.events.popleft()
# Tell the model to wake up its tasks
Trigger.submit_event(trigger_id=trigger_id, event=event)
# Emit stat event
Stats.incr('triggers.succeeded')
def handle_failed_triggers(self):
"""
Handles "failed" triggers - ones that errored or exited before they
sent an event. Task Instances that depend on them need failing.
"""
while self.runner.failed_triggers:
# Tell the model to fail this trigger's deps
trigger_id = self.runner.failed_triggers.popleft()
Trigger.submit_failure(trigger_id=trigger_id)
# Emit stat event
Stats.incr('triggers.failed')
def emit_metrics(self):
Stats.gauge('triggers.running', len(self.runner.triggers))
class TriggerDetails(TypedDict):
"""Type class for the trigger details dictionary"""
task: asyncio.Task
name: str
events: int
class TriggerRunner(threading.Thread, LoggingMixin):
"""
Runtime environment for all triggers.
Mainly runs inside its own thread, where it hands control off to an asyncio
event loop, but is also sometimes interacted with from the main thread
(where all the DB queries are done). All communication between threads is
done via Deques.
"""
# Maps trigger IDs to their running tasks and other info
triggers: Dict[int, TriggerDetails]
# Cache for looking up triggers by classpath
trigger_cache: Dict[str, Type[BaseTrigger]]
# Inbound queue of new triggers
to_create: Deque[Tuple[int, BaseTrigger]]
# Inbound queue of deleted triggers
to_delete: Deque[int]
# Outbound queue of events
events: Deque[Tuple[int, TriggerEvent]]
# Outbound queue of failed triggers
failed_triggers: Deque[int]
# Should-we-stop flag
stop: bool = False
def __init__(self):
super().__init__()
self.triggers = {}
self.trigger_cache = {}
self.to_create = deque()
self.to_delete = deque()
self.events = deque()
self.failed_triggers = deque()
def run(self):
"""Sync entrypoint - just runs arun in an async loop."""
# Pylint complains about this with a 3.6 base, can remove with 3.7+
asyncio.run(self.arun()) # pylint: disable=no-member
async def arun(self):
"""
Main (asynchronous) logic loop.
The loop in here runs trigger addition/deletion/cleanup. Actual
triggers run in their own separate coroutines.
"""
watchdog = create_task(self.block_watchdog())
last_status = time.time()
while not self.stop:
# Run core logic
await self.create_triggers()
await self.delete_triggers()
await self.cleanup_finished_triggers()
# Sleep for a bit
await asyncio.sleep(1)
# Every minute, log status
if time.time() - last_status >= 60:
self.log.info("%i triggers currently running", len(self.triggers))
last_status = time.time()
# Wait for watchdog to complete
await watchdog
async def create_triggers(self):
"""
Drain the to_create queue and create all triggers that have been
requested in the DB that we don't yet have.
"""
while self.to_create:
trigger_id, trigger_instance = self.to_create.popleft()
if trigger_id not in self.triggers:
self.triggers[trigger_id] = {
"task": create_task(self.run_trigger(trigger_id, trigger_instance)),
"name": f"{trigger_instance!r} (ID {trigger_id})",
"events": 0,
}
else:
self.log.warning("Trigger %s had insertion attempted twice", trigger_id)
await asyncio.sleep(0)
async def delete_triggers(self):
"""
Drain the to_delete queue and ensure all triggers that are not in the
DB are cancelled, so the cleanup job deletes them.
"""
while self.to_delete:
trigger_id = self.to_delete.popleft()
if trigger_id in self.triggers:
# We only delete if it did not exit already
self.triggers[trigger_id]["task"].cancel()
await asyncio.sleep(0)
async def cleanup_finished_triggers(self):
"""
Go through all trigger tasks (coroutines) and clean up entries for
ones that have exited, optionally warning users if the exit was
not normal.
"""
for trigger_id, details in list(self.triggers.items()): # pylint: disable=too-many-nested-blocks
if details["task"].done():
# Check to see if it exited for good reasons
try:
result = details["task"].result()
except (asyncio.CancelledError, SystemExit, KeyboardInterrupt):
# These are "expected" exceptions and we stop processing here
# If we don't, then the system requesting a trigger be removed -
# which turns into CancelledError - results in a failure.
del self.triggers[trigger_id]
continue
except BaseException as e:
# This is potentially bad, so log it.
self.log.error("Trigger %s exited with error %s", details["name"], e)
else:
# See if they foolishly returned a TriggerEvent
if isinstance(result, TriggerEvent):
self.log.error(
"Trigger %s returned a TriggerEvent rather than yielding it", details["name"]
)
# See if this exited without sending an event, in which case
# any task instances depending on it need to be failed
if details["events"] == 0:
self.log.error(
"Trigger %s exited without sending an event. Dependent tasks will be failed.",
details["name"],
)
self.failed_triggers.append(trigger_id)
del self.triggers[trigger_id]
await asyncio.sleep(0)
async def block_watchdog(self):
"""
Watchdog loop that detects blocking (badly-written) triggers.
Triggers should be well-behaved async coroutines and await whenever
they need to wait; this loop tries to run every 100ms to see if
there are badly-written triggers taking longer than that and blocking
the event loop.
Unfortunately, we can't tell what trigger is blocking things, but
we can at least detect the top-level problem.
"""
while not self.stop:
last_run = time.monotonic()
await asyncio.sleep(0.1)
# We allow a generous amount of buffer room for now, since it might
# be a busy event loop.
time_elapsed = time.monotonic() - last_run
if time_elapsed > 0.2:
self.log.error(
"Triggerer's async thread was blocked for %.2f seconds, "
"likely by a badly-written trigger. Set PYTHONASYNCIODEBUG=1 "
"to get more information on overrunning coroutines.",
time_elapsed,
)
Stats.incr('triggers.blocked_main_thread')
# Async trigger logic
async def run_trigger(self, trigger_id, trigger):
"""
Wrapper which runs an actual trigger (they are async generators)
and pushes their events into our outbound event deque.
"""
self.log.info("Trigger %s starting", self.triggers[trigger_id]['name'])
try:
async for event in trigger.run():
self.log.info("Trigger %s fired: %s", self.triggers[trigger_id]['name'], event)
self.triggers[trigger_id]["events"] += 1
self.events.append((trigger_id, event))
finally:
# CancelledError will get injected when we're stopped - which is
# fine, the cleanup process will understand that, but we want to
# allow triggers a chance to cleanup, either in that case or if
# they exit cleanly.
trigger.cleanup()
# Main-thread sync API
def update_triggers(self, requested_trigger_ids: Set[int]):
"""
Called from the main thread to request that we update what
triggers we're running.
Works out the differences - ones to add, and ones to remove - then
adds them to the deques so the subthread can actually mutate the running
trigger set.
"""
# Note that `triggers` could be mutated by the other thread during this
# line's execution, but we consider that safe, since there's a strict
# add -> remove -> never again lifecycle this function is already
# handling.
current_trigger_ids = set(self.triggers.keys())
# Work out the two difference sets
new_trigger_ids = requested_trigger_ids.difference(current_trigger_ids)
old_trigger_ids = current_trigger_ids.difference(requested_trigger_ids)
# Bulk-fetch new trigger records
new_triggers = Trigger.bulk_fetch(new_trigger_ids)
# Add in new triggers
for new_id in new_trigger_ids:
# Check it didn't vanish in the meantime
if new_id not in new_triggers:
self.log.warning("Trigger ID %s disappeared before we could start it", new_id)
continue
# Resolve trigger record into an actual class instance
try:
trigger_class = self.get_trigger_by_classpath(new_triggers[new_id].classpath)
except BaseException:
# Either the trigger code or the path to it is bad. Fail the trigger.
self.failed_triggers.append(new_id)
continue
self.to_create.append((new_id, trigger_class(**new_triggers[new_id].kwargs)))
# Remove old triggers
for old_id in old_trigger_ids:
self.to_delete.append(old_id)
def get_trigger_by_classpath(self, classpath: str) -> Type[BaseTrigger]:
"""
Gets a trigger class by its classpath ("path.to.module.classname")
Uses a cache dictionary to speed up lookups after the first time.
"""
if classpath not in self.trigger_cache:
self.trigger_cache[classpath] = import_string(classpath)
return self.trigger_cache[classpath]
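
As a point of reference, the object that TriggerRunner.run_trigger consumes is an async generator that yields TriggerEvent payloads. The sketch below is a hypothetical minimal trigger, not part of the original file; the classpath "example.triggers.SleepTrigger" and its sleep-then-fire behaviour are assumptions for illustration only.

import asyncio
from datetime import datetime, timezone

from airflow.triggers.base import BaseTrigger, TriggerEvent


class SleepTrigger(BaseTrigger):
    """Hypothetical trigger: sleep for a while, then fire a single event."""

    def __init__(self, seconds: float):
        super().__init__()
        self.seconds = seconds

    def serialize(self):
        # classpath + kwargs, the shape that Trigger rows store and bulk_fetch returns
        return ("example.triggers.SleepTrigger", {"seconds": self.seconds})

    async def run(self):
        # Must await instead of blocking, otherwise block_watchdog() logs an error
        await asyncio.sleep(self.seconds)
        yield TriggerEvent({"fired_at": datetime.now(timezone.utc).isoformat()})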
| 39.654676
| 105
| 0.623609
|
74211a448c022505f790947e7390e2f3d7afb3fb
| 4,312
|
py
|
Python
|
python/GafferUITest/NoduleTest.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUITest/NoduleTest.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUITest/NoduleTest.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import fnmatch
import IECore
import Gaffer
import GafferUI
import GafferTest
import GafferUITest
class NoduleTest( GafferUITest.TestCase ) :
def test( self ) :
class NoduleTestNode( Gaffer.Node ) :
def __init__( self ) :
Gaffer.Node.__init__( self )
self.addChild(
Gaffer.IntPlug( "i" )
)
self.addChild(
Gaffer.CompoundPlug( "c" )
)
IECore.registerRunTimeTyped( NoduleTestNode )
n = NoduleTestNode()
ni = GafferUI.Nodule.create( n["i"] )
nc = GafferUI.Nodule.create( n["c"] )
self.failUnless( isinstance( ni, GafferUI.StandardNodule ) )
self.failUnless( isinstance( nc, GafferUI.StandardNodule ) )
GafferUI.Nodule.registerNodule( NoduleTestNode, "c", GafferUI.CompoundNodule )
nc = GafferUI.Nodule.create( n["c"] )
self.failUnless( isinstance( nc, GafferUI.CompoundNodule ) )
class NoduleTestNodeSubclass( NoduleTestNode ) :
def __init__( self ) :
NoduleTestNode.__init__( self )
n2 = NoduleTestNode()
nc2 = GafferUI.Nodule.create( n2["c"] )
self.failUnless( isinstance( nc2, GafferUI.CompoundNodule ) )
def testPlugNameRegexes( self ) :
class RegexNoduleTestNode( Gaffer.Node ) :
def __init__( self ) :
Gaffer.Node.__init__( self )
self.addChild(
Gaffer.IntPlug( "i" )
)
self.addChild(
Gaffer.IntPlug( "r1" )
)
self.addChild(
Gaffer.IntPlug( "r2" )
)
self.addChild(
Gaffer.IntPlug( "da" )
)
self.addChild(
Gaffer.IntPlug( "db" )
)
IECore.registerRunTimeTyped( RegexNoduleTestNode )
node = RegexNoduleTestNode()
self.failUnless( isinstance( GafferUI.Nodule.create( node["i"] ), GafferUI.Nodule ) )
self.assertEqual( GafferUI.Nodule.create( node["i"] ).getName(), "Nodule" )
def rCreator( plug ) :
n = GafferUI.StandardNodule( plug )
n.setName( "r" )
return n
GafferUI.Nodule.registerNodule( RegexNoduleTestNode, fnmatch.translate( "r[0-9]" ), rCreator )
self.assertEqual( GafferUI.Nodule.create( node["r1"] ).getName(), "r" )
self.assertEqual( GafferUI.Nodule.create( node["r2"] ).getName(), "r" )
def dCreator( plug ) :
return None
GafferUI.Nodule.registerNodule( RegexNoduleTestNode, fnmatch.translate( "d*" ), dCreator )
self.failUnless( GafferUI.Nodule.create( node["da"] ) is None )
self.failUnless( GafferUI.Nodule.create( node["db"] ) is None )
if __name__ == "__main__":
unittest.main()
| 26.453988
| 96
| 0.675788
|
2d1b54396237bc2cb39444974ab1219eb66ae951
| 2,417
|
py
|
Python
|
zerver/tests/test_subdomains.py
|
alex784004/patient
|
a6510c4626392b9a8385cbac82698d9e23df0a55
|
[
"Apache-2.0"
] | 1
|
2019-01-13T20:47:29.000Z
|
2019-01-13T20:47:29.000Z
|
zerver/tests/test_subdomains.py
|
alex784004/patient
|
a6510c4626392b9a8385cbac82698d9e23df0a55
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_subdomains.py
|
alex784004/patient
|
a6510c4626392b9a8385cbac82698d9e23df0a55
|
[
"Apache-2.0"
] | null | null | null |
import mock
from typing import Any, Dict, List
from django.test import TestCase, override_settings
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm
class SubdomainsTest(TestCase):
def test_get_subdomain(self) -> None:
def request_mock(host: str) -> Any:
request = mock.Mock(spec=['get_host'])
request.attach_mock(mock.Mock(return_value=host), 'get_host')
return request
def test(expected, host, *, plusport=True,
external_host='example.org', realm_hosts={}, root_aliases=[]):
# type: (str, str, bool, str, Dict[str, str], List[str]) -> None
with self.settings(EXTERNAL_HOST=external_host,
REALM_HOSTS=realm_hosts,
ROOT_SUBDOMAIN_ALIASES=root_aliases):
self.assertEqual(get_subdomain(request_mock(host)), expected)
if plusport and ':' not in host:
self.assertEqual(get_subdomain(request_mock(host + ':443')),
expected)
ROOT = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
# Basics
test(ROOT, 'example.org')
test('foo', 'foo.example.org')
test(ROOT, 'www.example.org', root_aliases=['www'])
# Unrecognized patterns fall back to root
test(ROOT, 'arbitrary.com')
test(ROOT, 'foo.example.org.evil.com')
# REALM_HOSTS adds a name,
test('bar', 'chat.barbar.com', realm_hosts={'bar': 'chat.barbar.com'})
# ... exactly, ...
test(ROOT, 'surchat.barbar.com', realm_hosts={'bar': 'chat.barbar.com'})
test(ROOT, 'foo.chat.barbar.com', realm_hosts={'bar': 'chat.barbar.com'})
# ... and leaves the subdomain in place too.
test('bar', 'bar.example.org', realm_hosts={'bar': 'chat.barbar.com'})
# Any port is fine in Host if there's none in EXTERNAL_HOST, ...
test('foo', 'foo.example.org:443', external_host='example.org')
test('foo', 'foo.example.org:12345', external_host='example.org')
# ... but an explicit port in EXTERNAL_HOST must be explicitly matched in Host.
test(ROOT, 'foo.example.org', external_host='example.org:12345')
test(ROOT, 'foo.example.org', external_host='example.org:443', plusport=False)
test('foo', 'foo.example.org:443', external_host='example.org:443')
| 43.945455
| 87
| 0.609433
|
29ad84965bd37a2b54e0f9dc40337551e7f6c461
| 2,839
|
py
|
Python
|
ects/pools/pool_config.py
|
ects-io/ects-blockchain
|
a798034a8c8bce34d4b87fb2c98351d06f9eaf8e
|
[
"Apache-2.0"
] | null | null | null |
ects/pools/pool_config.py
|
ects-io/ects-blockchain
|
a798034a8c8bce34d4b87fb2c98351d06f9eaf8e
|
[
"Apache-2.0"
] | null | null | null |
ects/pools/pool_config.py
|
ects-io/ects-blockchain
|
a798034a8c8bce34d4b87fb2c98351d06f9eaf8e
|
[
"Apache-2.0"
] | null | null | null |
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import List
from blspy import G1Element
from ects.types.blockchain_format.sized_bytes import bytes32
from ects.util.byte_types import hexstr_to_bytes
from ects.util.config import load_config, save_config
from ects.util.streamable import Streamable, streamable
"""
Config example
This is what goes into the user's config file, to communicate between the wallet and the farmer processes.
pool_list:
launcher_id: ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa
authentication_public_key: 970e181ae45435ae696508a78012dc80548c334cf29676ea6ade7049eb9d2b9579cc30cb44c3fd68d35a250cfbc69e29
owner_public_key: 84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5
payout_instructions: c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8
pool_url: localhost
p2_singleton_puzzle_hash: 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
target_puzzle_hash: 344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58
""" # noqa
log = logging.getLogger(__name__)
@dataclass(frozen=True)
@streamable
class PoolWalletConfig(Streamable):
launcher_id: bytes32
pool_url: str
payout_instructions: str
target_puzzle_hash: bytes32
p2_singleton_puzzle_hash: bytes32
owner_public_key: G1Element
authentication_public_key: G1Element
def load_pool_config(root_path: Path) -> List[PoolWalletConfig]:
config = load_config(root_path, "config.yaml")
ret_list: List[PoolWalletConfig] = []
pool_list = config["pool"].get("pool_list", [])
if pool_list is not None:
for pool_config_dict in pool_list:
try:
pool_config = PoolWalletConfig(
bytes32.from_hexstr(pool_config_dict["launcher_id"]),
pool_config_dict["pool_url"],
pool_config_dict["payout_instructions"],
bytes32.from_hexstr(pool_config_dict["target_puzzle_hash"]),
bytes32.from_hexstr(pool_config_dict["p2_singleton_puzzle_hash"]),
G1Element.from_bytes(hexstr_to_bytes(pool_config_dict["owner_public_key"])),
G1Element.from_bytes(hexstr_to_bytes(pool_config_dict["authentication_public_key"])),
)
ret_list.append(pool_config)
except Exception as e:
log.error(f"Exception loading config: {pool_config_dict} {e}")
return ret_list
async def update_pool_config(root_path: Path, pool_config_list: List[PoolWalletConfig]):
full_config = load_config(root_path, "config.yaml")
full_config["pool"]["pool_list"] = [c.to_json_dict() for c in pool_config_list]
save_config(root_path, "config.yaml", full_config)
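
A minimal usage sketch, not part of the original module; it assumes the functions above are importable and that a config.yaml exists under the chosen root path, which is itself an assumption.

from pathlib import Path

# Hypothetical ects root directory; adjust to the real installation path.
root = Path.home() / ".ects" / "mainnet"
for cfg in load_pool_config(root):
    print(cfg.launcher_id.hex(), cfg.pool_url, cfg.payout_instructions)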
| 41.75
| 127
| 0.755548
|
4f02942b8e373e1b5466dee07dc927fcb2c7285f
| 270
|
py
|
Python
|
examples/simple.py
|
alcarithemad/pyndata
|
67586f95a74c72ff1f8796ccc213f660bdaee110
|
[
"BSD-2-Clause"
] | 3
|
2016-03-22T17:33:27.000Z
|
2017-07-18T03:20:42.000Z
|
examples/simple.py
|
alcarithemad/pyndata
|
67586f95a74c72ff1f8796ccc213f660bdaee110
|
[
"BSD-2-Clause"
] | null | null | null |
examples/simple.py
|
alcarithemad/pyndata
|
67586f95a74c72ff1f8796ccc213f660bdaee110
|
[
"BSD-2-Clause"
] | null | null | null |
import pyndata
class Message(pyndata.Struct):
msg_type = pyndata.uint8()
msg_length = pyndata.uint16(endian='big')
payload = pyndata.bytestring(length=msg_length)
m = Message()
m.msg_type = 1
m.payload = b'asdf'
print(repr(m.pack())) # '\x01\x00\x04asdf'
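
A quick cross-check of that packed layout using only the standard library; it mirrors the comment above (uint8 type, big-endian uint16 length, then the raw payload) and assumes nothing about pyndata beyond what the example shows.

import struct

payload = b'asdf'
# '>BH' = big-endian: 1-byte unsigned type, 2-byte unsigned length
packed = struct.pack('>BH', 1, len(payload)) + payload
print(repr(packed))  # b'\x01\x00\x04asdf'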
| 20.769231
| 51
| 0.696296
|
ae976a63dd18e01c83ef7eeb6ee2cd443f3ebba8
| 160
|
py
|
Python
|
pybigo/__init__.py
|
endremborza/pybigo
|
8524316c8f757c4910319fe7f5cc807388a4254e
|
[
"MIT"
] | null | null | null |
pybigo/__init__.py
|
endremborza/pybigo
|
8524316c8f757c4910319fe7f5cc807388a4254e
|
[
"MIT"
] | null | null | null |
pybigo/__init__.py
|
endremborza/pybigo
|
8524316c8f757c4910319fe7f5cc807388a4254e
|
[
"MIT"
] | null | null | null |
from ._version import __version__ # noqa: F401
from .core.estimator import BigOEstimator # noqa: F401
from .core.comparison import compare_time # noqa: F401
| 40
| 55
| 0.78125
|
2e3495a74cd9637bbd8bd5e99e47ccfd4b0c74da
| 2,656
|
py
|
Python
|
quotamanager/quotamanager.py
|
betacloud/tools
|
3bb8c704d2fbd697018136505185cb1182a988fa
|
[
"Apache-2.0"
] | null | null | null |
quotamanager/quotamanager.py
|
betacloud/tools
|
3bb8c704d2fbd697018136505185cb1182a988fa
|
[
"Apache-2.0"
] | null | null | null |
quotamanager/quotamanager.py
|
betacloud/tools
|
3bb8c704d2fbd697018136505185cb1182a988fa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os_client_config
import shade
import yaml
CLOUDNAME = 'service'
#shade.simple_logging(debug=True, http_debug=True)
def set_quotaclass(project, quotaclass="default"):
keystone = os_client_config.make_client('identity', cloud=CLOUDNAME)
keystone.projects.update(project.id, **{"quotaclass": quotaclass})
def update_quota(project, cloud):
print "update network quota for %s" % project.name
cloud.set_network_quotas(project.id, **quotaclasses[project.quotaclass]["network"])
print "update compute quota for %s" % project.name
cloud.set_compute_quotas(project.id, **quotaclasses[project.quotaclass]["compute"])
print "update volume quota for %s" % project.name
cloud.set_volume_quotas(project.id, **quotaclasses[project.quotaclass]["volume"])
def check_quota(project, cloud):
if "quotaclass" in project and project.name not in ["admin"]:
quotanetwork = cloud.get_network_quotas(project.id)
quotaupdate = False
for key in quotaclasses[project.quotaclass]["network"]:
if quotaclasses[project.quotaclass]["network"][key] != quotanetwork[key]:
quotaupdate = True
print "%s [ %s ] %d != %d" % (project.name, key, quotaclasses[project.quotaclass]["network"][key], quotanetwork[key])
quotacompute = cloud.get_compute_quotas(project.id)
for key in quotaclasses[project.quotaclass]["compute"]:
if quotaclasses[project.quotaclass]["compute"][key] != quotacompute[key]:
quotaupdate = True
print "%s [ %s ] %d != %d" % (project.name, key, quotaclasses[project.quotaclass]["compute"][key], quotacompute[key])
quotavolume = cloud.get_volume_quotas(project.id)
for key in quotaclasses[project.quotaclass]["volume"]:
if quotaclasses[project.quotaclass]["volume"][key] != quotavolume[key]:
quotaupdate = True
print "%s [ %s ] %d != %d" % (project.name, key, quotaclasses[project.quotaclass]["volume"][key], quotavolume[key])
if quotaupdate:
update_quota(project, cloud)
elif "quotaclass" not in project and project.name not in ["admin"]:
print "quotaclass not set for project %s, set quotaclass to default" % project.name
set_quotaclass(project, "default")
project = cloud.get_project(project.id)
update_quota(project, cloud)
with open("quotaclasses.yml", "r") as fp:
quotaclasses = yaml.load(fp)
cloud = shade.operator_cloud(cloud=CLOUDNAME)
for project in [p for p in cloud.search_projects() if not p.is_domain]:
check_quota(project, cloud)
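
For reference, the lookups above require quotaclasses.yml to parse into a mapping from quotaclass name to the three sections network/compute/volume. The sketch below shows that shape as the equivalent Python dictionary; the individual quota keys and numbers are illustrative assumptions, not values from the original repository.

quotaclasses_example = {
    "default": {
        "network": {"network": 1, "router": 1, "floatingip": 2},
        "compute": {"instances": 4, "cores": 8, "ram": 16384},
        "volume": {"volumes": 4, "gigabytes": 100},
    }
}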
| 40.861538
| 133
| 0.676205
|
16c0faf601736d9a872868b1a9162c29b74ad703
| 2,454
|
py
|
Python
|
eelgraph/charts.py
|
jamsterwes/eelgraph
|
59a0b1b2c2d423a6ef6a5e370f1b6d06cd0b8469
|
[
"MIT"
] | null | null | null |
eelgraph/charts.py
|
jamsterwes/eelgraph
|
59a0b1b2c2d423a6ef6a5e370f1b6d06cd0b8469
|
[
"MIT"
] | null | null | null |
eelgraph/charts.py
|
jamsterwes/eelgraph
|
59a0b1b2c2d423a6ef6a5e370f1b6d06cd0b8469
|
[
"MIT"
] | null | null | null |
import eelgraph.colors as colors
import json
class Chart(object):
obj = {}
def recalc(self):
pass
def tojson(self):
self.recalc()
return json.dumps(self.obj)
class Doughnut(Chart):
def __init__(self, data, colorSet=colors.best):
self.data = data
self.colors = {
label: colors.randomColor(colorSet, True)
for label, _ in self.data.items()
}
def recalc(self):
bgColors = [colors.stroke(self.colors[k])
for k, v in self.data.items()]
self.obj = {
'type': 'doughnut',
'data': {
'datasets': [{
'data': [v for k, v in self.data.items()],
'backgroundColor': bgColors
}],
'labels': [k for k, v in self.data.items()]
}
}
class Line(Chart):
def __init__(self, data=[], *args, lines=1, labels=None, radius=2.5,
fill=None, smooth=None, color=None):
self.data = data
self.lines = lines
if labels is None:
self.labels = [""
for _ in range(self.lines)]
else:
self.labels = labels
self.radius = radius
if color is None:
self.colors = [colors.randomColor(colorSet=colors.best)
for _ in range(self.lines)]
else:
self.colors = color
if fill is None:
self.fill = [False for _ in range(self.lines)]
else:
self.fill = fill
if smooth is None:
self.smooth = [0.0 for _ in range(self.lines)]
else:
self.smooth = smooth
def calcsets(self):
sets = []
for n in range(self.lines):
sets.append({
'label': self.labels[n],
'data': [{'x': p[0], 'y': p[1][n]} for p in self.data],
'backgroundColor': colors.fill(self.colors[n]),
'borderColor': colors.stroke(self.colors[n]),
'pointRadius': self.radius,
'fill': self.fill[n],
'lineTension': self.smooth[n]
})
return sets
def recalc(self):
self.obj = {
'type': 'line',
'data': {
'datasets': self.calcsets(),
'labels': ["%.02f" % p[0] for p in self.data]
}
}
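
A short usage sketch, assuming the classes above are importable (e.g. from eelgraph.charts); it builds one Doughnut and one single-line Line chart and serialises both with tojson(), which is the only interface the classes expose.

donut = Doughnut({"apples": 3, "pears": 5})

line = Line(
    data=[(0.0, [1.0]), (1.0, [2.5]), (2.0, [1.7])],  # (x, [y value per line])
    lines=1,
    labels=["demo"],
)

print(donut.tojson())
print(line.tojson())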
| 28.534884
| 72
| 0.466585
|
bd15bcea41a0b85a67c4744cb73866d81e989e8b
| 5,801
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/aio/_resource_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/aio/_resource_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/aio/_resource_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ResourceManagementClientConfiguration
from .operations import Operations
from .operations import DeploymentsOperations
from .operations import ProvidersOperations
from .operations import ResourcesOperations
from .operations import ResourceGroupsOperations
from .operations import TagsOperations
from .operations import DeploymentOperationsOperations
from .. import models
class ResourceManagementClient(object):
"""Provides operations for working with resources and resource groups.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.resource.resources.v2018_05_01.aio.operations.Operations
:ivar deployments: DeploymentsOperations operations
:vartype deployments: azure.mgmt.resource.resources.v2018_05_01.aio.operations.DeploymentsOperations
:ivar providers: ProvidersOperations operations
:vartype providers: azure.mgmt.resource.resources.v2018_05_01.aio.operations.ProvidersOperations
:ivar resources: ResourcesOperations operations
:vartype resources: azure.mgmt.resource.resources.v2018_05_01.aio.operations.ResourcesOperations
:ivar resource_groups: ResourceGroupsOperations operations
:vartype resource_groups: azure.mgmt.resource.resources.v2018_05_01.aio.operations.ResourceGroupsOperations
:ivar tags: TagsOperations operations
:vartype tags: azure.mgmt.resource.resources.v2018_05_01.aio.operations.TagsOperations
:ivar deployment_operations: DeploymentOperationsOperations operations
:vartype deployment_operations: azure.mgmt.resource.resources.v2018_05_01.aio.operations.DeploymentOperationsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = ResourceManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.deployments = DeploymentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.providers = ProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resources = ResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_groups = ResourceGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tags = TagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.deployment_operations = DeploymentOperationsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ResourceManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
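
A hedged usage sketch; DefaultAzureCredential from azure.identity.aio and the placeholder subscription ID are assumptions, and resource_groups.list() is used merely as one representative paged operation on this client.

import asyncio

from azure.identity.aio import DefaultAzureCredential


async def main():
    credential = DefaultAzureCredential()
    try:
        async with ResourceManagementClient(credential, "<subscription-id>") as client:
            # Iterate one paged operation to confirm the pipeline is wired up
            async for group in client.resource_groups.list():
                print(group.name)
    finally:
        await credential.close()


asyncio.run(main())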
| 51.336283
| 129
| 0.733149
|
057e50355be2a0142f87718dcb502ab0a635bfc2
| 2,431
|
py
|
Python
|
streams code/code.py
|
saba-in/live-streaming-of-IoT-data-using-streaming-analytics
|
543016a598317c43bce167d4b210be21650f4c9e
|
[
"Apache-2.0"
] | null | null | null |
streams code/code.py
|
saba-in/live-streaming-of-IoT-data-using-streaming-analytics
|
543016a598317c43bce167d4b210be21650f4c9e
|
[
"Apache-2.0"
] | null | null | null |
streams code/code.py
|
saba-in/live-streaming-of-IoT-data-using-streaming-analytics
|
543016a598317c43bce167d4b210be21650f4c9e
|
[
"Apache-2.0"
] | 1
|
2020-07-30T10:09:08.000Z
|
2020-07-30T10:09:08.000Z
|
#
# YOU MUST EDIT THE SCHEMA and add all attributes that you are returning as output.
#
# Preinstalled Python packages can be viewed from the Settings pane.
# In the Settings pane you can also install additional Python packages.
import sys
import logging
import pickle
import pandas as pd
# Use this logger for tracing or debugging your code:
logger = logging.getLogger(__name__)
# Example:
# logger.info('Got to step 2...')
# init() function will be called once on flow initialization
# @state a Python dictionary object for keeping state. The state object is passed to the process function
def init(state):
# do something once on flow initialization and save in the state object
pass
# process() function will be invoked on every event tuple
# @event a Python dictionary object representing the input event tuple as defined by the input schema
# @state a Python dictionary object for keeping state over subsequent function calls
# return must be a Python dictionary object. It will be the output of this operator.
# Returning None results in not submitting an output tuple for this invocation.
# You must declare all output attributes in the Edit Schema window.
def process(event, state):
id = event['ID']
age = event['Age']
experience = event['Experience']
income = event['Income']
zipcode = event['ZIPCode']
family = event['Family']
ccavg = event['CCAvg']
education = event['Education']
motgage = event['Mortgage']
securitiesaccount = event['SecuritiesAccount']
cdaccount = event['CDAccount']
online = event['Online']
creditcard = event['CreditCard']
# mytupple = (id,age,experience,income,zipcode,family,ccavg,education,motgage,securitiesaccount,cdaccount,online,creditcard)
mylist = []
mylist.append([age,income,zipcode,family,ccavg,education,motgage,securitiesaccount,cdaccount,online,creditcard])
test_set = pd.DataFrame(mylist, columns=['Age', 'Income', 'ZIPCode', 'Family', 'CCAvg', 'Education', 'Mortgage', 'SecuritiesAccount', 'CDAccount', 'Online', 'CreditCard'])
# Invoke the model-specific scoring function
model = state['model']
event['Prediction'] = model.predict(test_set)
return event
# Ensure that the Python packages used in this streams flow are compatible
# with the packages that you used to create the model. (Click Settings > Runtime).
def load_model(state, model):
state['model'] = model
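
A minimal driver sketch showing how the three callbacks above fit together; the stand-in model and the attribute values are assumptions purely for illustration and are not part of the streams flow itself.

class _EchoModel:
    """Stand-in for the real trained model object."""

    def predict(self, frame):
        return [0] * len(frame)


state = {}
init(state)
load_model(state, _EchoModel())

sample_event = {
    'ID': 1, 'Age': 35, 'Experience': 10, 'Income': 85, 'ZIPCode': 94720,
    'Family': 2, 'CCAvg': 1.5, 'Education': 2, 'Mortgage': 0,
    'SecuritiesAccount': 0, 'CDAccount': 0, 'Online': 1, 'CreditCard': 0,
}
print(process(sample_event, state))  # the returned event gains a 'Prediction' key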
| 39.852459
| 175
| 0.733854
|
7c941883ac16df886cf59630b26b32ee75481c36
| 2,771
|
py
|
Python
|
src/sentry_plugins/jira_ac/utils.py
|
MattPark/sentry-plugins
|
3b08a43ea9ca1fb0fd183c3fe7bd5606f14ba993
|
[
"Apache-2.0"
] | null | null | null |
src/sentry_plugins/jira_ac/utils.py
|
MattPark/sentry-plugins
|
3b08a43ea9ca1fb0fd183c3fe7bd5606f14ba993
|
[
"Apache-2.0"
] | 2
|
2018-05-26T13:19:41.000Z
|
2018-06-01T20:14:41.000Z
|
src/sentry_plugins/jira_ac/utils.py
|
MattPark/sentry-plugins
|
3b08a43ea9ca1fb0fd183c3fe7bd5606f14ba993
|
[
"Apache-2.0"
] | 1
|
2018-05-26T11:45:46.000Z
|
2018-05-26T11:45:46.000Z
|
from __future__ import absolute_import
import hashlib
import jwt
from six.moves.urllib.parse import quote
from sentry_plugins.exceptions import ApiError
def percent_encode(val):
# see https://en.wikipedia.org/wiki/Percent-encoding
return quote(val.encode('utf8', errors='replace')).replace('%7E', '~').replace('/', '%2F')
def get_query_hash(uri, method, query_params=None):
# see
# https://developer.atlassian.com/static/connect/docs/latest/concepts/understanding-jwt.html#qsh
uri = uri.rstrip('/')
method = method.upper()
if query_params is None:
query_params = {}
sorted_query = []
for k, v in sorted(query_params.items()):
# don't include jwt query param
if k != 'jwt':
if isinstance(v, list):
                param_val = ','.join(percent_encode(val) for val in v)
else:
param_val = percent_encode(v)
sorted_query.append('%s=%s' % (percent_encode(k), param_val))
query_string = '%s&%s&%s' % (method, uri, '&'.join(sorted_query))
return hashlib.sha256(query_string.encode('utf8')).hexdigest()
def get_jira_auth_from_request(request):
# https://developer.atlassian.com/static/connect/docs/latest/concepts/authentication.html
# Extract the JWT token from the request's jwt query
# parameter or the authorization header.
token = request.GET.get('jwt')
if token is None:
raise ApiError('No token parameter')
# Decode the JWT token, without verification. This gives
# you a header JSON object, a claims JSON object, and a signature.
decoded = jwt.decode(token, verify=False)
# Extract the issuer ('iss') claim from the decoded, unverified
# claims object. This is the clientKey for the tenant - an identifier
# for the Atlassian application making the call
issuer = decoded['iss']
# Look up the sharedSecret for the clientKey, as stored
# by the add-on during the installation handshake
from sentry_plugins.jira_ac.models import JiraTenant
jira_auth = JiraTenant.objects.get(client_key=issuer)
# Verify the signature with the sharedSecret and
# the algorithm specified in the header's alg field.
decoded_verified = jwt.decode(token, jira_auth.secret)
# Verify the query has not been tampered by Creating a Query Hash
# and comparing it against the qsh claim on the verified token.
# TODO: probably shouldn't need to hardcode get... for post maybe
# the secret should just be a hidden field in the form ?
qsh = get_query_hash(request.path, 'GET', request.GET)
# qsh = get_query_hash(request.path, request.method, request.GET)
if qsh != decoded_verified['qsh']:
raise ApiError('Query hash mismatch')
return jira_auth
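
A small standalone sketch of get_query_hash, using an arbitrary path and query purely for illustration; it reproduces the qsh value that get_jira_auth_from_request compares against the verified token's claim.

qsh = get_query_hash('/jira/issue-updated/', 'GET', {'issue_key': 'SENTRY-1'})
print(qsh)  # 64-character SHA-256 hex digest of "GET&/jira/issue-updated&issue_key=SENTRY-1"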
| 39.585714
| 100
| 0.690364
|
1da3c59ec3e6707daf650f5e5b58807ae339242e
| 355
|
py
|
Python
|
L2/cifar10_download.py
|
busyyang/DL_21tensorflow
|
ccac457b66a80f3de80d14d503e6cec8681537eb
|
[
"MIT"
] | null | null | null |
L2/cifar10_download.py
|
busyyang/DL_21tensorflow
|
ccac457b66a80f3de80d14d503e6cec8681537eb
|
[
"MIT"
] | null | null | null |
L2/cifar10_download.py
|
busyyang/DL_21tensorflow
|
ccac457b66a80f3de80d14d503e6cec8681537eb
|
[
"MIT"
] | null | null | null |
# coding:utf-8
# Import the pre-written cifar10 module from the current directory
import cifar10
# Import tensorflow
import tensorflow as tf
# tf.app.flags.FLAGS is TensorFlow's internal global variable store; it can also be used to handle command-line arguments
FLAGS = tf.app.flags.FLAGS
# The cifar10 module predefines tf.app.flags.FLAGS.data_dir as the path to the CIFAR-10 data
# Here we change this path to cifar10_data/
FLAGS.data_dir = 'cifar10_data/'
# If the data files do not exist, run the download
cifar10.maybe_download_and_extract()
| 23.666667
| 58
| 0.830986
|
329f0ebb933f469915be13ef7f18b84afe34ee27
| 1,848
|
py
|
Python
|
beer_search_v2/management/commands/update_untappd_items.py
|
Ernir/bjorleitin
|
b1850ecfa5e2694961a34d450430a001a1a39a5e
|
[
"MIT"
] | 1
|
2016-08-18T19:06:54.000Z
|
2016-08-18T19:06:54.000Z
|
beer_search_v2/management/commands/update_untappd_items.py
|
Ernir/bjorleitin
|
b1850ecfa5e2694961a34d450430a001a1a39a5e
|
[
"MIT"
] | null | null | null |
beer_search_v2/management/commands/update_untappd_items.py
|
Ernir/bjorleitin
|
b1850ecfa5e2694961a34d450430a001a1a39a5e
|
[
"MIT"
] | null | null | null |
from beer_search_v2.utils import update_untappd_item
from beer_search_v2.models import ModifiableSetting, UntappdEntity
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def __init__(self):
self.verbose = True
super().__init__()
def get_indices(self):
"""
        The Untappd API is rate-limited, so the list needs to be limited as well.
"""
mod_settings = ModifiableSetting.objects
start_index_setting = mod_settings.get(key="untappd_start_index")
num_per_run_setting = mod_settings.get(key="untappd_items_per_run")
start_index = start_index_setting.value
end_index = start_index_setting.value + num_per_run_setting.value
return start_index, end_index
def update_indices(self, end_index):
start_index_setting = ModifiableSetting.objects.get(key="untappd_start_index")
num_items = UntappdEntity.objects.count()
if end_index > num_items: # We've finished the list for now, resetting
start_index_setting.value = 0
else: # Next time, we start further into the list.
start_index_setting.value = end_index
start_index_setting.save()
def add_arguments(self, parser):
parser.add_argument(
"--verbose",
dest="verbose",
help="specify how much the script should write",
default=True,
action="store_true"
)
def handle(self, *args, **options):
self.verbose = not not options["verbose"]
untappd_entities = UntappdEntity.objects.all()
start_index, end_index = self.get_indices()
for entity in untappd_entities[start_index:end_index]:
update_untappd_item(entity, self.verbose)
self.update_indices(end_index)
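
A hedged sketch of the sliding window that get_indices/update_indices implement, with plain integers standing in for the ModifiableSetting rows and the UntappdEntity count; the numbers are illustrative assumptions.

start, per_run, total = 40, 25, 57   # untappd_start_index, untappd_items_per_run, entity count

end = start + per_run                # entities [40:65] are refreshed on this run
start = 0 if end > total else end    # 65 > 57, so the window resets to the top of the list
print(start)                         # 0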
| 33.6
| 86
| 0.665584
|
394902d1ad4cb6774f7dd3f6ad2a45e768efc1d7
| 151
|
py
|
Python
|
app/tools/__init__.py
|
Tingerlink/tingerwork
|
0e3d360bf97a62e088f12aa72277200b75e43643
|
[
"MIT"
] | null | null | null |
app/tools/__init__.py
|
Tingerlink/tingerwork
|
0e3d360bf97a62e088f12aa72277200b75e43643
|
[
"MIT"
] | null | null | null |
app/tools/__init__.py
|
Tingerlink/tingerwork
|
0e3d360bf97a62e088f12aa72277200b75e43643
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Module '__init__.py' of the project 'tingerwork'
# :date_create: 04.12.2017.23:59
# :author: Tingerlink
# :description:
| 25.166667
| 51
| 0.649007
|
5ab37ca17d6d150f7bc0e8c9f3b0d72ca6fbeda6
| 59,275
|
py
|
Python
|
awx/main/models/unified_jobs.py
|
withshubh/awx
|
38f3176221fe6981f38931d050705b736ea89fdc
|
[
"Apache-2.0"
] | null | null | null |
awx/main/models/unified_jobs.py
|
withshubh/awx
|
38f3176221fe6981f38931d050705b736ea89fdc
|
[
"Apache-2.0"
] | null | null | null |
awx/main/models/unified_jobs.py
|
withshubh/awx
|
38f3176221fe6981f38931d050705b736ea89fdc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from io import StringIO
import datetime
import codecs
import json
import logging
import os
import re
import socket
import subprocess
import tempfile
from collections import OrderedDict
# Django
from django.conf import settings
from django.db import models, connection
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.utils.encoding import smart_text
from django.contrib.contenttypes.models import ContentType
# REST Framework
from rest_framework.exceptions import ParseError
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
# AWX
from awx.main.models.base import (
CommonModelNameNotUnique,
PasswordFieldsModel,
NotificationFieldsModel,
prevent_search
)
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.control import Control as ControlDispatcher
from awx.main.registrar import activity_stream_registrar
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin
from awx.main.utils import (
camelcase_to_underscore, get_model_for_type,
encrypt_dict, decrypt_field, _inventory_updates,
copy_model_by_class, copy_m2m_relationships,
get_type_for_model, parse_yaml_or_json, getattr_dne,
polymorphic, schedule_task_manager
)
from awx.main.constants import ACTIVE_STATES, CAN_CANCEL
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.consumers import emit_channel_notification
from awx.main.fields import JSONField, JSONBField, AskForField, OrderedManyToManyField
__all__ = ['UnifiedJobTemplate', 'UnifiedJob', 'StdoutMaxBytesExceeded']
logger = logging.getLogger('awx.main.models.unified_jobs')
logger_job_lifecycle = logging.getLogger('awx.analytics.job_lifecycle')
# NOTE: ACTIVE_STATES moved to constants because it is used by parent modules
class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEnvironmentMixin, NotificationFieldsModel):
'''
Concrete base class for unified job templates.
'''
# status inherits from related jobs. Thus, status must be able to be set to any status that a job status is settable to.
JOB_STATUS_CHOICES = [
('new', _('New')), # Job has been created, but not started.
('pending', _('Pending')), # Job is pending Task Manager processing (blocked by dependency req, capacity or a concurrent job)
('waiting', _('Waiting')), # Job has been assigned to run on a specific node (and is about to run).
('running', _('Running')), # Job is currently running.
('successful', _('Successful')), # Job completed successfully.
('failed', _('Failed')), # Job completed, but with failures.
('error', _('Error')), # The job was unable to run.
('canceled', _('Canceled')), # The job was canceled before completion.
]
COMMON_STATUS_CHOICES = JOB_STATUS_CHOICES + [
('never updated', _('Never Updated')), # A job has never been run using this template.
]
PROJECT_STATUS_CHOICES = COMMON_STATUS_CHOICES + [
('ok', _('OK')), # Project is not configured for SCM and path exists.
('missing', _('Missing')), # Project path does not exist.
]
INVENTORY_SOURCE_STATUS_CHOICES = COMMON_STATUS_CHOICES + [
('none', _('No External Source')), # Inventory source is not configured to update from an external source.
]
JOB_TEMPLATE_STATUS_CHOICES = COMMON_STATUS_CHOICES
DEPRECATED_STATUS_CHOICES = [
# No longer used for Project / Inventory Source:
('updating', _('Updating')), # Same as running.
]
ALL_STATUS_CHOICES = OrderedDict(PROJECT_STATUS_CHOICES + INVENTORY_SOURCE_STATUS_CHOICES + JOB_TEMPLATE_STATUS_CHOICES + DEPRECATED_STATUS_CHOICES).items()
class Meta:
app_label = 'main'
ordering = ('name',)
# unique_together here is intentionally commented out. Please make sure sub-classes of this model
# contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
#unique_together = [('polymorphic_ctype', 'name', 'organization')]
old_pk = models.PositiveIntegerField(
null=True,
default=None,
editable=False,
)
current_job = models.ForeignKey(
'UnifiedJob',
null=True,
default=None,
editable=False,
related_name='%(class)s_as_current_job+',
on_delete=models.SET_NULL,
)
last_job = models.ForeignKey(
'UnifiedJob',
null=True,
default=None,
editable=False,
related_name='%(class)s_as_last_job+',
on_delete=models.SET_NULL,
)
last_job_failed = models.BooleanField(
default=False,
editable=False,
)
last_job_run = models.DateTimeField(
null=True,
default=None,
editable=False,
)
#on_missed_schedule = models.CharField(
# max_length=32,
# choices=[],
#)
next_job_run = models.DateTimeField(
null=True,
default=None,
editable=False,
)
next_schedule = models.ForeignKey( # Schedule entry responsible for next_job_run.
'Schedule',
null=True,
default=None,
editable=False,
related_name='%(class)s_as_next_schedule+',
on_delete=polymorphic.SET_NULL,
)
status = models.CharField(
max_length=32,
choices=ALL_STATUS_CHOICES,
default='ok',
editable=False,
)
organization = models.ForeignKey(
'Organization',
blank=True,
null=True,
on_delete=polymorphic.SET_NULL,
related_name='%(class)ss',
help_text=_('The organization used to determine access to this template.'),
)
credentials = models.ManyToManyField(
'Credential',
related_name='%(class)ss',
)
labels = models.ManyToManyField(
"Label",
blank=True,
related_name='%(class)s_labels'
)
instance_groups = OrderedManyToManyField(
'InstanceGroup',
blank=True,
through='UnifiedJobTemplateInstanceGroupMembership'
)
def get_absolute_url(self, request=None):
real_instance = self.get_real_instance()
if real_instance != self:
return real_instance.get_absolute_url(request=request)
else:
return ''
def unique_error_message(self, model_class, unique_check):
# If polymorphic_ctype is part of a unique check, return a list of the
# remaining fields instead of the error message.
if len(unique_check) >= 2 and 'polymorphic_ctype' in unique_check:
return [x for x in unique_check if x != 'polymorphic_ctype']
else:
return super(UnifiedJobTemplate, self).unique_error_message(model_class, unique_check)
@classmethod
def _submodels_with_roles(cls):
ujt_classes = [c for c in cls.__subclasses__()
if c._meta.model_name not in ['inventorysource', 'systemjobtemplate']]
ct_dict = ContentType.objects.get_for_models(*ujt_classes)
return [ct.id for ct in ct_dict.values()]
@classmethod
def accessible_pk_qs(cls, accessor, role_field):
'''
A re-implementation of accessible pk queryset for the "normal" unified JTs.
Does not return inventory sources or system JTs, these should
be handled inside of get_queryset where it is utilized.
'''
# do not use this if in a subclass
if cls != UnifiedJobTemplate:
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
return ResourceMixin._accessible_pk_qs(
cls, accessor, role_field, content_types=cls._submodels_with_roles())
def _perform_unique_checks(self, unique_checks):
# Handle the list of unique fields returned above. Replace with an
# appropriate error message for the remaining field(s) in the unique
# check and cleanup the errors dictionary.
errors = super(UnifiedJobTemplate, self)._perform_unique_checks(unique_checks)
for key, msgs in errors.items():
if key != NON_FIELD_ERRORS:
continue
for msg in msgs:
if isinstance(msg, (list, tuple)):
if len(msg) == 1:
new_key = msg[0]
else:
new_key = NON_FIELD_ERRORS
model_class = self.get_real_concrete_instance_class()
errors.setdefault(new_key, []).append(self.unique_error_message(model_class, msg))
errors[key] = [x for x in msgs if not isinstance(x, (list, tuple))]
for key, msgs in errors.copy().items():
if not msgs:
del errors[key]
return errors
def validate_unique(self, exclude=None):
# Make sure we set the polymorphic_ctype before validating, and omit
# it from the list of excluded fields.
self.pre_save_polymorphic()
if exclude and 'polymorphic_ctype' in exclude:
exclude = [x for x in exclude if x != 'polymorphic_ctype']
return super(UnifiedJobTemplate, self).validate_unique(exclude)
@property # Alias for backwards compatibility.
def current_update(self):
return self.current_job
@property # Alias for backwards compatibility.
def last_update(self):
return self.last_job
@property # Alias for backwards compatibility.
def last_update_failed(self):
return self.last_job_failed
@property # Alias for backwards compatibility.
def last_updated(self):
return self.last_job_run
def update_computed_fields(self):
related_schedules = self.schedules.filter(enabled=True, next_run__isnull=False).order_by('-next_run')
new_next_schedule = related_schedules.first()
if new_next_schedule:
if new_next_schedule.pk == self.next_schedule_id and new_next_schedule.next_run == self.next_job_run:
return # no-op, common for infrequent schedules
self.next_schedule = new_next_schedule
self.next_job_run = new_next_schedule.next_run
self.save(update_fields=['next_schedule', 'next_job_run'])
def save(self, *args, **kwargs):
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
# Update status and last_updated fields.
if not getattr(_inventory_updates, 'is_updating', False):
updated_fields = self._set_status_and_last_job_run(save=False)
for field in updated_fields:
if field not in update_fields:
update_fields.append(field)
# Do the actual save.
super(UnifiedJobTemplate, self).save(*args, **kwargs)
def _get_current_status(self):
# Override in subclasses as needed.
if self.current_job and self.current_job.status:
return self.current_job.status
elif not self.last_job:
return 'never updated'
elif self.last_job_failed:
return 'failed'
else:
return 'successful'
def _get_last_job_run(self):
# Override in subclasses as needed.
if self.last_job:
return self.last_job.finished
def _set_status_and_last_job_run(self, save=True):
status = self._get_current_status()
last_job_run = self._get_last_job_run()
return self.update_fields(status=status, last_job_run=last_job_run,
save=save)
def _can_update(self):
# Override in subclasses as needed.
return False
@property
def can_update(self):
return self._can_update()
def update(self, **kwargs):
if self.can_update:
unified_job = self.create_unified_job()
unified_job.signal_start(**kwargs)
return unified_job
@classmethod
def _get_unified_job_class(cls):
'''
Return subclass of UnifiedJob that is created from this template.
'''
raise NotImplementedError # Implement in subclass.
@property
def notification_templates(self):
'''
Return notification_templates relevant to this Unified Job Template
'''
# NOTE: Derived classes should implement
from awx.main.models.notifications import NotificationTemplate
return NotificationTemplate.objects.none()
def create_unified_job(self, **kwargs):
'''
Create a new unified job based on this unified job template.
'''
new_job_passwords = kwargs.pop('survey_passwords', {})
eager_fields = kwargs.pop('_eager_fields', None)
# automatically encrypt survey fields
if hasattr(self, 'survey_spec') and getattr(self, 'survey_enabled', False):
password_list = self.survey_password_variables()
encrypt_dict(kwargs.get('extra_vars', {}), password_list)
unified_job_class = self._get_unified_job_class()
fields = self._get_unified_job_field_names()
parent_field_name = None
if "_unified_job_class" in kwargs:
# Special case where spawned job is different type than usual
# Only used for slice jobs
unified_job_class = kwargs.pop("_unified_job_class")
fields = unified_job_class._get_unified_job_field_names() & fields
parent_field_name = kwargs.pop('_parent_field_name')
unallowed_fields = set(kwargs.keys()) - set(fields)
validated_kwargs = kwargs.copy()
if unallowed_fields:
if parent_field_name is None:
logger.warn('Fields {} are not allowed as overrides to spawn from {}.'.format(
', '.join(unallowed_fields), self
))
for f in unallowed_fields:
validated_kwargs.pop(f)
unified_job = copy_model_by_class(self, unified_job_class, fields, validated_kwargs)
if eager_fields:
for fd, val in eager_fields.items():
setattr(unified_job, fd, val)
unified_job.execution_environment = self.resolve_execution_environment()
# NOTE: slice workflow jobs _get_parent_field_name method
# is not correct until this is set
if not parent_field_name:
parent_field_name = unified_job._get_parent_field_name()
setattr(unified_job, parent_field_name, self)
# For JobTemplate-based jobs with surveys, add passwords to list for perma-redaction
if hasattr(self, 'survey_spec') and getattr(self, 'survey_enabled', False):
for password in self.survey_password_variables():
new_job_passwords[password] = REPLACE_STR
if new_job_passwords:
unified_job.survey_passwords = new_job_passwords
kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch
from awx.main.signals import disable_activity_stream, activity_stream_create
with disable_activity_stream():
# Don't emit the activity stream record here for creation,
# because we haven't attached important M2M relations yet, like
# credentials and labels
unified_job.save()
# Labels and credentials copied here
if validated_kwargs.get('credentials'):
Credential = UnifiedJob._meta.get_field('credentials').related_model
cred_dict = Credential.unique_dict(self.credentials.all())
prompted_dict = Credential.unique_dict(validated_kwargs['credentials'])
# combine prompted credentials with JT
cred_dict.update(prompted_dict)
validated_kwargs['credentials'] = [cred for cred in cred_dict.values()]
kwargs['credentials'] = validated_kwargs['credentials']
with disable_activity_stream():
copy_m2m_relationships(self, unified_job, fields, kwargs=validated_kwargs)
if 'extra_vars' in validated_kwargs:
unified_job.handle_extra_data(validated_kwargs['extra_vars'])
# Create record of provided prompts for relaunch and rescheduling
unified_job.create_config_from_prompts(kwargs, parent=self)
# manually issue the create activity stream entry _after_ M2M relations
# have been associated to the UJ
if unified_job.__class__ in activity_stream_registrar.models:
activity_stream_create(None, unified_job, True)
unified_job.log_lifecycle("created")
return unified_job
@classmethod
def get_ask_mapping(cls):
'''
Creates dictionary that maps the unified job field (keys)
to the field that enables prompting for the field (values)
'''
mapping = {}
for field in cls._meta.fields:
if isinstance(field, AskForField):
mapping[field.allows_field] = field.name
return mapping
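    # Illustrative sketch (assumed field names): a subclass defining AskForField
    # attributes such as ask_limit_on_launch and ask_tags_on_launch would yield
    #
    #     cls.get_ask_mapping()
    #     # -> {'limit': 'ask_limit_on_launch', 'job_tags': 'ask_tags_on_launch'}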
@classmethod
def _get_unified_jt_copy_names(cls):
return cls._get_unified_job_field_names()
def copy_unified_jt(self):
'''
Returns saved object, including related fields.
Create a copy of this unified job template.
'''
unified_jt_class = self.__class__
fields = self._get_unified_jt_copy_names()
unified_jt = copy_model_by_class(self, unified_jt_class, fields, {})
time_now = now()
unified_jt.name = unified_jt.name.split('@', 1)[0] + ' @ ' + time_now.strftime('%I:%M:%S %p')
unified_jt.save()
copy_m2m_relationships(self, unified_jt, fields)
return unified_jt
def _accept_or_ignore_job_kwargs(self, _exclude_errors=(), **kwargs):
'''
Override in subclass if template accepts _any_ prompted params
'''
errors = {}
if kwargs:
for field_name in kwargs.keys():
errors[field_name] = [_("Field is not allowed on launch.")]
return ({}, kwargs, errors)
def accept_or_ignore_variables(self, data, errors=None, _exclude_errors=(), extra_passwords=None):
'''
If subclasses accept any `variables` or `extra_vars`, they should
define _accept_or_ignore_variables to place those variables in the accepted dict,
according to the acceptance rules of the template.
'''
if errors is None:
errors = {}
if not isinstance(data, dict):
try:
data = parse_yaml_or_json(data, silent_failure=False)
except ParseError as exc:
errors['extra_vars'] = [str(exc)]
return ({}, data, errors)
if hasattr(self, '_accept_or_ignore_variables'):
# SurveyJobTemplateMixin cannot override any methods because of
# resolution order, forced by how metaclass processes fields,
# thus the need for hasattr check
if extra_passwords:
return self._accept_or_ignore_variables(
data, errors, _exclude_errors=_exclude_errors, extra_passwords=extra_passwords)
else:
return self._accept_or_ignore_variables(data, errors, _exclude_errors=_exclude_errors)
elif data:
errors['extra_vars'] = [
                _('Variables {list_of_keys} provided, but this template cannot accept variables.').format(
                    list_of_keys=', '.join(data.keys()))]
return ({}, data, errors)
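    # Illustrative sketch: for a template with no variable support,
    #
    #     accepted, rejected, errors = ujt.accept_or_ignore_variables({'foo': 'bar'})
    #
    # returns accepted == {}, rejected == {'foo': 'bar'}, and errors containing a
    # single 'extra_vars' message explaining that variables cannot be accepted.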
class UnifiedJobTypeStringMixin(object):
@classmethod
def get_instance_by_type(cls, job_type, job_id):
model = get_model_for_type(job_type)
if not model:
return None
return model.objects.get(id=job_id)
def model_to_str(self):
return camelcase_to_underscore(self.__class__.__name__)
class UnifiedJobDeprecatedStdout(models.Model):
class Meta:
managed = False
db_table = 'main_unifiedjob'
result_stdout_text = models.TextField(
null=True,
editable=False,
)
class StdoutMaxBytesExceeded(Exception):
def __init__(self, total, supported):
self.total = total
self.supported = supported
class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique,
UnifiedJobTypeStringMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin):
'''
Concrete base class for unified job run by the task engine.
'''
STATUS_CHOICES = UnifiedJobTemplate.JOB_STATUS_CHOICES
LAUNCH_TYPE_CHOICES = [
('manual', _('Manual')), # Job was started manually by a user.
('relaunch', _('Relaunch')), # Job was started via relaunch.
('callback', _('Callback')), # Job was started via host callback.
('scheduled', _('Scheduled')), # Job was started from a schedule.
('dependency', _('Dependency')), # Job was started as a dependency of another job.
('workflow', _('Workflow')), # Job was started from a workflow job.
('webhook', _('Webhook')), # Job was started from a webhook event.
('sync', _('Sync')), # Job was started from a project sync.
('scm', _('SCM Update')) # Job was created as an Inventory SCM sync.
]
PASSWORD_FIELDS = ('start_args',)
class Meta:
app_label = 'main'
ordering = ('id',)
old_pk = models.PositiveIntegerField(
null=True,
default=None,
editable=False,
)
emitted_events = models.PositiveIntegerField(
default=0,
editable=False,
)
unified_job_template = models.ForeignKey(
'UnifiedJobTemplate',
null=True, # Some jobs can be run without a template.
default=None,
editable=False,
related_name='%(class)s_unified_jobs',
on_delete=polymorphic.SET_NULL,
)
created = models.DateTimeField(
default=None,
editable=False,
db_index=True, # add an index, this is a commonly queried field
)
launch_type = models.CharField(
max_length=20,
choices=LAUNCH_TYPE_CHOICES,
default='manual',
editable=False,
db_index=True
)
schedule = models.ForeignKey( # Which schedule entry was responsible for starting this job.
'Schedule',
null=True,
default=None,
editable=False,
on_delete=polymorphic.SET_NULL,
)
dependent_jobs = models.ManyToManyField(
'self',
editable=False,
related_name='%(class)s_blocked_jobs+',
)
execution_node = models.TextField(
blank=True,
default='',
editable=False,
help_text=_("The node the job executed on."),
)
controller_node = models.TextField(
blank=True,
default='',
editable=False,
help_text=_("The instance that managed the isolated execution environment."),
)
notifications = models.ManyToManyField(
'Notification',
editable=False,
related_name='%(class)s_notifications',
)
cancel_flag = models.BooleanField(
blank=True,
default=False,
editable=False,
)
status = models.CharField(
max_length=20,
choices=STATUS_CHOICES,
default='new',
editable=False,
db_index=True,
)
failed = models.BooleanField(
default=False,
editable=False,
)
started = models.DateTimeField(
null=True,
default=None,
editable=False,
help_text=_("The date and time the job was queued for starting."),
)
dependencies_processed = models.BooleanField(
default=False,
editable=False,
help_text=_("If True, the task manager has already processed potential dependencies for this job.")
)
finished = models.DateTimeField(
null=True,
default=None,
editable=False,
help_text=_("The date and time the job finished execution."),
db_index=True,
)
canceled_on = models.DateTimeField(
null=True,
default=None,
editable=False,
help_text=_("The date and time when the cancel request was sent."),
db_index=True,
)
elapsed = models.DecimalField(
max_digits=12,
decimal_places=3,
editable=False,
help_text=_("Elapsed time in seconds that the job ran."),
)
job_args = prevent_search(models.TextField(
blank=True,
default='',
editable=False,
))
job_cwd = models.CharField(
max_length=1024,
blank=True,
default='',
editable=False,
)
job_env = prevent_search(JSONField(
blank=True,
default=dict,
editable=False,
))
job_explanation = models.TextField(
blank=True,
default='',
editable=False,
help_text=_("A status field to indicate the state of the job if it wasn't able to run and capture stdout"),
)
start_args = prevent_search(models.TextField(
blank=True,
default='',
editable=False,
))
result_traceback = models.TextField(
blank=True,
default='',
editable=False,
)
celery_task_id = models.CharField(
max_length=100,
blank=True,
default='',
editable=False,
)
labels = models.ManyToManyField(
"Label",
blank=True,
related_name='%(class)s_labels'
)
instance_group = models.ForeignKey(
'InstanceGroup',
blank=True,
null=True,
default=None,
on_delete=polymorphic.SET_NULL,
help_text=_('The Instance group the job was run under'),
)
organization = models.ForeignKey(
'Organization',
blank=True,
null=True,
on_delete=polymorphic.SET_NULL,
related_name='%(class)ss',
help_text=_('The organization used to determine access to this unified job.'),
)
credentials = models.ManyToManyField(
'Credential',
related_name='%(class)ss',
)
installed_collections = JSONBField(
blank=True,
default=dict,
editable=False,
help_text=_("The Collections names and versions installed in the execution environment."),
)
ansible_version = models.CharField(
max_length=255,
blank=True,
default='',
editable=False,
help_text=_("The version of Ansible Core installed in the execution environment."),
)
def get_absolute_url(self, request=None):
RealClass = self.get_real_instance_class()
if RealClass != UnifiedJob:
return RealClass.get_absolute_url(RealClass(pk=self.pk), request=request)
else:
return ''
def get_ui_url(self):
real_instance = self.get_real_instance()
if real_instance != self:
return real_instance.get_ui_url()
else:
return ''
@classmethod
def _get_task_class(cls):
raise NotImplementedError # Implement in subclasses.
@classmethod
def supports_isolation(cls):
return False
@property
def can_run_containerized(self):
return False
def _get_parent_field_name(self):
return 'unified_job_template' # Override in subclasses.
@classmethod
def _get_unified_job_template_class(cls):
'''
Return subclass of UnifiedJobTemplate that applies to this unified job.
'''
raise NotImplementedError # Implement in subclass.
def _global_timeout_setting(self):
"Override in child classes, None value indicates this is not configurable"
return None
def _resources_sufficient_for_launch(self):
return True
def __str__(self):
return u'%s-%s-%s' % (self.created, self.id, self.status)
@property
def log_format(self):
return '{} {} ({})'.format(get_type_for_model(type(self)), self.id, self.status)
def _get_parent_instance(self):
return getattr(self, self._get_parent_field_name(), None)
def _update_parent_instance_no_save(self, parent_instance, update_fields=None):
if update_fields is None:
update_fields = []
def parent_instance_set(key, val):
setattr(parent_instance, key, val)
if key not in update_fields:
update_fields.append(key)
if parent_instance:
if self.status in ('pending', 'waiting', 'running'):
if parent_instance.current_job != self:
parent_instance_set('current_job', self)
                # Update parent with all the 'good' states of its child
if parent_instance.status != self.status:
parent_instance_set('status', self.status)
elif self.status in ('successful', 'failed', 'error', 'canceled'):
if parent_instance.current_job == self:
parent_instance_set('current_job', None)
parent_instance_set('last_job', self)
parent_instance_set('last_job_failed', self.failed)
return update_fields
def _update_parent_instance(self):
parent_instance = self._get_parent_instance()
if parent_instance:
update_fields = self._update_parent_instance_no_save(parent_instance)
parent_instance.save(update_fields=update_fields)
def save(self, *args, **kwargs):
"""Save the job, with current status, to the database.
Ensure that all data is consistent before doing so.
"""
# If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save.
update_fields = kwargs.get('update_fields', [])
# Get status before save...
status_before = self.status or 'new'
# If this job already exists in the database, retrieve a copy of
# the job in its prior state.
if self.pk:
self_before = self.__class__.objects.get(pk=self.pk)
if self_before.status != self.status:
status_before = self_before.status
# Sanity check: Is this a failure? Ensure that the failure value
# matches the status.
failed = bool(self.status in ('failed', 'error', 'canceled'))
if self.failed != failed:
self.failed = failed
if 'failed' not in update_fields:
update_fields.append('failed')
# Sanity check: Has the job just started? If so, mark down its start
# time.
if self.status == 'running' and not self.started:
self.started = now()
if 'started' not in update_fields:
update_fields.append('started')
# Sanity check: Has the job just completed? If so, mark down its
# completion time, and record its output to the database.
if self.status in ('successful', 'failed', 'error', 'canceled') and not self.finished:
# Record the `finished` time.
self.finished = now()
if 'finished' not in update_fields:
update_fields.append('finished')
# If we have a start and finished time, and haven't already calculated
# out the time that elapsed, do so.
if self.started and self.finished and not self.elapsed:
td = self.finished - self.started
elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
else:
elapsed = 0.0
if self.elapsed != elapsed:
self.elapsed = str(elapsed)
if 'elapsed' not in update_fields:
update_fields.append('elapsed')
# Ensure that the job template information is current.
if self.unified_job_template != self._get_parent_instance():
self.unified_job_template = self._get_parent_instance()
if 'unified_job_template' not in update_fields:
update_fields.append('unified_job_template')
if self.cancel_flag and not self.canceled_on:
# Record the 'canceled' time.
self.canceled_on = now()
if 'canceled_on' not in update_fields:
update_fields.append('canceled_on')
# Okay; we're done. Perform the actual save.
result = super(UnifiedJob, self).save(*args, **kwargs)
# If status changed, update the parent instance.
if self.status != status_before:
# Update parent outside of the transaction for Job w/ allow_simultaneous=True
# This dodges lock contention at the expense of the foreign key not being
# completely correct.
if getattr(self, 'allow_simultaneous', False):
connection.on_commit(self._update_parent_instance)
else:
self._update_parent_instance()
# Done.
return result
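    # Worked example (illustration only): the elapsed computation above is the
    # long-hand form of timedelta.total_seconds(). For a job that ran for
    # 3 minutes 5.25 seconds,
    #
    #     td = finished - started   # timedelta(seconds=185, microseconds=250000)
    #     (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
    #     # -> 185.25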
def copy_unified_job(self, _eager_fields=None, **new_prompts):
'''
Returns saved object, including related fields.
Create a copy of this unified job for the purpose of relaunch
'''
unified_job_class = self.__class__
unified_jt_class = self._get_unified_job_template_class()
parent_field_name = self._get_parent_field_name()
fields = unified_jt_class._get_unified_job_field_names() | set([parent_field_name])
create_data = {}
if _eager_fields:
create_data = _eager_fields.copy()
create_data["launch_type"] = "relaunch"
prompts = self.launch_prompts()
if self.unified_job_template and (prompts is not None):
prompts.update(new_prompts)
prompts['_eager_fields'] = create_data
unified_job = self.unified_job_template.create_unified_job(**prompts)
else:
unified_job = copy_model_by_class(self, unified_job_class, fields, {})
for fd, val in create_data.items():
setattr(unified_job, fd, val)
unified_job.save()
# Labels copied here
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
copy_m2m_relationships(self, unified_job, fields)
return unified_job
def launch_prompts(self):
'''
Return dictionary of prompts job was launched with
returns None if unknown
'''
JobLaunchConfig = self._meta.get_field('launch_config').related_model
try:
config = self.launch_config
return config.prompts_dict()
except JobLaunchConfig.DoesNotExist:
return None
def create_config_from_prompts(self, kwargs, parent=None):
'''
Create a launch configuration entry for this job, given prompts
returns None if it can not be created
'''
JobLaunchConfig = self._meta.get_field('launch_config').related_model
config = JobLaunchConfig(job=self)
if parent is None:
parent = getattr(self, self._get_parent_field_name())
if parent is None:
return
valid_fields = list(parent.get_ask_mapping().keys())
# Special cases allowed for workflows
if hasattr(self, 'extra_vars'):
valid_fields.extend(['survey_passwords', 'extra_vars'])
else:
kwargs.pop('survey_passwords', None)
for field_name, value in kwargs.items():
if field_name not in valid_fields:
raise Exception('Unrecognized launch config field {}.'.format(field_name))
if field_name == 'credentials':
continue
key = field_name
if key == 'extra_vars':
key = 'extra_data'
setattr(config, key, value)
config.save()
job_creds = set(kwargs.get('credentials', []))
if 'credentials' in [field.name for field in parent._meta.get_fields()]:
job_creds = job_creds - set(parent.credentials.all())
if job_creds:
config.credentials.add(*job_creds)
return config
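    # Illustrative sketch (assumed prompt values): for kwargs such as
    # {'extra_vars': {'x': 1}, 'limit': 'db', 'credentials': [cred]}, the saved
    # JobLaunchConfig stores extra_data={'x': 1} and limit='db', and `cred` is
    # added to config.credentials only when it is not already attached to the
    # parent template.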
@property
def event_class(self):
raise NotImplementedError()
@property
def job_type_name(self):
return self.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
@property
def result_stdout_text(self):
related = UnifiedJobDeprecatedStdout.objects.get(pk=self.pk)
return related.result_stdout_text or ''
@result_stdout_text.setter
def result_stdout_text(self, value):
# TODO: remove this method once all stdout is based on jobevents
# (because it won't be used for writing anymore)
related = UnifiedJobDeprecatedStdout.objects.get(pk=self.pk)
related.result_stdout_text = value
related.save()
@property
def event_parent_key(self):
tablename = self._meta.db_table
return {
'main_job': 'job_id',
'main_adhoccommand': 'ad_hoc_command_id',
'main_projectupdate': 'project_update_id',
'main_inventoryupdate': 'inventory_update_id',
'main_systemjob': 'system_job_id',
}[tablename]
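    # Example: a Job row lives in the 'main_job' table, so this resolves to
    # 'job_id' and get_event_queryset() below filters events on job_id=self.id.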
def get_event_queryset(self):
return self.event_class.objects.filter(**{self.event_parent_key: self.id})
@property
def event_processing_finished(self):
'''
Returns True / False, whether all events from job have been saved
'''
if self.status in ACTIVE_STATES:
return False # tally of events is only available at end of run
try:
event_qs = self.get_event_queryset()
except NotImplementedError:
return True # Model without events, such as WFJT
self.log_lifecycle("event_processing_finished")
return self.emitted_events == event_qs.count()
def result_stdout_raw_handle(self, enforce_max_bytes=True):
"""
This method returns a file-like object ready to be read which contains
all stdout for the UnifiedJob.
If the size of the file is greater than
`settings.STDOUT_MAX_BYTES_DISPLAY`, a StdoutMaxBytesExceeded exception
will be raised.
"""
max_supported = settings.STDOUT_MAX_BYTES_DISPLAY
if enforce_max_bytes:
# If enforce_max_bytes is True, we're not grabbing the whole file,
# just the first <settings.STDOUT_MAX_BYTES_DISPLAY> bytes;
# in this scenario, it's probably safe to use a StringIO.
fd = StringIO()
else:
# If enforce_max_bytes = False, that means they're downloading
# the entire file. To avoid ballooning memory, let's write the
# stdout content to a temporary disk location
if not os.path.exists(settings.JOBOUTPUT_ROOT):
os.makedirs(settings.JOBOUTPUT_ROOT)
fd = tempfile.NamedTemporaryFile(
mode='w',
prefix='{}-{}-'.format(self.model_to_str(), self.pk),
suffix='.out',
dir=settings.JOBOUTPUT_ROOT,
encoding='utf-8'
)
from awx.main.tasks import purge_old_stdout_files # circular import
purge_old_stdout_files.apply_async()
# Before the addition of event-based stdout, older versions of
# awx stored stdout as raw text blobs in a certain database column
# (`main_unifiedjob.result_stdout_text`)
# For older installs, this data still exists in the database; check for
# it and use if it exists
legacy_stdout_text = self.result_stdout_text
if legacy_stdout_text:
if enforce_max_bytes and len(legacy_stdout_text) > max_supported:
raise StdoutMaxBytesExceeded(len(legacy_stdout_text), max_supported)
fd.write(legacy_stdout_text)
if hasattr(fd, 'name'):
fd.flush()
return codecs.open(fd.name, 'r', encoding='utf-8')
else:
# we just wrote to this StringIO, so rewind it
fd.seek(0)
return fd
else:
# Note: the code in this block _intentionally_ does not use the
# Django ORM because of the potential size (many MB+) of
# `main_jobevent.stdout`; we *do not* want to generate queries
# here that construct model objects by fetching large gobs of
# data (and potentially ballooning memory usage); instead, we
# just want to write concatenated values of a certain column
# (`stdout`) directly to a file
with connection.cursor() as cursor:
if enforce_max_bytes:
# detect the length of all stdout for this UnifiedJob, and
# if it exceeds settings.STDOUT_MAX_BYTES_DISPLAY bytes,
# don't bother actually fetching the data
total = self.get_event_queryset().aggregate(
total=models.Sum(models.Func(models.F('stdout'), function='LENGTH'))
)['total'] or 0
if total > max_supported:
raise StdoutMaxBytesExceeded(total, max_supported)
# psycopg2's copy_expert writes bytes, but callers of this
# function assume a str-based fd will be returned; decode
# .write() calls on the fly to maintain this interface
_write = fd.write
fd.write = lambda s: _write(smart_text(s))
cursor.copy_expert(
"copy (select stdout from {} where {}={} and stdout != '' order by start_line) to stdout".format(
self._meta.db_table + 'event',
self.event_parent_key,
self.id
),
fd
)
if hasattr(fd, 'name'):
# If we're dealing with a physical file, use `sed` to clean
# up escaped line sequences
fd.flush()
subprocess.Popen("sed -i 's/\\\\r\\\\n/\\n/g' {}".format(fd.name), shell=True).wait()
return codecs.open(fd.name, 'r', encoding='utf-8')
else:
# If we're dealing with an in-memory string buffer, use
# string.replace()
fd = StringIO(fd.getvalue().replace('\\r\\n', '\n'))
return fd
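    # Illustrative usage sketch (not from the original source):
    #
    #     try:
    #         with uj.result_stdout_raw_handle() as fd:
    #             text = fd.read()
    #     except StdoutMaxBytesExceeded as exc:
    #         text = 'Output too large: {} > {} bytes'.format(exc.total, exc.supported)
    #
    # With enforce_max_bytes=False the full stdout is streamed to a temporary
    # file under settings.JOBOUTPUT_ROOT instead of an in-memory buffer.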
def _escape_ascii(self, content):
# Remove ANSI escape sequences used to embed event data.
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
# Remove ANSI color escape sequences.
content = re.sub(r'\x1b[^m]*m', '', content)
return content
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False):
content = self.result_stdout_raw_handle().read()
if redact_sensitive:
content = UriCleaner.remove_sensitive(content)
if escape_ascii:
content = self._escape_ascii(content)
return content
@property
def result_stdout_raw(self):
return self._result_stdout_raw()
@property
def result_stdout(self):
return self._result_stdout_raw(escape_ascii=True)
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False):
return_buffer = StringIO()
if end_line is not None:
end_line = int(end_line)
stdout_lines = self.result_stdout_raw_handle().readlines()
absolute_end = len(stdout_lines)
for line in stdout_lines[int(start_line):end_line]:
return_buffer.write(line)
if int(start_line) < 0:
start_actual = len(stdout_lines) + int(start_line)
end_actual = len(stdout_lines)
else:
start_actual = int(start_line)
if end_line is not None:
end_actual = min(int(end_line), len(stdout_lines))
else:
end_actual = len(stdout_lines)
return_buffer = return_buffer.getvalue()
if redact_sensitive:
return_buffer = UriCleaner.remove_sensitive(return_buffer)
if escape_ascii:
return_buffer = self._escape_ascii(return_buffer)
return return_buffer, start_actual, end_actual, absolute_end
def result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=False):
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive)
def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=False):
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive, escape_ascii=True)
@property
def workflow_job_id(self):
workflow_job = self.get_workflow_job()
if workflow_job:
return workflow_job.pk
return None
@property
def spawned_by_workflow(self):
return self.launch_type == 'workflow'
def get_workflow_job(self):
if self.spawned_by_workflow:
try:
return self.unified_job_node.workflow_job
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
pass
return None
@property
def workflow_node_id(self):
if self.spawned_by_workflow:
try:
return self.unified_job_node.pk
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
pass
return None
def get_passwords_needed_to_start(self):
return []
def handle_extra_data(self, extra_data):
if hasattr(self, 'extra_vars') and extra_data:
extra_data_dict = {}
try:
extra_data_dict = parse_yaml_or_json(extra_data, silent_failure=False)
except Exception as e:
                logger.warning("Exception deserializing extra vars: " + str(e))
evars = self.extra_vars_dict
evars.update(extra_data_dict)
self.update_fields(extra_vars=json.dumps(evars))
@property
def can_start(self):
return bool(self.status in ('new', 'waiting'))
@property
def can_schedule(self):
if getattr(self, 'passwords_needed_to_start', None):
return False
if getattr(self, 'inventory', None) is None:
return False
JobLaunchConfig = self._meta.get_field('launch_config').related_model
try:
self.launch_config
if self.unified_job_template is None:
return False
return True
except JobLaunchConfig.DoesNotExist:
return False
@property
def task_impact(self):
raise NotImplementedError # Implement in subclass.
def websocket_emit_data(self):
''' Return extra data that should be included when submitting data to the browser over the websocket connection '''
websocket_data = dict(type=self.job_type_name)
if self.spawned_by_workflow:
websocket_data.update(dict(workflow_job_id=self.workflow_job_id,
workflow_node_id=self.workflow_node_id))
return websocket_data
def _websocket_emit_status(self, status):
try:
status_data = dict(unified_job_id=self.id, status=status)
if status == 'waiting':
if self.instance_group:
status_data['instance_group_name'] = self.instance_group.name
else:
status_data['instance_group_name'] = None
elif status in ['successful', 'failed', 'canceled'] and self.finished:
status_data['finished'] = datetime.datetime.strftime(self.finished, "%Y-%m-%dT%H:%M:%S.%fZ")
status_data.update(self.websocket_emit_data())
status_data['group_name'] = 'jobs'
if getattr(self, 'unified_job_template_id', None):
status_data['unified_job_template_id'] = self.unified_job_template_id
emit_channel_notification('jobs-status_changed', status_data)
if self.spawned_by_workflow:
status_data['group_name'] = "workflow_events"
status_data['workflow_job_template_id'] = self.unified_job_template.id
emit_channel_notification('workflow_events-' + str(self.workflow_job_id), status_data)
except IOError: # includes socket errors
logger.exception('%s failed to emit channel msg about status change', self.log_format)
def websocket_emit_status(self, status):
connection.on_commit(lambda: self._websocket_emit_status(status))
if hasattr(self, 'update_webhook_status'):
connection.on_commit(lambda: self.update_webhook_status(status))
def notification_data(self):
return dict(id=self.id,
name=self.name,
url=self.get_ui_url(),
created_by=smart_text(self.created_by),
started=self.started.isoformat() if self.started is not None else None,
finished=self.finished.isoformat() if self.finished is not None else None,
status=self.status,
traceback=self.result_traceback)
def pre_start(self, **kwargs):
if not self.can_start:
self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting')))
self.save(update_fields=['job_explanation'])
return (False, None)
# verify that any associated credentials aren't missing required field data
missing_credential_inputs = []
for credential in self.credentials.all():
defined_fields = credential.credential_type.defined_fields
for required in credential.credential_type.inputs.get('required', []):
if required in defined_fields and not credential.has_input(required):
missing_credential_inputs.append(required)
if missing_credential_inputs:
self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format(
self._meta.verbose_name.title(),
credential.name,
', '.join(sorted(missing_credential_inputs))
)
self.save(update_fields=['job_explanation'])
return (False, None)
needed = self.get_passwords_needed_to_start()
try:
start_args = json.loads(decrypt_field(self, 'start_args'))
except Exception:
start_args = None
if start_args in (None, ''):
start_args = kwargs
opts = dict([(field, start_args.get(field, '')) for field in needed])
if not all(opts.values()):
missing_fields = ', '.join([k for k,v in opts.items() if not v])
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
self.save(update_fields=['job_explanation'])
return (False, None)
if 'extra_vars' in kwargs:
self.handle_extra_data(kwargs['extra_vars'])
# remove any job_explanations that may have been set while job was in pending
if self.job_explanation != "":
self.job_explanation = ""
return (True, opts)
def signal_start(self, **kwargs):
"""Notify the task runner system to begin work on this task."""
# Sanity check: Are we able to start the job? If not, do not attempt
# to do so.
if not self.can_start:
return False
# Get any passwords or other data that are prerequisites to running
# the job.
needed = self.get_passwords_needed_to_start()
opts = dict([(field, kwargs.get(field, '')) for field in needed])
if not all(opts.values()):
return False
# Save the pending status, and inform the SocketIO listener.
self.update_fields(start_args=json.dumps(kwargs), status='pending')
self.websocket_emit_status("pending")
schedule_task_manager()
# Each type of unified job has a different Task class; get the
        # appropriate one.
# task_type = get_type_for_model(self)
# Actually tell the task runner to run this task.
# FIXME: This will deadlock the task runner
#from awx.main.tasks import notify_task_runner
#notify_task_runner.delay({'id': self.id, 'metadata': kwargs,
# 'task_type': task_type})
# Done!
return True
@property
def actually_running(self):
# returns True if the job is running in the appropriate dispatcher process
running = False
if all([
self.status == 'running',
self.celery_task_id,
self.execution_node
]):
# If the job is marked as running, but the dispatcher
# doesn't know about it (or the dispatcher doesn't reply),
# then cancel the job
timeout = 5
try:
running = self.celery_task_id in ControlDispatcher(
'dispatcher', self.controller_node or self.execution_node
).running(timeout=timeout)
except (socket.timeout, RuntimeError):
logger.error('could not reach dispatcher on {} within {}s'.format(
self.execution_node, timeout
))
running = False
return running
@property
def can_cancel(self):
return bool(self.status in CAN_CANCEL)
def _build_job_explanation(self):
if not self.job_explanation:
return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
(self.model_to_str(), self.name, self.id)
return None
def cancel(self, job_explanation=None, is_chain=False):
if self.can_cancel:
if not is_chain:
for x in self.get_jobs_fail_chain():
x.cancel(job_explanation=self._build_job_explanation(), is_chain=True)
if not self.cancel_flag:
self.cancel_flag = True
self.start_args = '' # blank field to remove encrypted passwords
cancel_fields = ['cancel_flag', 'start_args']
if self.status in ('pending', 'waiting', 'new'):
self.status = 'canceled'
cancel_fields.append('status')
if self.status == 'running' and not self.actually_running:
self.status = 'canceled'
cancel_fields.append('status')
if job_explanation is not None:
self.job_explanation = job_explanation
cancel_fields.append('job_explanation')
self.save(update_fields=cancel_fields)
self.websocket_emit_status("canceled")
return self.cancel_flag
@property
def preferred_instance_groups(self):
'''
Return Instance/Rampart Groups preferred by this unified job templates
'''
if not self.unified_job_template:
return []
template_groups = [x for x in self.unified_job_template.instance_groups.all()]
return template_groups
@property
def global_instance_groups(self):
from awx.main.models.ha import InstanceGroup
default_instance_group = InstanceGroup.objects.filter(name='tower')
if default_instance_group.exists():
return [default_instance_group.first()]
return []
def awx_meta_vars(self):
'''
The result of this method is used as extra_vars of a job launched
by AWX, for purposes of client playbook hooks
'''
r = {}
for name in ('awx', 'tower'):
r['{}_job_id'.format(name)] = self.pk
r['{}_job_launch_type'.format(name)] = self.launch_type
created_by = getattr_dne(self, 'created_by')
wj = self.get_workflow_job()
if wj:
schedule = getattr_dne(wj, 'schedule')
for name in ('awx', 'tower'):
r['{}_workflow_job_id'.format(name)] = wj.pk
r['{}_workflow_job_name'.format(name)] = wj.name
r['{}_workflow_job_launch_type'.format(name)] = wj.launch_type
if schedule:
r['{}_parent_job_schedule_id'.format(name)] = schedule.pk
r['{}_parent_job_schedule_name'.format(name)] = schedule.name
if not created_by:
schedule = getattr_dne(self, 'schedule')
if schedule:
for name in ('awx', 'tower'):
r['{}_schedule_id'.format(name)] = schedule.pk
r['{}_schedule_name'.format(name)] = schedule.name
if created_by:
for name in ('awx', 'tower'):
r['{}_user_id'.format(name)] = created_by.pk
r['{}_user_name'.format(name)] = created_by.username
r['{}_user_email'.format(name)] = created_by.email
r['{}_user_first_name'.format(name)] = created_by.first_name
r['{}_user_last_name'.format(name)] = created_by.last_name
inventory = getattr_dne(self, 'inventory')
if inventory:
for name in ('awx', 'tower'):
r['{}_inventory_id'.format(name)] = inventory.pk
r['{}_inventory_name'.format(name)] = inventory.name
return r
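    # Illustrative sketch (assumed IDs): for job 42 launched manually by user
    # 'alice' against inventory 3, the dict contains entries such as
    # awx_job_id=42, tower_job_id=42, awx_user_name='alice', awx_inventory_id=3;
    # every key is emitted under both the 'awx' and 'tower' prefixes.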
def get_queue_name(self):
return self.controller_node or self.execution_node or get_local_queuename()
def is_isolated(self):
return bool(self.controller_node)
@property
def is_container_group_task(self):
return False
def log_lifecycle(self, state, blocked_by=None):
        extra = {'type': self._meta.model_name,
                 'task_id': self.id,
                 'state': state}
if self.unified_job_template:
extra["template_name"] = self.unified_job_template.name
if state == "blocked" and blocked_by:
blocked_by_msg = f"{blocked_by._meta.model_name}-{blocked_by.id}"
msg = f"{self._meta.model_name}-{self.id} blocked by {blocked_by_msg}"
extra["blocked_by"] = blocked_by_msg
else:
msg = f"{self._meta.model_name}-{self.id} {state.replace('_', ' ')}"
logger_job_lifecycle.debug(msg, extra=extra)
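    # Illustrative sketch: log_lifecycle('blocked', blocked_by=other_job) emits a
    # debug record like "job-42 blocked by job-41" with extra fields
    # {'type': 'job', 'task_id': 42, 'state': 'blocked', 'blocked_by': 'job-41'},
    # while log_lifecycle('created') simply logs "job-42 created".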
| 38.945466
| 160
| 0.623383
|
7bb48801bd147abe300c5fd6239c2e6a1e97c94d
| 1,729
|
py
|
Python
|
src/outpost/django/campusonline/migrations/0029_indices.py
|
medunigraz/outpost.django.campusonline
|
06776bce7556e438c1e00a96aaa9271a7aac8fe4
|
[
"BSD-2-Clause"
] | null | null | null |
src/outpost/django/campusonline/migrations/0029_indices.py
|
medunigraz/outpost.django.campusonline
|
06776bce7556e438c1e00a96aaa9271a7aac8fe4
|
[
"BSD-2-Clause"
] | null | null | null |
src/outpost/django/campusonline/migrations/0029_indices.py
|
medunigraz/outpost.django.campusonline
|
06776bce7556e438c1e00a96aaa9271a7aac8fe4
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-31 09:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
forward = [
"""
CREATE UNIQUE INDEX campusonline_distributionlist_id_idx ON "public"."campusonline_distributionlist" ("id");
""",
"""
CREATE INDEX campusonline_distributionlist_name_idx ON "public"."campusonline_distributionlist" ("name");
""",
"""
CREATE UNIQUE INDEX campusonline_distributionlist_person_distributionlist_id_person_id_idx ON "public"."campusonline_distributionlist_person" ("distributionlist_id", "person_id");
""",
"""
CREATE INDEX campusonline_distributionlist_person_distributionlist_id_idx ON "public"."campusonline_distributionlist_person" ("distributionlist_id");
""",
"""
CREATE INDEX campusonline_distributionlist_person_person_id_idx ON "public"."campusonline_distributionlist_person" ("person_id");
""",
]
reverse = [
"""
DROP INDEX IF EXISTS campusonline_distributionlist_person_person_id_idx;
""",
"""
DROP INDEX IF EXISTS campusonline_distributionlist_person_distributionlist_id_idx;
""",
"""
DROP INDEX IF EXISTS campusonline_distributionlist_person_distributionlist_id_person_id_idx;
""",
"""
DROP INDEX IF EXISTS campusonline_distributionlist_name_idx;
""",
"""
DROP INDEX IF EXISTS campusonline_distributionlist_id_idx;
""",
]
dependencies = [("campusonline", "0028_distributionlist")]
operations = [migrations.RunSQL(forward, reverse)]
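    # Usage sketch (hedged; this is the standard Django workflow rather than
    # anything specific to this repository): the forward statements run on
    # `manage.py migrate campusonline 0029`, and the paired reverse statements
    # run when migrating back to `manage.py migrate campusonline 0028`.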
| 35.285714
| 187
| 0.676692
|
a662acb0840929271afe5603be0c35e68bf2cc5c
| 218
|
py
|
Python
|
backend/tiagoflix/utils.py
|
tiagodomp/tiagoflix
|
87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce
|
[
"MIT"
] | null | null | null |
backend/tiagoflix/utils.py
|
tiagodomp/tiagoflix
|
87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce
|
[
"MIT"
] | 9
|
2020-06-05T20:17:54.000Z
|
2022-02-26T21:54:45.000Z
|
backend/tiagoflix/utils.py
|
tiagodomp/tiagoflix
|
87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce
|
[
"MIT"
] | null | null | null |
from core.serializers import UserSerializer
def jwt_response_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
}
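# Illustrative sketch (the exact 'user' fields depend on UserSerializer):
#
#     jwt_response_handler('eyJhbGciOi...', user=some_user, request=request)
#     # -> {'token': 'eyJhbGciOi...', 'user': {'id': 1, 'username': 'tiago', ...}}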
| 27.25
| 71
| 0.683486
|
c3bd80e8459ec985727a7d23dd7bfe995c1c7664
| 485
|
py
|
Python
|
sm/usage.py
|
lrahal/dash-component-boilerplate
|
698513dc790917110b6da08066a73cb9f8a9e871
|
[
"MIT"
] | null | null | null |
sm/usage.py
|
lrahal/dash-component-boilerplate
|
698513dc790917110b6da08066a73cb9f8a9e871
|
[
"MIT"
] | null | null | null |
sm/usage.py
|
lrahal/dash-component-boilerplate
|
698513dc790917110b6da08066a73cb9f8a9e871
|
[
"MIT"
] | null | null | null |
import sm
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
app = dash.Dash(__name__)
app.layout = html.Div([
sm.SideMenu(
id='input',
value='my-value',
label='my-label'
),
html.Div(id='output')
])
@app.callback(Output('output', 'children'), [Input('input', 'value')])
def display_output(value):
return 'You have entered {}'.format(value)
if __name__ == '__main__':
app.run_server(debug=True)
| 19.4
| 70
| 0.653608
|
702f04096bbabe17f3edf7a05edcbbfabbc4c298
| 375
|
py
|
Python
|
eslearn/machine_learning/webread.py
|
lichao312214129/easylearn
|
e77b51b26e0c75b3a4d59dd5a71cf1b63ac4347d
|
[
"MIT"
] | 19
|
2020-02-29T06:00:18.000Z
|
2022-01-24T01:30:14.000Z
|
machine_learning/webread.py
|
easylearn-fmri/easylearn
|
102ff264a7672b246244a489e0fbde8e3897c52f
|
[
"MIT"
] | 7
|
2020-04-02T03:05:21.000Z
|
2020-11-11T11:45:05.000Z
|
machine_learning/webread.py
|
easylearn-fmri/easylearn
|
102ff264a7672b246244a489e0fbde8e3897c52f
|
[
"MIT"
] | 11
|
2020-03-03T03:02:15.000Z
|
2020-11-11T14:09:55.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 19 16:54:28 2021
@author: Li Chao
Email: lichao19870617@163.com
"""
import re
import requests
r = requests.get('https://github.com/easylearn-fmri/easylearn_dev/blob/dev/eslearn_news.txt')
text = r.text
s = "__version__ = (\d+.\d+.\d+)##endLabel##"
pattern = re.compile(s, re.I)  # re.I makes the match case-insensitive
version = pattern.findall(text)
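# Illustrative note: the pattern expects the fetched page to contain a marker
# such as "__version__ = 1.2.3##endLabel##", in which case
# pattern.findall(text) yields ['1.2.3']; if the marker is absent, `version`
# is simply an empty list.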
| 22.058824
| 93
| 0.682667
|
216554da2461101fa873620ad700066c05632cf4
| 5,427
|
py
|
Python
|
scrapers/hackerearth/test_unlock.py
|
0bserver07/neural-engineers-first-attempt
|
19760251b7080ffe2e7b15146af6844811da4141
|
[
"MIT"
] | 10
|
2017-09-10T14:42:36.000Z
|
2020-12-03T11:45:17.000Z
|
scrapers/hackerearth/test_unlock.py
|
0bserver07/neural-engineers-first-attempt
|
19760251b7080ffe2e7b15146af6844811da4141
|
[
"MIT"
] | null | null | null |
scrapers/hackerearth/test_unlock.py
|
0bserver07/neural-engineers-first-attempt
|
19760251b7080ffe2e7b15146af6844811da4141
|
[
"MIT"
] | 7
|
2017-10-03T04:43:50.000Z
|
2020-09-23T14:39:27.000Z
|
# -*- coding: utf-8 -*-
import shutil
import os
import re
import requests
import urllib2
from pprint import pprint
import bs4
from bs4 import BeautifulSoup
import html2text
import time
import argparse
import datetime
from sys import argv
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
def get_request(url, headers={}):
response = requests.get(url,
headers=headers)
return response
def post_request(url, data={}, headers={}, cookies={}):
response = requests.post(url,
data=data,
headers=headers,
cookies=cookies)
return response
def get_submissions():
lordoftherings_value = '13c082ac336859d586aa5364c086d26f:44751f02ffbb8d82fb3deddca4da60de'
cookies = dict()
cookies["lordoftherings"] = lordoftherings_value
browser=webdriver.Chrome()
cookie = {'name': "lordoftherings", 'value' : 'a5dr3g48ag2dg8s2b8r57gkil6ioip74:7c34ac7cc9b2c971eafaba58840e0717', 'path' : '/'}
url_home_page = 'https://www.hackerearth.com/challenges/'
url2 = 'https://www.wikipedia.org/'
    browser.get(url_home_page)  # open the challenges page in the browser session
browser.implicitly_wait(1)
login_but=browser.find_element_by_xpath("//li[contains(@class,'nav-bar-menu login-menu-btn')]")
webdriver.ActionChains(browser).click(login_but).perform()
username = browser.find_element_by_id("id_login")
password = browser.find_element_by_id("id_password")
username.send_keys("gesturefm@gmail.com")
password.send_keys("Deadmau5")
browser.find_element_by_name("submit").click()
urls = ['hermione-vs-draco']
time.sleep(5)
for url in urls:
url_front = 'https://www.hackerearth.com/problem/algorithm/' + url + '/activity/'
        browser.get(url_front)  # open the problem activity page in the browser session
browser.implicitly_wait(.1)
time.sleep(.5)
link = None
not_avail = None
n=0
while not link:
try:
link = browser.find_element_by_link_text('View')
url_link = link.get_attribute("href")
except NoSuchElementException:
                browser.get(url_front)  # reload the page in the browser session
browser.implicitly_wait(.1)
print "except"
n+=1
if n > 20:
break
time.sleep(1)
browser.get(url_link)
browser.implicitly_wait(.1)
time.sleep(.1)
try:
unlock_but=browser.find_element_by_xpath("//a[contains(@class,'button btn-blue ajax-get') and .//text()='Unlock it']")
webdriver.ActionChains(browser).click(unlock_but).perform()
except:
print url + " already pressed"
browser.implicitly_wait(.1)
time.sleep(.1)
unlock_but=browser.find_element_by_xpath("//a[contains(@class,'button btn-blue ajax-get') and .//text()='Unlock it']")
webdriver.ActionChains(browser).click(unlock_but).perform()
handle = 'prashantpandeyfun10'
name = 'algorithm/karan-and-even-numbers-1'
url = "https://www.hackerearth.com/submission/4440655/"
url = "https://www.hackerearth.com/problem/" + name + "/activity/"
t = get_request(url)
if t == -1 or t == {}:
return t
tmp_string = t.headers["set-cookie"]
csrf_token = re.findall(r"csrftoken=\w*", tmp_string)[0][10:]
response = {}
response["host"] = "www.hackerearth.com"
response["user-agent"] = user_agent
response["accept"] = "application/json, text/javascript, */*; q=0.01"
response["accept-language"] = "en-US,en;q=0.5"
response["accept-encoding"] = "gzip, deflate"
response["content-type"] = "application/x-www-form-urlencoded"
response["X-CSRFToken"] = csrf_token
response["X-Requested-With"] = "XMLHttpRequest"
#response["Referer"] = "https://www.hackerearth.com/submissions/" + handle + "/"
response["Referer"] = url
response["Connection"] = "keep-alive"
response["Pragma"] = "no-cache"
response["Cache-Control"] = "no-cache"
response["Cookie"] = tmp_string
it = 1
submissions = {handle: {}}
for index_number in xrange(1, 5):
print(index_number)
submissions[handle][index_number] = {}
url_post = "https://www.hackerearth.com/AJAX/algorithm/42373/unlock-problem-submission/"
url_auth = 'https://www.hackerearth.com/realtime/pusher/auth/'
data = {'csrf_token':csrf_token, 'action':'setupSubmissionFilter', 'frameProblemIndex':'A', 'verdictName':'OK'}
url_auth = 'https://www.hackerearth.com/realtime/pusher/auth/'
idk = post_request(url_post, headers=response)
url = "https://www.hackerearth.com/submission/4440655/"
page = get_request(url, headers=response)
html_content = page.text
soup = BeautifulSoup(html_content, "html.parser")
body = re.search('/submission/key/(.*)/', html_content)
w = body.group(1)
get_submissions()
| 30.661017
| 136
| 0.619495
|
882f24d80cd46b562a0b2c81a5506e70153440f2
| 1,036
|
py
|
Python
|
tests/test_kubeflow.py
|
tidylobster/workflow-orhcestrator
|
a282f21d74e6616d19bdd715b7ad3d6cbf0ec046
|
[
"Apache-2.0"
] | null | null | null |
tests/test_kubeflow.py
|
tidylobster/workflow-orhcestrator
|
a282f21d74e6616d19bdd715b7ad3d6cbf0ec046
|
[
"Apache-2.0"
] | null | null | null |
tests/test_kubeflow.py
|
tidylobster/workflow-orhcestrator
|
a282f21d74e6616d19bdd715b7ad3d6cbf0ec046
|
[
"Apache-2.0"
] | null | null | null |
import wo, os, shutil, pytest, json
import random, logging, urllib.parse
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Tests
# -----
@pytest.fixture
def w():
yield wo.Orchestrator(dev=True)
def test_export_outputs_json(w):
outputs = {
"mlpipeline-ui-metadata.json": {
'outputs': [
{
'type': 'tensorboard',
'source': "s3://bucket/somewhere-in-my-heart/",
},
]
},
}
w.log_execution(outputs=outputs)
with open("mlpipeline-ui-metadata.json", "r") as file:
dumped = json.load(file)
assert outputs["mlpipeline-ui-metadata.json"] == dumped
os.remove("mlpipeline-ui-metadata.json")
def test_export_outputs_other(w):
outputs = {
"random": random.randint(0, 1000000)
}
w.log_execution(outputs=outputs)
with open("random", "r") as file:
value = int(file.read())
assert outputs["random"] == value
os.remove("random")
| 24.666667
| 67
| 0.586873
|
60d4a184db567f92856e65e1d8ad8c6361769649
| 995
|
py
|
Python
|
shot_scraper/utils.py
|
ryancheley/shot-scraper
|
8d103ae6121031ea7a8799fcac4d2d2dcf8525a2
|
[
"Apache-2.0"
] | 409
|
2022-03-09T00:38:32.000Z
|
2022-03-31T18:19:37.000Z
|
shot_scraper/utils.py
|
ryancheley/shot-scraper
|
8d103ae6121031ea7a8799fcac4d2d2dcf8525a2
|
[
"Apache-2.0"
] | 52
|
2022-03-08T23:28:34.000Z
|
2022-03-29T06:22:28.000Z
|
shot_scraper/utils.py
|
ryancheley/shot-scraper
|
8d103ae6121031ea7a8799fcac4d2d2dcf8525a2
|
[
"Apache-2.0"
] | 19
|
2022-03-09T17:35:26.000Z
|
2022-03-30T09:27:28.000Z
|
import urllib.parse
import re
disallowed_re = re.compile("[^a-zA-Z0-9_-]")
def file_exists_never(filename):
return False
def filename_for_url(url, ext=None, file_exists=file_exists_never):
ext = ext or "png"
bits = urllib.parse.urlparse(url)
filename = (bits.netloc + bits.path).replace(".", "-").replace("/", "-").rstrip("-")
# Remove any characters outside of the allowed range
base_filename = disallowed_re.sub("", filename).lstrip("-")
filename = base_filename + "." + ext
suffix = 0
while file_exists(filename):
suffix += 1
filename = "{}.{}.{}".format(base_filename, suffix, ext)
return filename
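# Illustrative examples (assuming no pre-existing files):
#
#     filename_for_url("https://example.com/docs/page")    # -> "example-com-docs-page.png"
#     filename_for_url("https://example.com/", ext="jpg")  # -> "example-com.jpg"
#
# When file_exists reports a collision, a numeric suffix is inserted:
# "example-com-docs-page.1.png", "example-com-docs-page.2.png", and so on.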
def url_or_file_path(url, file_exists=file_exists_never):
# If url exists as a file, convert that to file:/
file_path = file_exists(url)
if file_path:
return "file:{}".format(file_path)
if not (url.startswith("http://") or url.startswith("https://")):
return "http://{}".format(url)
return url
| 30.151515
| 88
| 0.648241
|
b86f5d69fa94d709d3d5c26831a1d2be942d5a57
| 10,597
|
py
|
Python
|
library/pyjamas/ui/vertsplitpanel.py
|
andreyvit/pyjamas
|
1154abe3340a84dba7530b8174aaddecfc1a0944
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2017-09-09T11:58:36.000Z
|
2017-09-09T11:58:36.000Z
|
library/pyjamas/ui/vertsplitpanel.py
|
andreyvit/pyjamas
|
1154abe3340a84dba7530b8174aaddecfc1a0944
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
library/pyjamas/ui/vertsplitpanel.py
|
andreyvit/pyjamas
|
1154abe3340a84dba7530b8174aaddecfc1a0944
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
Vertical Split Panel: Top and Bottom layouts with a movable splitter.
/*
* Copyright 2008 Google Inc.
* Copyright (C) 2008, 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http:#www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
"""
from splitpanel import SplitPanel
from pyjamas import Factory
from pyjamas import DOM
from pyjamas import DeferredCommand
from pyjamas.Timer import Timer
from __pyjamas__ import JS
class ImplVerticalSplitPanel:
""" Provides a base implementation for splitter layout that relies on CSS
positioned layout.
"""
def __init__(self, panel):
self.panel = panel
DOM.setStyleAttribute(panel.getElement(), "position", "relative")
topElem = panel.getWidgetElement(0)
bottomElem = panel.getWidgetElement(1)
self.expandToFitParentHorizontally(topElem)
self.expandToFitParentHorizontally(bottomElem)
self.expandToFitParentHorizontally(panel.getSplitElement())
self.panel.expandToFitParentUsingCssOffsets(panel.container)
# Snap the bottom wrapper to the bottom side.
DOM.setStyleAttribute(bottomElem, "bottom", "0")
def expandToFitParentHorizontally(self, elem):
self.panel.addAbsolutePositoning(elem)
DOM.setStyleAttribute(elem, "left", "0")
DOM.setStyleAttribute(elem, "right", "0")
def onAttach(self):
pass
def onDetach(self):
pass
def onSplitterResize(self, px):
self.setSplitPosition(px)
def setSplitPosition(self, px):
splitElem = self.panel.getSplitElement()
rootElemHeight = self.panel.getOffsetHeight(self.panel.container)
splitElemHeight = self.panel.getOffsetHeight(splitElem)
# layout not settled, set height to what it _should_ be... yuk.
if splitElemHeight == 0:
splitElemHeight = 7
if rootElemHeight < splitElemHeight:
return
newBottomHeight = rootElemHeight - px - splitElemHeight
if px < 0:
px = 0
newBottomHeight = rootElemHeight - splitElemHeight
elif newBottomHeight < 0:
px = rootElemHeight - splitElemHeight
newBottomHeight = 0
self.updateElements(self.panel.getWidgetElement(0),
splitElem,
self.panel.getWidgetElement(1),
px, px + splitElemHeight, newBottomHeight)
def updateElements(self, topElem, splitElem,
bottomElem, topHeight, bottomTop, bottomHeight):
self.panel.setElemHeight(topElem, "%dpx" % topHeight)
self.panel.setTop(splitElem, "%dpx" % topHeight)
self.panel.setTop(bottomElem, "%dpx" % bottomTop)
# bottom's height is handled by CSS.
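    # Worked example (illustration only): with a 300px tall container and a 7px
    # splitter, setSplitPosition(100) sizes the top element to 100px, places the
    # splitter at top=100px and the bottom wrapper at top=107px, leaving the
    # remaining 193px (300 - 100 - 7) to the bottom element through its CSS
    # bottom:0 anchoring.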
class ImplIE6VerticalSplitPanel:
""" Provides an implementation for IE6/7 that relies on 100% length in CSS.
"""
def __init__(self, panel):
self.panel = panel
self.isResizeInProgress = False
self.isTopHidden = False
self.isBottomHidden = False
elem = panel.getElement()
# Prevents inherited text-align settings from interfering with the
# panel's layout.
DOM.setStyleAttribute(elem, "textAlign", "left")
DOM.setStyleAttribute(elem, "position", "relative")
topElem = panel.getWidgetElement(0)
bottomElem = panel.getWidgetElement(1)
self.expandToFitParentHorizontally(topElem)
self.expandToFitParentHorizontally(bottomElem)
self.expandToFitParentHorizontally(panel.getSplitElement())
self.expandToFitParentUsingPercentages(panel.container)
def expandToFitParentHorizontally(self, elem):
self.addAbsolutePositoning(elem)
self.setLeft(elem, "0")
self.setElemWidth(elem, "100%")
def onAttach(self):
self.addResizeListener(self.panel.container)
self.onResize()
def onDetach(self):
DOM.setElementProperty(self.panel.container, "onresize", None)
def onSplitterResize(self, px):
""" IE6/7 has event priority issues that will prevent
the repaints from happening quickly enough causing the
interaction to seem unresponsive. The following is simply
a poor man's mouse event coalescing.
"""
resizeUpdatePeriod = 20 # ms
if not self.isResizeInProgress:
self.isResizeInProgress = True
Timer(resizeUpdatePeriod, self)
self.splitPosition = px
def onTimer(self, t):
        self.setSplitPosition(self.splitPosition)
self.isResizeInProgress = False
    def updateElements(self, topElem, splitElem, bottomElem,
                       topHeight, bottomTop, bottomHeight):
""" IE6/7 has a quirk where a zero height element with
non-zero height children will expand larger than 100%. To
        prevent this, the width is explicitly set to zero when
height is zero.
"""
if topHeight == 0:
self.setWidth(topElem, "0px")
self.isTopHidden = True
elif self.isTopHidden:
self.setWidth(topElem, "100%")
self.isTopHidden = False
if bottomHeight == 0:
self.setElemWidth(bottomElem, "0px")
self.isBottomHidden = True
elif self.isBottomHidden:
self.setElemWidth(bottomElem, "100%")
self.isBottomHidden = False
self.panel.setElemHeight(topElem, "%dpx" % topHeight)
self.panel.setTop(splitElem, "%dpx" % topHeight)
self.panel.setTop(bottomElem, "%dpx" % bottomTop)
# IE6/7 cannot update properly with CSS alone.
        self.panel.setElemHeight(bottomElem, "%dpx" % bottomHeight)
def addResizeListener(self, container):
JS("""
this.container.onresize = function() {
__ImplIE6VerticalSplitPanel_onResize();
}
""")
def onResize(self):
self.setSplitPosition(self.panel.getOffsetHeight(self.panel.getWidgetElement(0)))
class VerticalSplitPanel(SplitPanel):
""" A panel that arranges two widgets in a single vertical
column and allows the user to interactively
change the proportion of the height dedicated to
each of the two widgets. Widgets contained within a
<code>VerticalSplitterPanel</code> will be automatically
decorated with scrollbars when necessary.
"""
def __init__(self, **kwargs):
""" Creates an empty vertical split panel.
"""
if not kwargs.has_key('StyleName'): kwargs['StyleName']="gwt-VerticalSplitPanel"
if kwargs.has_key('Element'):
element = kwargs.pop('Element')
else:
element = DOM.createDiv()
SplitPanel.__init__(self, element,
DOM.createDiv(),
self.preventBoxStyles(DOM.createDiv()),
self.preventBoxStyles(DOM.createDiv()),
**kwargs)
self.container = self.preventBoxStyles(DOM.createDiv())
self.buildDOM()
self.impl = ImplVerticalSplitPanel(self)
self.setSplitPosition("50%")
# Captures the height of the top container when drag resizing starts.
self.initialTopHeight = 0
# Captures the offset of a user's mouse pointer during drag resizing.
self.initialThumbPos = 0
self.lastSplitPosition = ""
def getBottomWidget(self):
""" Gets the widget in the bottom of the panel.
@return the widget, <code>None</code> if there is not one
"""
return self.getWidget(1)
def getTopWidget(self):
""" Gets the widget in the top of the panel.
@return the widget, <code>None</code> if there is not one
"""
return self.getWidget(0)
def setBottomWidget(self, w):
""" Sets the widget in the bottom of the panel.
@param w the widget
"""
self.setWidget(1, w)
def setSplitPosition(self, pos):
self.lastSplitPosition = pos
topElem = self.getWidgetElement(0)
self.setElemHeight(topElem, pos)
self.impl.setSplitPosition(self.getOffsetHeight(topElem))
def setTopWidget(self, w):
""" Sets the widget in the top of the panel.
@param w the widget
"""
self.setWidget(0, w)
def onLoad(self):
self.impl.onAttach()
# Set the position realizing it might not work until
# after layout runs. This first call is simply to try
# to avoid a jitter effect if possible.
self.setSplitPosition(self.lastSplitPosition)
DeferredCommand.add(self)
def execute(self):
self.setSplitPosition(self.lastSplitPosition)
def onUnload(self):
self.impl.onDetach()
def onSplitterResize(self, x, y):
self.impl.onSplitterResize(self.initialTopHeight + y -
self.initialThumbPos)
def onSplitterResizeStarted(self, x, y):
self.initialThumbPos = y
self.initialTopHeight = self.getOffsetHeight(self.getWidgetElement(0))
def buildDOM(self):
topDiv = self.getWidgetElement(0)
bottomDiv = self.getWidgetElement(1)
splitDiv = self.getSplitElement()
DOM.appendChild(self.getElement(), self.container)
DOM.appendChild(self.container, topDiv)
DOM.appendChild(self.container, splitDiv)
DOM.appendChild(self.container, bottomDiv)
# The style name is placed on the table rather than splitElem
# to allow the splitter to be styled without interfering
# with layout.
thumb_html = '<img src="splitPanelThumb.png" />'
DOM.setInnerHTML(splitDiv, "<div class='vsplitter' " +
"style='text-align:center'>" +
thumb_html + "</div>")
self.addScrolling(topDiv)
self.addScrolling(bottomDiv)
Factory.registerClass('pyjamas.ui.VerticalSplitPanel', VerticalSplitPanel)
| 34.973597
| 89
| 0.638388
|
409177455fc83030b6536fb67dd5d72c321db1b2
| 4,735
|
py
|
Python
|
tax-processing-pipeline-python/main.py
|
jiya-zhang/document-ai-samples
|
554187272cae6732b42e6f8311c4c57ee927fb80
|
[
"Apache-2.0"
] | 3
|
2022-02-23T19:59:32.000Z
|
2022-03-10T21:59:37.000Z
|
tax-processing-pipeline-python/main.py
|
jiya-zhang/document-ai-samples
|
554187272cae6732b42e6f8311c4c57ee927fb80
|
[
"Apache-2.0"
] | 1
|
2022-03-10T22:15:09.000Z
|
2022-03-10T22:53:23.000Z
|
tax-processing-pipeline-python/main.py
|
jiya-zhang/document-ai-samples
|
554187272cae6732b42e6f8311c4c57ee927fb80
|
[
"Apache-2.0"
] | 2
|
2022-03-10T21:33:53.000Z
|
2022-03-14T15:25:53.000Z
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# type: ignore[1]
"""Flask Web Server"""
import os
from uuid import uuid4
from tempfile import TemporaryDirectory
from typing import List, Tuple
from flask import Flask, after_this_request, render_template, request
from werkzeug.exceptions import HTTPException
from consts import FIRESTORE_PROJECT_ID, FIRESTORE_COLLECTION_PREFIX
from docai_pipeline import run_docai_pipeline
from firestore_utils import read_collection, delete_collection
from tax_pipeline import calculate_tax_values
SESSION_ID = str(uuid4())
FIRESTORE_COLLECTION = f"{FIRESTORE_COLLECTION_PREFIX}-{SESSION_ID}"
app = Flask(__name__)
UPLOAD_FOLDER = "/tmp"
ALLOWED_MIMETYPES = set(["application/pdf", "image/tiff", "image/jpeg"])
@app.route("/", methods=["GET"])
def index() -> str:
"""
Web Server, Homepage
"""
return render_template("index.html")
@app.route("/file_upload", methods=["POST"])
def file_upload() -> str:
"""
Handle file upload request
"""
# pylint: disable=consider-using-with
temp_dir = TemporaryDirectory()
@after_this_request
def cleanup(response):
temp_dir.cleanup()
return response
# Check if POST Request includes Files
if not request.files:
return render_template("index.html", message_error="No files provided")
files = request.files.getlist("files")
uploaded_filenames = save_files_to_temp_directory(files, temp_dir)
if not uploaded_filenames:
return render_template("index.html", message_error="No valid files provided")
status_messages = run_docai_pipeline(uploaded_filenames, FIRESTORE_COLLECTION)
return render_template(
"index.html",
message_success="Successfully uploaded & processed files",
status_messages=status_messages,
)
@app.route("/view_extracted_data", methods=["GET"])
def view_extracted_data() -> str:
"""
Display Raw extracted data from Documents
"""
extracted_data = read_collection(FIRESTORE_PROJECT_ID, FIRESTORE_COLLECTION)
if not extracted_data:
return render_template("index.html", message_error="No data to display")
return render_template("index.html", extracted_data=extracted_data)
@app.route("/view_tax_bill", methods=["GET"])
def view_tax_bill() -> str:
"""
Calculate Tax Return with Document Information from Firestore
"""
extracted_data = read_collection(FIRESTORE_PROJECT_ID, FIRESTORE_COLLECTION)
tax_data = calculate_tax_values(extracted_data)
if not tax_data:
return render_template("index.html", message_error="No data to display")
return render_template("index.html", tax_data=tax_data)
@app.route("/delete_data", methods=["GET"])
def delete_data() -> str:
"""
Remove Saved Data from Database
"""
delete_collection(FIRESTORE_PROJECT_ID, FIRESTORE_COLLECTION)
return render_template("index.html", message_success="Successfully deleted data")
def save_files_to_temp_directory(files, temp_dir) -> List[Tuple[str, str]]:
"""
Save files to temporary directory
Returns a list of tuples containing file paths and mimetypes
"""
uploaded_filenames = []
for file in files:
if not file or file.filename == "":
print("Skipping corrupt file")
continue
if file.mimetype not in ALLOWED_MIMETYPES:
print(f"Invalid File Type: {file.filename}: {file.mimetype}")
continue
input_file_path = os.path.join(temp_dir.name, file.filename)
file.save(input_file_path)
uploaded_filenames.append((input_file_path, file.mimetype))
print(f"Uploaded file: {input_file_path}, {file.mimetype}")
return uploaded_filenames
@app.errorhandler(Exception)
def handle_exception(ex):
"""
Handle Application Exceptions
"""
# Pass through HTTP errors
if isinstance(ex, HTTPException):
return ex
# Non-HTTP exceptions only
return render_template(
"index.html",
message_error="An unknown error occurred, please try again later",
)
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))
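For illustration only (not part of the record above): a minimal smoke test for the Flask routes, assuming main.py and its local dependencies (consts, docai_pipeline, firestore_utils, tax_pipeline) are importable in the test environment. It only exercises the homepage, which does not touch Firestore.
# hypothetical smoke test using Flask's built-in test client
from main import app

def test_homepage_renders():
    client = app.test_client()
    response = client.get("/")
    # the index route should render the template without external calls
    assert response.status_code == 200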
| 30.159236
| 85
| 0.714467
|
7f06fb3a7788c671a5dcbb9ebdac2cd4b43bd981
| 793
|
py
|
Python
|
pandas/core/internals/__init__.py
|
mojones/pandas
|
3d4f9dc19d784526f71a197bfb6e36b0409e0760
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-04-26T22:11:21.000Z
|
2020-04-26T22:11:21.000Z
|
pandas/core/internals/__init__.py
|
mojones/pandas
|
3d4f9dc19d784526f71a197bfb6e36b0409e0760
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-04-16T05:21:06.000Z
|
2020-04-16T05:21:06.000Z
|
pandas/core/internals/__init__.py
|
mojones/pandas
|
3d4f9dc19d784526f71a197bfb6e36b0409e0760
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-06-19T11:52:05.000Z
|
2020-06-19T11:52:05.000Z
|
from pandas.core.internals.blocks import ( # io.pytables, io.packers
Block,
BoolBlock,
CategoricalBlock,
ComplexBlock,
DatetimeBlock,
DatetimeTZBlock,
ExtensionBlock,
FloatBlock,
IntBlock,
ObjectBlock,
TimeDeltaBlock,
_safe_reshape,
make_block,
)
from pandas.core.internals.concat import concatenate_block_managers
from pandas.core.internals.managers import BlockManager, SingleBlockManager
__all__ = [
"Block",
"BoolBlock",
"CategoricalBlock",
"ComplexBlock",
"DatetimeBlock",
"DatetimeTZBlock",
"ExtensionBlock",
"FloatBlock",
"IntBlock",
"ObjectBlock",
"TimeDeltaBlock",
"_safe_reshape",
"make_block",
"BlockManager",
"SingleBlockManager",
"concatenate_block_managers",
]
| 21.432432
| 75
| 0.687264
|
bd7135e5b06691bbaf8583f7cd89bc6fbfe37400
| 6,286
|
py
|
Python
|
hifi_gan/meldataset.py
|
dodoproptit99/Multilingual_Text_to_Speech
|
8c5f2c8f3bffe7aca68c576168a12ab3451d09ec
|
[
"MIT"
] | null | null | null |
hifi_gan/meldataset.py
|
dodoproptit99/Multilingual_Text_to_Speech
|
8c5f2c8f3bffe7aca68c576168a12ab3451d09ec
|
[
"MIT"
] | null | null | null |
hifi_gan/meldataset.py
|
dodoproptit99/Multilingual_Text_to_Speech
|
8c5f2c8f3bffe7aca68c576168a12ab3451d09ec
|
[
"MIT"
] | null | null | null |
import math
import os
import random
import torch
import torch.utils.data
import numpy as np
from librosa.util import normalize
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn
MAX_WAV_VALUE = 32768.0
def load_wav(full_path):
sampling_rate, data = read(full_path)
return data, sampling_rate
def dynamic_range_compression(x, C=1, clip_val=1e-5):
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
def dynamic_range_decompression(x, C=1):
return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
def spectral_de_normalize_torch(magnitudes):
output = dynamic_range_decompression_torch(magnitudes)
return output
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
if fmax not in mel_basis:
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = spectral_normalize_torch(spec)
return spec
def get_dataset_filelist(a):
with open(a.input_training_file, 'r', encoding='utf-8') as fi:
training_files = [os.path.join(a.input_wavs_dir, x + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
with open(a.input_validation_file, 'r', encoding='utf-8') as fi:
validation_files = [os.path.join(a.input_wavs_dir, x + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
return training_files, validation_files
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,
device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):
self.audio_files = training_files
random.seed(1234)
if shuffle:
random.shuffle(self.audio_files)
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.cached_wav = None
self.n_cache_reuse = n_cache_reuse
self._cache_ref_count = 0
self.device = device
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
def __getitem__(self, index):
filename = self.audio_files[index]
if self._cache_ref_count == 0:
audio, sampling_rate = load_wav(filename)
audio = audio / MAX_WAV_VALUE
if not self.fine_tuning:
audio = normalize(audio) * 0.95
self.cached_wav = audio
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
self._cache_ref_count = self.n_cache_reuse
else:
audio = self.cached_wav
self._cache_ref_count -= 1
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = random.randint(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,
center=False)
else:
mel = np.load(
os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel)
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]
else:
mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files)
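As a sketch only, not part of the original file: a minimal call to mel_spectrogram above with made-up HiFi-GAN-style parameters, assuming the torch/librosa versions this file targets (positional arguments to librosa.filters.mel, torch.stft without return_complex).
import torch

# one second of synthetic audio in [-1, 1), shaped (batch, samples)
wav = torch.rand(1, 22050) * 2 - 1
mel = mel_spectrogram(wav, n_fft=1024, num_mels=80, sampling_rate=22050,
                      hop_size=256, win_size=1024, fmin=0, fmax=8000)
print(mel.shape)  # roughly (1, 80, samples // hop_size)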
| 37.195266
| 115
| 0.616927
|
e061c5ecbaf9c84ce4b9d2234af342afb376a75b
| 4,650
|
py
|
Python
|
invenio_cli/commands/services_health.py
|
inveniosoftware/invenio-scripts
|
9888acdb6b7cc408f4ac244d68fd897256273f15
|
[
"MIT"
] | null | null | null |
invenio_cli/commands/services_health.py
|
inveniosoftware/invenio-scripts
|
9888acdb6b7cc408f4ac244d68fd897256273f15
|
[
"MIT"
] | 9
|
2019-10-01T12:10:28.000Z
|
2019-10-02T14:07:32.000Z
|
invenio_cli/commands/services_health.py
|
inveniosoftware/invenio-scripts
|
9888acdb6b7cc408f4ac244d68fd897256273f15
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio-Cli is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module to ease the creation and management of applications."""
#####
# IMPORTANT NOTE: If you are going to modify any code here,
# check `docker-services-cli` since the original code belongs there,
# and any bug might have already been fixed there.
# The reason for the copy-paste was to simplify the complexity of the
# integration. Might be integrated in the future when `docker-services-cli`
# reaches a higher maturity level.
#####
import time
import click
from ..helpers.process import run_cmd
class ServicesHealthCommands(object):
"""Services status commands."""
@classmethod
def es_healthcheck(cls, *args, **kwargs):
"""Elasticsearch healthcheck."""
verbose = kwargs["verbose"]
return run_cmd(
["curl", "-f", "localhost:9200/_cluster/health?wait_for_status=yellow"]
)
@classmethod
def postgresql_healthcheck(cls, *args, **kwargs):
"""Postgresql healthcheck."""
filepath = kwargs["filepath"]
verbose = kwargs["verbose"]
return run_cmd(
[
"docker-compose",
"--file",
filepath,
"exec",
"-T",
"db",
"bash",
"-c",
"pg_isready",
]
)
@classmethod
def mysql_healthcheck(cls, *args, **kwargs):
"""Mysql healthcheck."""
filepath = kwargs["filepath"]
verbose = kwargs["verbose"]
password = kwargs["project_shortname"]
return run_cmd(
[
"docker-compose",
"--file",
filepath,
"exec",
"-T",
"db",
"bash",
"-c",
f'mysql -p{password} -e "select Version();"',
]
)
@classmethod
def redis_healthcheck(cls, *args, **kwargs):
"""Redis healthcheck."""
filepath = kwargs["filepath"]
verbose = kwargs["verbose"]
return run_cmd(
[
"docker-compose",
"--file",
filepath,
"exec",
"-T",
"cache",
"bash",
"-c",
"redis-cli ping",
"|",
"grep 'PONG'",
"&>/dev/null;",
]
)
@classmethod
def wait_for_services(
cls,
services,
project_shortname,
filepath="docker-services.yml",
max_retries=6,
verbose=False,
):
"""Wait for services to be up.
It performs configured healthchecks in a serial fashion, following the
order given in the ``up`` command. If the services is an empty list,
to be compliant with `docker-compose` it will perform the healthchecks
of all the services.
"""
if len(services) == 0:
services = HEALTHCHECKS.keys()
for service in services:
exp_backoff_time = 2
try_ = 1
check = HEALTHCHECKS[service]
ready = check(
filepath=filepath,
verbose=verbose,
project_shortname=project_shortname,
)
while not ready and try_ < max_retries:
click.secho(
f"{service} not ready at {try_} retries, waiting "
+ f"{exp_backoff_time}s",
fg="yellow",
)
try_ += 1
time.sleep(exp_backoff_time)
exp_backoff_time *= 2
ready = (
check(
filepath=filepath,
verbose=verbose,
project_shortname=project_shortname,
).status_code
== 0
)
if not ready:
click.secho(f"Unable to boot up {service}", fg="red")
exit(1)
else:
click.secho(f"{service} up and running!", fg="green")
HEALTHCHECKS = {
"es": ServicesHealthCommands.es_healthcheck,
"postgresql": ServicesHealthCommands.postgresql_healthcheck,
"mysql": ServicesHealthCommands.mysql_healthcheck,
"redis": ServicesHealthCommands.redis_healthcheck,
}
"""Health check functions module path, as string."""
| 28.703704
| 83
| 0.505591
|
a4fbe0483ee2aad02d5214cc7a72798046bde8c3
| 9,909
|
py
|
Python
|
src/zope/server/interfaces/__init__.py
|
cjwatson/zope.server
|
9c40c8a1ae57d28f1e0fa21e740826befefc30d5
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/server/interfaces/__init__.py
|
cjwatson/zope.server
|
9c40c8a1ae57d28f1e0fa21e740826befefc30d5
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/server/interfaces/__init__.py
|
cjwatson/zope.server
|
9c40c8a1ae57d28f1e0fa21e740826befefc30d5
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Server interfaces.
"""
from zope.interface import Interface
from zope.interface import Attribute
class ISocket(Interface):
"""Represents a socket.
Note: Most of this documentation is taken from the Python Library
Reference.
"""
def listen(backlog):
"""Listen for connections made to the socket.
The 'backlog' argument specifies the maximum number of queued
connections and should be at least 1; the maximum value is
system-dependent (usually 5).
"""
def bind(addr):
"""Bind the socket to address.
The socket must not already be bound.
"""
def connect(address):
"""Connect to a remote socket at address."""
def accept():
"""Accept a connection.
The socket must be bound to an address and listening for
connections. The return value is a pair (conn, address) where conn is
a new socket object usable to send and receive data on the connection,
and address is the address bound to the socket on the other end of the
connection.
"""
def recv(buffer_size):
"""Receive data from the socket.
The return value is a string representing the data received. The
maximum amount of data to be received at once is specified by
bufsize. See the Unix manual page recv(2) for the meaning of the
optional argument flags; it defaults to zero.
"""
def send(data):
"""Send data to the socket.
The socket must be connected to a remote socket. The optional flags
argument has the same meaning as for recv() above. Returns the number
of bytes sent. Applications are responsible for checking that all data
has been sent; if only some of the data was transmitted, the
application needs to attempt delivery of the remaining data.
"""
def close():
"""Close the socket.
All future operations on the socket object will fail. The remote end
will receive no more data (after queued data is flushed). Sockets are
automatically closed when they are garbage-collected.
"""
class ITaskDispatcher(Interface):
"""An object that accepts tasks and dispatches them to threads.
"""
def setThreadCount(count):
"""Sets the number of handler threads.
"""
def addTask(task):
"""Receives a task and dispatches it to a thread.
Note that, depending on load, a task may have to wait a
while for its turn.
"""
def shutdown(cancel_pending=True, timeout=5):
"""Shuts down all handler threads and may cancel pending tasks.
"""
def getPendingTasksEstimate():
"""Returns an estimate of the number of tasks waiting to be serviced.
This method may be useful for monitoring purposes. If the
number of pending tasks is continually climbing, your server
is becoming overloaded and the operator should be notified.
"""
class ITask(Interface):
"""
The interface expected of an object placed in the queue of
a ThreadedTaskDispatcher. Provides facilities for executing
or canceling the task.
"""
def service():
"""
Services the task. Either service() or cancel() is called
for every task queued.
"""
def cancel():
"""
Called instead of service() during shutdown or if an
exception occurs that prevents the task from being
serviced. Must return quickly and should not throw exceptions.
"""
def defer():
"""
Called just before the task is queued to be executed in
a different thread.
"""
class IDispatcherEventHandler(Interface):
"""The Dispatcher can receive several different types of events. This
interface describes the necessary methods that handle these common
event types.
"""
def handle_read_event():
"""Given a read event, a server has to handle the event and
read the input from the client.
"""
def handle_write_event():
"""Given a write event, a server has to handle the event and
write the output to the client.
"""
def handle_expt_event():
"""An exception event was handed to the server.
"""
def handle_error():
"""An error occurred, but we are still trying to fix it.
"""
def handle_expt():
"""Handle unhandled exceptions. This is usually a time to log.
"""
def handle_read():
"""Read output from client.
"""
def handle_write():
"""Write output via the socket to the client.
"""
def handle_connect():
"""A client requests a connection, now we need to do soemthing.
"""
def handle_accept():
"""A connection is accepted.
"""
def handle_close():
"""A connection is being closed.
"""
class IStreamConsumer(Interface):
"""Consumes a data stream until reaching a completion point.
The actual amount to be consumed might not be known ahead of time.
"""
def received(data):
"""Accepts data, returning the number of bytes consumed."""
completed = Attribute(
'completed', 'Set to a true value when finished consuming data.')
class IServer(Interface):
"""This interface describes the basic base server.
The most unusual part about the Zope servers (since they all
implement this interface or inherit its base class) is that it
uses a mix of asynchronous and thread-based mechanism to
serve. While the low-level socket listener uses async, the
actual request is executed in a thread. This is important
because even if a request takes a long time to process, the
server can service other requests simultaneously.
"""
channel_class = Attribute("""
The channel class defines the type of channel
to be used by the server. See IServerChannel
for more information.
""")
SERVER_IDENT = Attribute("""
This string identifies the server. By default
this is 'zope.server.' and should be
overridden.
""")
class IDispatcherLogging(Interface):
"""This interface provides methods through which the Dispatcher will
write its logs. A distinction is made between hit and message logging,
since they often go to different output types and can have very
different structure.
"""
def log (message):
"""Logs general requests made to the server.
"""
def log_info(message, type='info'):
"""Logs informational messages, warnings and errors.
"""
class IServerChannel(Interface):
parser_class = Attribute("""Subclasses must provide a parser class""")
task_class = Attribute("""Specifies the ITask class to be used for
generating tasks.""")
def queue_task(task):
"""Queues a channel-related task to be processed in sequence.
"""
class IDispatcher(ISocket, IDispatcherEventHandler, IDispatcherLogging):
"""The dispatcher is the most low-level component of a server.
1. It manages the socket connections and distributes the
request to the appropriate channel.
2. It handles the events passed to it, such as reading input,
writing output and handling errors. More about this
functionality can be found in IDispatcherEventHandler.
3. It handles logging of the requests passed to the server as
well as other informational messages and errors. Please see
IDispatcherLogging for more details.
Note: Most of this documentation is taken from the Python
Library Reference.
"""
def add_channel(map=None):
"""After the low-level socket connection negotiation is
completed, a channel is created that handles all requests
and responses until the end of the connection.
"""
def del_channel(map=None):
"""Delete a channel. This should include also closing the
socket to the client.
"""
def create_socket(family, type):
"""This is identical to the creation of a normal socket, and
will use the same options for creation. Refer to the socket
documentation for information on creating sockets.
"""
def readable():
"""Each time through the select() loop, the set of sockets is
scanned, and this method is called to see if there is any
interest in reading. The default method simply returns 1,
indicating that by default, all channels will be
interested.
"""
def writable():
"""Each time through the select() loop, the set of sockets is
scanned, and this method is called to see if there is any
interest in writing. The default method simply returns 1,
indicating that by default, all channels will be
interested.
"""
| 33.140468
| 78
| 0.628419
|
6ecfca1eacb1a57fbc200d2e3b544c6d3e873bc9
| 2,236
|
py
|
Python
|
doubling_agent/image_steve_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | 1
|
2020-12-03T15:47:24.000Z
|
2020-12-03T15:47:24.000Z
|
doubling_agent/image_steve_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | null | null | null |
doubling_agent/image_steve_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | null | null | null |
from doubling_agent.image_analysis_functions import *
plt.style.use('ggplot')
def plot_cells_cont(data_df, value, folder_name, r, time_step):
# basic function to plot the cells at a given time snapshot, when cell states are continuous
df_to_plot = data_df.loc[data_df['count'] == value]
df_to_plot.plot.scatter(x='x', y='y', c='state', s=8, cmap='viridis', vmin=0, vmax=100)
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value*time_step)) + '_cont.png')
plt.cla()
plt.close('all')
def plot_cells_3d_cont(data_df, value, folder_name, r, time_step):
# basic function to plot the cells at a given time snapshot, when the cell states are continuous
df_to_plot_3d = data_df.loc[data_df['count'] == value]
fig = plt.figure()
threedee = fig.gca(projection='3d')
#print(df_to_plot.state.values)
p = threedee.scatter(df_to_plot_3d.x.values, df_to_plot_3d.y.values, df_to_plot_3d.z.values, c=df_to_plot_3d.state,
cmap='viridis', vmin=0, vmax=100)
threedee.set_xlabel('x')
threedee.set_ylabel('y')
threedee.set_zlabel('z')
fig.colorbar(p)
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value*time_step)) + '_cont.png')
plt.cla()
plt.close('all')
def plot_2d_slice_cont(folder_name, data_df, value, time_step, r):
# plot 2d slices of the data when the data is continuous
df_2d_slice = data_df.loc[data_df['count'] == value].copy()
# will take slices in x to get 2d analysis
x_values = df_2d_slice['z'].values
unique, counts = np.unique(x_values, return_counts=True)
tot_dict = dict(zip(unique, counts))
for sect in unique:
# if there are more than 10 cells at this value of z, then plot
if tot_dict.get(sect) > 10:
print(sect)
df_for_image = df_2d_slice.loc[(data_df['z'] == sect)].copy()
plt.figure()
df_for_image.plot.scatter(x='x', y='y', c='state', s=8, cmap='viridis', vmin=0, vmax=100)
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value * time_step)) + '_sect_' + str(sect) + '_cont.png')
plt.close('all')
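A toy invocation of plot_cells_cont above, with a made-up three-cell DataFrame; the column names mirror what the function indexes (x, y, state, count), and it is assumed that the star import at the top provides matplotlib.pyplot as plt.
import pandas as pd

# three fake cells recorded at snapshot count == 0, states on the 0-100 colour scale
toy_df = pd.DataFrame({
    "x": [0.0, 1.0, 2.0],
    "y": [0.0, 1.0, 0.0],
    "state": [10, 55, 90],
    "count": [0, 0, 0],
})
plot_cells_cont(toy_df, value=0, folder_name=".", r=0, time_step=1)
# writes ./repeat_0_day_0_cont.png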
| 43.843137
| 119
| 0.638193
|
f4e364013098c428465cf0707f1cd83085c433d8
| 7,120
|
py
|
Python
|
tests/test_distributed.py
|
terrorizer1980/ParlAI
|
f8fda24bd11804104b0a91aa84e170d3efbd8983
|
[
"MIT"
] | 2
|
2020-08-27T05:21:14.000Z
|
2020-09-29T14:34:09.000Z
|
tests/test_distributed.py
|
terrorizer1980/ParlAI
|
f8fda24bd11804104b0a91aa84e170d3efbd8983
|
[
"MIT"
] | 316
|
2021-03-19T14:53:31.000Z
|
2022-03-27T03:36:51.000Z
|
tests/test_distributed.py
|
terrorizer1980/ParlAI
|
f8fda24bd11804104b0a91aa84e170d3efbd8983
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import copy
import unittest
import torch.distributed as dist
import parlai.utils.testing as testing_utils
import parlai.scripts.build_dict as build_dict
import parlai.scripts.multiprocessing_train as mp_train
def _forced_parse(parser, opt):
parser.set_params(**opt)
parser.set_params(log_every_n_sec=10)
popt = parser.parse_args([])
# in some rare cases, like for instance if the model class also
# overrides its default params, the params override will not
# be taken into account.
for k, v in opt.items():
popt[k] = v
return popt
@testing_utils.skipUnlessGPU
class TestDistributed(unittest.TestCase):
_base_config = dict(
task='integration_tests:nocandidate',
model='transformer/generator',
optimizer='adamax',
validation_metric='ppl',
learningrate=7e-3,
batchsize=7,
validation_every_n_epochs=5,
num_epochs=20,
n_layers=1,
n_heads=1,
ffn_size=32,
embedding_size=32,
beam_size=1,
verbose=True,
)
def setUp(self):
print(f'[Setting up test {self._testMethodName}]')
def tearDown(self):
# we need to de-initialize the distributed world, otherwise other
# tests will they're we're distributed when we're really not.
dist.destroy_process_group()
def _distributed_train_model(self, opt):
with testing_utils.tempdir() as tmpdir:
if 'model_file' not in opt:
opt['model_file'] = os.path.join(tmpdir, 'model')
if 'dict_file' not in opt:
opt['dict_file'] = os.path.join(tmpdir, 'model.dict')
parser = mp_train.setup_args()
popt = _forced_parse(parser, opt)
# we need a prebuilt dictionary
parser = build_dict.setup_args()
build_dict.build_dict(popt)
valid, test = mp_train.launch_and_train(popt, 31337)
return (valid, test)
def test_generator_distributed(self):
valid, test = self._distributed_train_model(self._base_config)
self.assertLessEqual(valid['ppl'], 1.20)
self.assertGreaterEqual(valid['bleu-4'], 0.95)
self.assertLessEqual(test['ppl'], 1.20)
self.assertGreaterEqual(test['bleu-4'], 0.95)
# Tests that DialogData.get() is doing the right thing
# Ensure no duplication of examples among workers
# It would be 200 if each worker did all the examples
self.assertEqual(valid['exs'].value(), 100)
self.assertEqual(test['exs'].value(), 100)
def test_multitask_distributed(self):
config = copy.deepcopy(self._base_config)
config['task'] = 'integration_tests:nocandidate,integration_tests:multiturn'
config['dynb'] = 'full'
config['skip_generation'] = 'true'
valid, test = self._distributed_train_model(config)
self.assertLessEqual(valid['ppl'], 1.20)
self.assertLessEqual(test['ppl'], 1.20)
# Tests that DialogData.get() is doing the right thing
# Ensure no duplication of examples among workers
# It would be 200 if each worker did all the examples
self.assertEqual(valid['exs'].value(), 500)
self.assertEqual(test['exs'].value(), 500)
def test_distributed_eval_max_exs(self):
config = copy.deepcopy(self._base_config)
config['validation_max_exs'] = 90
config['short_final_eval'] = True
valid, test = self._distributed_train_model(config)
# Tests that DialogData.get() is doing the right thing
# Ensure no duplication of examples among workers
# It would be 200 if each worker did all the examples
# Note: we decided that it was OK for the total count to be slightly off
# when using validation_max_exs and distributed.
# It's > 90 b/c there are two workers, told to do 45 each, & BatchWorld
# parley() does batchsize examples each time, so each worker will do 49
# example. In the future, if we fix VME, this assert should be changed
# to exactly 90.
self.assertEqual(valid['exs'].value(), 98)
self.assertEqual(test['exs'].value(), 98)
def test_distributed_eval_stream_mode(self):
config = copy.deepcopy(self._base_config)
config['datatype'] = 'train:stream'
valid, test = self._distributed_train_model(config)
# Tests that StreamDialogData.get() is doing the right thing
# Ensure no duplication of examples among workers
# It would be 200 if each worker did all the examples
self.assertEqual(valid['exs'].value(), 100)
self.assertEqual(test['exs'].value(), 100)
def test_distributed_eval_stream_mode_max_exs(self):
config = copy.deepcopy(self._base_config)
config['datatype'] = 'train:stream'
config['validation_max_exs'] = 90
config['short_final_eval'] = True
valid, test = self._distributed_train_model(config)
# Tests that StreamDialogData.get() is doing the right thing
# Ensure no duplication of examples among workers
# It would be 200 if each worker did all the examples
# As in the test above:
# It does 98 instead of 90 b/c there are two workers, told to do 45
# each, and BatchWorld parley() does batchsize examples each time, so
# each worker will do 49 examples.
# In the future, if we fix VME, this assert should be changed to
# exactly 90.
self.assertEqual(valid['exs'].value(), 98)
self.assertEqual(test['exs'].value(), 98)
def test_chunked_dynamic_teacher(self):
config = copy.deepcopy(self._base_config)
config['datatype'] = 'train:stream'
config['dynamic_batching'] = 'full'
config['truncate'] = 16
valid, test = self._distributed_train_model(config)
assert valid['exs'].value() == 100
assert test['exs'].value() == 100
def test_chunked_teacher(self):
config = copy.deepcopy(self._base_config)
config['datatype'] = 'train:stream'
config['num_epochs'] = 5
config['dynamic_batching'] = None
valid, test = self._distributed_train_model(config)
assert valid['exs'].value() == 100
assert test['exs'].value() == 100
def test_no_model_parallel(self):
"""
Checks that we throw an error when combining mp_train with
--model-parallel true.
"""
config = copy.deepcopy(self._base_config)
config['model_parallel'] = True
for m in [
'transformer/generator',
'transformer/ranker',
'transformer/classifier',
]:
config['model'] = m
with self.assertRaises(RuntimeError):
_ = self._distributed_train_model(config)
if __name__ == '__main__':
unittest.main()
| 37.083333
| 84
| 0.644101
|
d225df31647e36690f8b22fcdb6b2a1e42d7d230
| 1,511
|
py
|
Python
|
src/tests/ModAdderTester.py
|
datenlord/poseidon-spinalhdl
|
1e4ce3f0ae06d6c264e103629dd28a75c119a548
|
[
"MIT"
] | null | null | null |
src/tests/ModAdderTester.py
|
datenlord/poseidon-spinalhdl
|
1e4ce3f0ae06d6c264e103629dd28a75c119a548
|
[
"MIT"
] | null | null | null |
src/tests/ModAdderTester.py
|
datenlord/poseidon-spinalhdl
|
1e4ce3f0ae06d6c264e103629dd28a75c119a548
|
[
"MIT"
] | 1
|
2021-12-18T08:44:10.000Z
|
2021-12-18T08:44:10.000Z
|
import cocotb
from poseidon_python import basic
from cocotb.triggers import Timer
import random
from cocotb_test import simulator
@cocotb.test()
async def ModAdderTest(dut):
"""test ModAdder"""
for i in range(100):
res = random.randint(1, basic.P - 1)
op1 = random.randint(0, res)
op2 = res - op1
dut.op1_i.value = op1
dut.op2_i.value = op2
await Timer(5, units="ns")
assert dut.res_o == res % basic.P, "the result of {} + {} is wrong".format(
op1, op2
)
for i in range(100):
res = random.randint(basic.P, pow(2, 255) - 1)
op1 = random.randint(0, res)
op2 = res - op1
dut.op1_i.value = op1
dut.op2_i.value = op2
await Timer(5, units="ns")
assert dut.res_o == res % basic.P, "the result of {} + {} is wrong".format(
op1, op2
)
for i in range(100):
res = random.randint(pow(2, 255), (2 * basic.P - 2))
op1 = random.randint((res - pow(2, 255) + 1), (pow(2, 255) - 1))
op2 = res - op1
dut.op1_i.value = op1
dut.op2_i.value = op2
await Timer(5, units="ns")
assert dut.res_o == res % basic.P, "the result of {} + {} is wrong".format(
op1, op2
)
# pytest
def test_ModAdder():
simulator.run(
verilog_sources=["../main/verilog/ModAdder.v"],
toplevel="ModAdder",
module="ModAdderTester",
python_search="./src/reference_model/",
)
| 27.981481
| 83
| 0.550629
|
2d364e3ea2c692adda1c81af78c53b57f9dcdeb4
| 19,903
|
py
|
Python
|
cogs/gaems.py
|
Code-Cecilia/botman-rewrite
|
9d8baeebf267c62df975d2f209e85589b81934af
|
[
"MIT"
] | 2
|
2022-02-21T14:10:15.000Z
|
2022-02-21T14:10:50.000Z
|
cogs/gaems.py
|
Code-Cecilia/botman-rewrite
|
9d8baeebf267c62df975d2f209e85589b81934af
|
[
"MIT"
] | null | null | null |
cogs/gaems.py
|
Code-Cecilia/botman-rewrite
|
9d8baeebf267c62df975d2f209e85589b81934af
|
[
"MIT"
] | null | null | null |
import asyncio
import random
import string
import discord
from discord.ext import commands
from assets import internet_funcs, discord_funcs
from assets import tictactoe_assets
from assets.discord_funcs import get_avatar_url
from assets.tictactoe_assets import TicTacToe
number_list = string.digits
def is_author_check(ctx):
return lambda message: message.channel == ctx.message.channel and message.author == ctx.author
def not_author_check(ctx):
return lambda message: message.channel == ctx.message.channel and message.author != ctx.author
def is_member_check(ctx, member):
return lambda message: message.channel == ctx.message.channel and message.author == member
async def ttt_send_embed(ctx, board, title, color):
embed = discord.Embed(title=title, color=color)
embed.description = f"```\n{board}\n```"
return await ctx.send(embed=embed)
class Gaems(commands.Cog, description="A collection of gaems. Play gaem, life good."):
def __init__(self, bot):
self.bot = bot
self.timeout = 20
self.madlibsApi = "https://madlibz.herokuapp.com/api/random?minlength=5&maxlength=15"
self.vowels = ["a", "e", "i", "o", "u"]
self.playing = []
@commands.command(name="setgameschannel", aliases=["gameschannel", "setgaemschannel", "gaemschannel"],
description="Sets the channels for playing Gaems.")
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def set_games_channel(self, ctx, channel: discord.TextChannel):
"""Set the channel for Games. Restrict usage of the game commands to the channel you set."""
message = await ctx.send(f"Setting Gaems channel as {channel.mention}....")
self.bot.dbmanager.set_games_channel(ctx.guild.id, channel.id)
await message.edit(content=f"Set Gaems channel as {channel.mention} successfully!")
@commands.command(name="guessthenumber", aliases=["luckychoice", "numberfinder"],
description="Play games, have fun. It's a simple life.")
async def guess_the_number(self, ctx):
await ctx.trigger_typing()
number = random.choice(number_list)
channel = self.bot.dbmanager.get_games_channel(ctx.guild.id)
if not channel:
pass
else:
channel = self.bot.get_channel(channel)
if channel and not channel == ctx.message.channel:
# that channel needs to exist, and should not be the current channel
return await ctx.send(f"You can only play Guess The Number in {channel.mention}")
if ctx.author.id in self.playing:
return await ctx.send("You're already have a game in progress!")
else:
self.playing.append(ctx.author.id)
# checks passed, let's play
await ctx.send(f"Welcome to **Guess The Number**, _{ctx.author.display_name}_!\n"
f"The rules are simple.\n"
f"I will think of a number from 1 to 10, and you have to find it within 3 tries.\n"
f"The game starts in 3 seconds.")
await asyncio.sleep(3)
await ctx.send("**Go!**")
n = 3
win = False
while n > 0:
try:
user_input = await self.bot.wait_for("message", timeout=self.timeout,
check=lambda message: message.author == ctx.author)
except asyncio.exceptions.TimeoutError:
if ctx.author.id in self.playing:
self.playing.remove(ctx.author.id)
return await ctx.send(f"_{ctx.author.display_name}_, I'm done waiting. We'll play again later.")
if user_input.content not in number_list:
await ctx.send(f"_{ctx.author.display_name}_, Not a valid guess! "
f"You need to choose a number from 1 to 10.")
break
if str(user_input.content) == number:
await user_input.add_reaction("🎉".strip())
await ctx.send(f"You won!, _{ctx.author.display_name}_!")
win = True
break
n -= 1
await ctx.send(f"_{ctx.author.display_name}_, Wrong! You have **{n}** {'tries' if n != 1 else 'try'} left.")
if not win:
await ctx.reply(f"The correct answer is {number}.")
if ctx.author.id in self.playing:
self.playing.remove(ctx.author.id)
await ctx.send(f"Thanks for playing **Guess The Number**!")
@commands.command(name="trivia", aliases=["quiz"], description="The bot asks a question, you answer. Simple.")
async def trivia(self, ctx):
await ctx.trigger_typing()
channel = self.bot.dbmanager.get_games_channel(ctx.guild.id)
if not channel:
pass
else:
channel = self.bot.get_channel(channel)
if channel and not channel == ctx.message.channel:
# that channel needs to exist, and should not be the current channel
return await ctx.send(f"You can only play Trivia in {channel.mention}")
if ctx.author.id in self.playing:
return await ctx.send("You're already have a game in progress!")
else:
self.playing.append(ctx.author.id)
# checks passed, let's play
response = await internet_funcs.get_json("https://opentdb.com/api.php?amount=1")
# TODO: use buttons for selecting answers, when buttons are a thing in pycord
if not response.get("response_code") == 0:
return
results = response.get("results")[0]
category = results.get("category").replace("&quot;", "\"").replace("&#039;", "'")
difficulty = results.get("difficulty").replace("&quot;", "\"").replace("&#039;", "'")
question = results.get("question").replace("&quot;", "\"").replace("&#039;", "'")
correctans = results.get("correct_answer").replace("&quot;", "\"").replace("&#039;", "'")
wrong_ans_list = results.get("incorrect_answers")
answers = wrong_ans_list
answers.append(correctans)
random.shuffle(answers)
correctans_index = list(answers).index(correctans) + 1
message_to_edit = await ctx.send("The rules are simple. I will ask you a question, you choose the answer.\n"
"If there are 4 options in the answer, "
"you can enter `1`, `2`, `3`, or `4`.\n"
"The game starts in 5 seconds.")
await asyncio.sleep(5)
await message_to_edit.edit(content=f"_{ctx.author.display_name}_, go!")
embed = discord.Embed(title=f"Category: {category}\nDifficulty: {difficulty}",
color=discord_funcs.get_color(ctx.author))
embed.add_field(name=question, value="\ufeff", inline=False)
option_string = ""
for x in answers:
option_str = x.replace("&quot;", "\"").replace("&#039;", "'")
option_string += f"`{answers.index(x) + 1}.` {option_str}\n"
embed.add_field(name="Options", value=option_string, inline=True)
embed.set_footer(
text=f"{ctx.author.display_name}, pick the answer! You have {self.timeout} seconds.")
await ctx.send(embed=embed)
try:
message_from_user = await self.bot.wait_for("message", check=lambda message: message.author == ctx.author,
timeout=self.timeout)
except asyncio.TimeoutError:
return await ctx.send(f"_{ctx.author.display_name}_, I'm done waiting. We'll play again later.\n"
f"The answer was **{correctans}**")
try:
content = int(message_from_user.content)
except ValueError:
if ctx.author.id in self.playing:
self.playing.remove(ctx.author.id)
return await ctx.send(f"_{ctx.author.display_name}_ , wrong format!\n"
"You can only answer with the Index of the option you think is correct.\n"
"We'll play later.")
if content == correctans_index:
await message_from_user.add_reaction("🎉")
await message_from_user.reply("You won!")
else:
await message_from_user.add_reaction("❌")
await message_from_user.reply(f"_{ctx.author.display_name}_. that was not the correct answer.\n"
f"The correct answer was **{correctans}**.")
if ctx.author.id in self.playing:
self.playing.remove(ctx.author.id)
await ctx.send(f"Thanks for playing **Trivia**, _{ctx.author.display_name}_!")
@commands.command(name="madlibs", aliases=["ml"], description="Let's play MadLibs!")
async def play_madlibs(self, ctx):
await ctx.trigger_typing()
channel_id = self.bot.dbmanager.get_games_channel(ctx.guild.id)
if channel_id:
channel = self.bot.get_channel(int(channel_id))
if not channel == ctx.message.channel:
return await ctx.send(f"You can only play MadLibs in {channel.mention}.")
if ctx.author.id in self.playing:
return await ctx.send("You already have a game in progress!")
else:
self.playing.append(ctx.author.id)
# checks passed, let's play!
async with ctx.typing():
madlibs_dict = await internet_funcs.get_json(self.madlibsApi)
title = madlibs_dict.get("title")
blanks = madlibs_dict.get("blanks")
value = madlibs_dict.get("value")[:-1]
user_results = []
for x in range(len(blanks)): # get the input from the user for each entry in the blanks list
await ctx.send(f"**{x + 1}/{len(blanks)}** - "
f"_{ctx.author.display_name}_, I need "
f"{'an' if blanks[x][0].lower() in self.vowels else 'a'} " # vowels
f"{blanks[x]}")
try:
user_input_message = await self.bot.wait_for(
"message", check=is_author_check(ctx), timeout=20)
except asyncio.TimeoutError:
if ctx.author.id in self.playing:
self.playing.remove(ctx.author.id)
return await ctx.send("I'm done waiting. We'll play again later.")
user_results.append(f"**{user_input_message.content}**") # append results to another dict
string = ""
for x in range(len(user_results)):
string += value[x] # adding the values to the final string
string += user_results[x]
string += value[-1] # adding the final value that was missed in the for loop
embed = discord.Embed(title=title, description=string, colour=discord_funcs.get_color(ctx.author))
embed.set_footer(text=f"Good job, {ctx.author.display_name}!", icon_url=get_avatar_url(ctx.author))
if ctx.author.id in self.playing:
self.playing.remove(ctx.author.id)
await ctx.send(embed=embed)
@commands.command(name="tictactoe", aliases=["ttt"])
async def tictactoe(self, ctx):
single_player = False
player_2 = None
await ctx.trigger_typing()
channel_id = self.bot.dbmanager.get_games_channel(ctx.guild.id)
if channel_id:
channel = self.bot.get_channel(int(channel_id))
if not channel == ctx.message.channel:
return await ctx.send(f"You can only play TicTacToe in {channel.mention}.")
if ctx.author.id in self.playing:
return await ctx.send("You already have a game in progress!")
else:
self.playing.append(ctx.author.id)
# checks passed, let's play!
await ctx.send(f"_{ctx.author.display_name}_, welcome to TicTacToe!\n"
f"You will be required to place your move first. To place a move, type in the coordinates using the format below.\n"
f"Format: `x,y` where x and y are the number of row and column.\n"
f"Example: `1,3`, `2,2`, `3,1`, where `2,2` corresponds to the middle square.\n\n"
f"Type `Start` to start the game.")
try:
start_message = await self.bot.wait_for("message", check=is_author_check(ctx), timeout=20)
except asyncio.TimeoutError:
self.playing.remove(ctx.author.id)
return await ctx.send("I'm done waiting. We'll play again later.")
if not start_message.content.lower() == "start":
self.playing.remove(ctx.author.id)
# the game is over before it even started :(
return await ctx.send("Alright then, we'll play later.")
await ctx.send(
f"_{ctx.author.display_name}_, Press `1` for Solos with me, and `2` to play with another member.")
try:
mode_message = await self.bot.wait_for("message", check=is_author_check(ctx), timeout=20)
except asyncio.TimeoutError:
self.playing.remove(ctx.author.id)
return await ctx.send("I'm done waiting. We'll play again later.")
if mode_message.content.lower() not in ["1", "2"]:
await ctx.send("Invalid input. Defaulting to SinglePlayer...")
single_player = True
if mode_message.content.lower() == "1":
single_player = True
elif mode_message.content.lower() == "2":
single_player = False
if single_player:
# We ask the player for difficulty settings, only if its singleplayer
difficulty = await tictactoe_assets.ask_for_difficulty(ctx, ctx.author)
tictactoe = TicTacToe(board_size=3, mode=difficulty)
else:
# its multiplayer, so we get the second player
await ctx.send(f"Player 2, please type `Me` in this channel to play with _{ctx.author.display_name}_.")
for i in range(5): # read 5 messages
try:
player_2_message = await self.bot.wait_for("message", check=not_author_check(ctx), timeout=20)
except asyncio.TimeoutError:
self.playing.remove(ctx.author.id)
return await ctx.send("I'm done waiting. We'll play again later.")
if player_2_message.content.lower() == "me":
player_2 = player_2_message.author
if player_2.id in self.playing:
self.playing.remove(ctx.author.id)
return await ctx.send(f"_{player_2.mention}_ already has a game in progress! "
f"Please try again later. Exiting...")
break
else:
await ctx.send("I didn't get a valid response from you. Defaulting to SinglePlayer...")
single_player = True
# multiplayer, so we don't care what the difficulty is - because the algo isn't used
tictactoe = TicTacToe(board_size=3)
# game loop
while True:
game_state = tictactoe.check_game_over_multi()
board = tictactoe.print_board()
if not single_player:
# if player 1 won
if game_state[1] == 1:
await tictactoe_assets.send_embeds(ctx, state=game_state[0], player=ctx.author, board=board)
break
# if player 2 won
elif game_state[1] == 2:
await tictactoe_assets.send_embeds(ctx, state=game_state[0], player=player_2, board=board)
break
elif type(game_state[1]) == int and game_state[1] == 0:
# if it's a draw
await tictactoe_assets.send_embeds(ctx, state=game_state[0], board=board)
break
else:
# if user won
if game_state[1] == 1:
await tictactoe_assets.send_embeds(ctx, state="You win!", board=board)
break
# if the bot won
elif game_state[1] == 2:
await tictactoe_assets.send_embeds(ctx, state="You lose :(", board=board)
break
elif type(game_state[1]) == int and game_state[1] == 0:
# if it's a draw
await tictactoe_assets.send_embeds(ctx, state="It's a draw!", board=board)
break
# if it's not a draw, and the game is not over, we ask for the next move
if single_player:
# we ask player for their move
await tictactoe_assets.send_embeds(ctx, state="Your turn!", player=ctx.author, board=board)
for i in range(5): # read 5 messages
coords = await tictactoe_assets.ask_for_input_coords(ctx, ctx.author, tictactoe)
if coords:
break
else:
return await ctx.send(f"I didn't get a valid response from you. "
f"Therefore, you lose :(")
tictactoe.place_piece(tictactoe.player1_turn, coords[0], coords[1])
if tictactoe.check_game_over_single():
continue # if the game is over, we don't ask for the next move
tictactoe.calculate_bot_move(auto_place=True)
else:
# we ask player 1 for their move
await tictactoe_assets.send_embeds(ctx, state="Your turn!", player=ctx.author, board=board)
for i in range(5): # read 5 messages
coords = await tictactoe_assets.ask_for_input_coords(ctx, ctx.author, tictactoe)
if coords:
break
else:
return await ctx.send(f"I didn't get a valid response from you. "
f"Therefore, {player_2.display_name} wins!")
tictactoe.place_piece(tictactoe.player1_turn, coords[0], coords[1])
if tictactoe.check_game_over_single():
continue # if the game is over, we don't ask for the next move
# we ask player 2 for their move
await tictactoe_assets.send_embeds(ctx, state="Your turn!", player=player_2, board=board)
for i in range(5): # read 5 messages
coords = await tictactoe_assets.ask_for_input_coords(ctx, player_2, tictactoe)
if coords:
break
else:
return await ctx.send(f"I didn't get a valid response from you. "
f"Therefore, {ctx.author.display_name} wins!")
tictactoe.place_piece(user=tictactoe.player2_turn, row=coords[0], column=coords[1])
if tictactoe.check_game_over_single():
continue # if the game is over, we don't ask for the next move
if single_player:
if ctx.author.id in self.playing:
self.playing.remove(ctx.author.id)
await ctx.send(f"Thanks for playing TicTacToe, _{ctx.author.display_name}_!")
else:
if ctx.author.id in self.playing:
self.playing.remove(ctx.author.id)
if player_2.id in self.playing:
self.playing.remove(player_2.id)
await ctx.send(f"Thanks for playing TicTacToe, _{ctx.author.display_name}_ and _{player_2.display_name}_!")
def setup(bot):
bot.add_cog(Gaems(bot))
| 49.633416
| 139
| 0.579209
|
f719d3c75df148a6ceda79acf57bc0e57342d5f3
| 4,311
|
py
|
Python
|
src/test.py
|
alexey-kaz/python-project
|
661fe06e09846cd1c3c6d600973a6e3433096c1d
|
[
"MIT"
] | null | null | null |
src/test.py
|
alexey-kaz/python-project
|
661fe06e09846cd1c3c6d600973a6e3433096c1d
|
[
"MIT"
] | null | null | null |
src/test.py
|
alexey-kaz/python-project
|
661fe06e09846cd1c3c6d600973a6e3433096c1d
|
[
"MIT"
] | 3
|
2021-04-25T06:37:26.000Z
|
2021-06-03T19:19:19.000Z
|
"""Тест."""
import unittest
from recipes import form_answer
from database import delete_table_data
from parsing import NEWS, AFISHA, HOROSCOPE, WEATHER
class TestBot(unittest.TestCase):
"""Тест."""
def test_form_answer(self):
"""Тест."""
rec1 = {"name": "Булочки с изюмом",
"ingrs": ["Мука", "Яйцо куриное", "Изюм"],
"link": "http://recipe"}
ans1 = '<b>Булочки с изюмом</b>\nИнгредиенты:\n 1) Мука\n' + \
'2) Яйцо куриное\n3) Изюм\n\n<a href="http://recipe">Булочки с изюмом</a>'
self.assertEqual(form_answer(rec1), ans1)
rec2 = {"name": "Омлет",
"ingrs": ["Яйцо куриное", "Соль", "Молоко"],
"link": "http://recipe"}
ans2 = '<b>Омлет</b>\nИнгредиенты:\n 1) Яйцо куриное\n2) Соль\n' + \
'3) Молоко\n\n<a href="http://recipe">Омлет</a>'
self.assertEqual(form_answer(rec2), ans2)
with self.assertRaises(KeyError):
form_answer(dict())
# def test_empty_delete_reminders(self):
# self.assertEqual(delete_table_data("reminders"), 0)
def test_parsing_horoscope(self):
"""Тест."""
obj = HOROSCOPE()
self.assertEqual(obj.url, "https://1001goroskop.ru")
def test_parsing_horoscope_1(self):
"""Тест."""
obj = HOROSCOPE()
self.assertEqual(type(obj.get_signs()), type([1, 2]))
def test_parsing_news(self):
"""Тест."""
obj = NEWS()
self.assertEqual(obj.count, None)
def test_parsing_news_1(self):
"""Тест."""
obj = NEWS()
obj.count = 5
obj.make_zero()
self.assertEqual(obj.count, 0)
def test_parsing_news_2(self):
"""Тест."""
obj = NEWS()
self.assertEqual(obj.url, "https://lenta.ru/parts/news")
def test_parsing_news_3(self):
"""Тест."""
obj = NEWS()
self.assertEqual(obj.url_part, "https://lenta.ru")
def test_parsing_news_4(self):
"""Тест."""
obj = NEWS()
self.assertEqual(type(obj.parse()), type([1, 2]))
def test_parsing_weather(self):
"""Тест."""
obj = WEATHER()
self.assertEqual(type(obj.extra_data), type({}))
def test_parsing_weather_1(self):
"""Тест."""
obj = WEATHER()
self.assertEqual(obj.url, "https://www.gismeteo.ru")
def test_parsing_weather_2(self):
"""Тест."""
obj = WEATHER()
self.assertEqual(type(obj.main_data), type({}))
def test_parsing_afisha(self):
"""Тест."""
obj = AFISHA()
self.assertEqual(obj.cinema_count, None)
def test_parsing_afisha_1(self):
"""Тест."""
obj = AFISHA()
obj.cinema_count = 1
obj.make_zero()
self.assertEqual(obj.cinema_count, 0)
def test_parsing_afisha_2(self):
"""Тест."""
obj = AFISHA()
obj.theatre_count = 2
obj.make_zero()
self.assertEqual(obj.theatre_count, 0)
def test_parsing_afisha_3(self):
"""Тест."""
obj = AFISHA()
obj.concert_count = 3
obj.make_zero()
self.assertEqual(obj.concert_count, 0)
def test_parsing_afisha_4(self):
"""Тест."""
obj = AFISHA()
obj.cinema_count = 1
obj.theatre_count = 2
obj.make_zero()
self.assertEqual(obj.cinema_count, 0)
self.assertEqual(obj.theatre_count, 0)
def test_parsing_afisha_5(self):
"""Тест."""
obj = AFISHA()
obj.cinema_count = 1
obj.concert_count = 3
obj.make_zero()
self.assertEqual(obj.cinema_count, 0)
self.assertEqual(obj.concert_count, 0)
def test_parsing_afisha_6(self):
"""Тест."""
obj = AFISHA()
obj.theatre_count = 2
obj.concert_count = 3
obj.make_zero()
self.assertEqual(obj.theatre_count, 0)
self.assertEqual(obj.concert_count, 0)
def test_parsing_afisha_total(self):
"""Тест."""
obj = AFISHA()
obj.cinema_count = 1
obj.theatre_count = 2
obj.concert_count = 3
obj.make_zero()
self.assertEqual(obj.cinema_count, 0)
self.assertEqual(obj.theatre_count, 0)
self.assertEqual(obj.concert_count, 0)
| 29.527397
| 89
| 0.567618
|
711ef7d9be3902366dd543139260c0eb059f0996
| 6,132
|
py
|
Python
|
src/sage/homology/homology_group.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | 5
|
2015-01-04T07:15:06.000Z
|
2022-03-04T15:15:18.000Z
|
src/sage/homology/homology_group.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/homology/homology_group.py
|
bopopescu/classic_diff_geom
|
2b1d88becbc8cb30962e0995cc78e429e0f5589f
|
[
"BSL-1.0"
] | 10
|
2016-09-28T13:12:40.000Z
|
2022-02-12T09:28:34.000Z
|
"""
Homology Groups
This module defines a :meth:`HomologyGroup` class which is an abelian
group that prints itself in a way that is suitable for homology
groups.
"""
########################################################################
# Copyright (C) 2013 John H. Palmieri <palmieri@math.washington.edu>
# Volker Braun <vbraun.name@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# http://www.gnu.org/licenses/
########################################################################
from sage.modules.free_module import VectorSpace
from sage.groups.additive_abelian.additive_abelian_group import AdditiveAbelianGroup_fixed_gens
from sage.rings.integer_ring import ZZ
class HomologyGroup_class(AdditiveAbelianGroup_fixed_gens):
"""
Discrete Abelian group on `n` generators. This class inherits from
:class:`~sage.groups.additive_abelian.additive_abelian_group.AdditiveAbelianGroup_fixed_gens`;
see :mod:`sage.groups.additive_abelian.additive_abelian_group` for more
documentation. The main difference between the classes is in the print
representation.
EXAMPLES::
sage: from sage.homology.homology_group import HomologyGroup
sage: G = AbelianGroup(5, [5,5,7,8,9]); G
Multiplicative Abelian group isomorphic to C5 x C5 x C7 x C8 x C9
sage: H = HomologyGroup(5, ZZ, [5,5,7,8,9]); H
C5 x C5 x C7 x C8 x C9
sage: G == loads(dumps(G))
True
sage: AbelianGroup(4)
Multiplicative Abelian group isomorphic to Z x Z x Z x Z
sage: HomologyGroup(4, ZZ)
Z x Z x Z x Z
sage: HomologyGroup(100, ZZ)
Z^100
"""
def __init__(self, n, invfac):
"""
See :func:`HomologyGroup` for full documentation.
EXAMPLES::
sage: from sage.homology.homology_group import HomologyGroup
sage: H = HomologyGroup(5, ZZ, [5,5,7,8,9]); H
C5 x C5 x C7 x C8 x C9
"""
n = len(invfac)
A = ZZ**n
B = A.span([A.gen(i) * invfac[i] for i in xrange(n)])
AdditiveAbelianGroup_fixed_gens.__init__(self, A, B, A.gens())
self._original_invts = invfac
def _repr_(self):
"""
Print representation of ``self``.
EXAMPLES::
sage: from sage.homology.homology_group import HomologyGroup
sage: H = HomologyGroup(7, ZZ, [4,4,4,4,4,7,7])
sage: H._repr_()
'C4^5 x C7 x C7'
sage: HomologyGroup(6, ZZ)
Z^6
"""
eldv = self._original_invts
if len(eldv) == 0:
return "0"
rank = len(filter(lambda x: x == 0, eldv))
torsion = sorted(filter(lambda x: x, eldv))
if rank > 4:
g = ["Z^%s" % rank]
else:
g = ["Z"] * rank
if len(torsion) != 0:
printed = []
for t in torsion:
numfac = torsion.count(t)
too_many = (numfac > 4)
if too_many:
if t not in printed:
g.append("C{}^{}".format(t, numfac))
printed.append(t)
else:
g.append("C%s" % t)
times = " x "
return times.join(g)
def _latex_(self):
"""
LaTeX representation of ``self``.
EXAMPLES::
sage: from sage.homology.homology_group import HomologyGroup
sage: H = HomologyGroup(7, ZZ, [4,4,4,4,4,7,7])
sage: H._latex_()
'C_{4}^{5} \\times C_{7} \\times C_{7}'
sage: latex(HomologyGroup(6, ZZ))
\ZZ^{6}
"""
eldv = self._original_invts
if len(eldv) == 0:
return "0"
rank = len(filter(lambda x: x == 0, eldv))
torsion = sorted(filter(lambda x: x, eldv))
if rank > 4:
g = ["\\ZZ^{{{}}}".format(rank)]
else:
g = ["\\ZZ"] * rank
if len(torsion) != 0:
printed = []
for t in torsion:
numfac = torsion.count(t)
too_many = (numfac > 4)
if too_many:
if t not in printed:
g.append("C_{{{}}}^{{{}}}".format(t, numfac))
printed.append(t)
else:
g.append("C_{{{}}}".format(t))
times = " \\times "
return times.join(g)
def HomologyGroup(n, base_ring, invfac=None):
"""
Abelian group on `n` generators which represents a homology group in a
fixed degree.
INPUT:
- ``n`` -- integer; the number of generators
- ``base_ring`` -- ring; the base ring over which the homology is computed
    - ``invfac`` -- list of integers; the invariant factors -- ignored
if the base ring is a field
OUTPUT:
A class that can represent the homology group in a fixed
homological degree.
EXAMPLES::
sage: from sage.homology.homology_group import HomologyGroup
sage: G = AbelianGroup(5, [5,5,7,8,9]); G
Multiplicative Abelian group isomorphic to C5 x C5 x C7 x C8 x C9
sage: H = HomologyGroup(5, ZZ, [5,5,7,8,9]); H
C5 x C5 x C7 x C8 x C9
sage: AbelianGroup(4)
Multiplicative Abelian group isomorphic to Z x Z x Z x Z
sage: HomologyGroup(4, ZZ)
Z x Z x Z x Z
sage: HomologyGroup(100, ZZ)
Z^100
"""
if base_ring.is_field():
return VectorSpace(base_ring, n)
# copied from AbelianGroup:
if invfac is None:
if isinstance(n, (list, tuple)):
invfac = n
n = len(n)
else:
invfac = []
if len(invfac) < n:
invfac = [0] * (n - len(invfac)) + invfac
elif len(invfac) > n:
raise ValueError("invfac (={}) must have length n (={})".format(invfac, n))
M = HomologyGroup_class(n, invfac)
return M
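The invariant-factor padding above is easiest to see with a couple of concrete calls. The following is a minimal illustrative sketch, assuming a Sage environment where this module is importable; the generator counts and invariant factors are made-up examples, not taken from the original file.
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.homology.homology_group import HomologyGroup
# Over a field the torsion data is irrelevant, so a plain vector space is returned.
V = HomologyGroup(3, QQ)           # Vector space of dimension 3 over Rational Field
# Over ZZ, a short invariant-factor list is left-padded with zeros (free summands),
# so rank plus torsion factors always account for all n generators.
H = HomologyGroup(4, ZZ, [2, 12])  # prints as Z x Z x C2 x C12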
| 32.444444
| 98
| 0.53653
|
4d5fa35c71451cea6076c59ca8e244ef5cf3a251
| 686
|
py
|
Python
|
config.py
|
SRainot/gadgetBot
|
f8e31882229f0033356a7751c8a2b57858eb817c
|
[
"MIT"
] | null | null | null |
config.py
|
SRainot/gadgetBot
|
f8e31882229f0033356a7751c8a2b57858eb817c
|
[
"MIT"
] | null | null | null |
config.py
|
SRainot/gadgetBot
|
f8e31882229f0033356a7751c8a2b57858eb817c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Author:w k
from nonebot.default_config import *
from os import path
COMMAND_START = {''}
DEBUG = True
SUPERUSERS = {}
NICKNAME = {'!', '花'}
# Data storage directory
DATA_FOLDER = path.join(path.dirname(__file__), 'data')
# List of Bilibili subscriptions
BILIBILI_SUBSCRIPTION_INFO = []
# Bilibili live-stream polling intervals (poll too fast and the IP may get banned; tune these yourself)
CHECK_OPEN_STATUS = 10 # in seconds
CHECK_CLOSE_STATUS = 10 # in minutes
###CHAT SETTING
TX_CHAT_APPID = '2111616354'
TX_CHAT_APPKEY = 'wSLUnOTGuAkln8pv'
###
###BD SPEAK
BD_CLIENT_ID = '4LI50CEmqn4h4mNqfjwicu04'
BD_CLIENT_SECRET = 'PHBGwYHmWiz9Ce0eBV7jygGrUyKpSetn'
BD_TOKEN = ''
###
###DIFFBOT
## This API requires applying for your own key, so none is provided here
DIFFBOT_TOKEN = ''
###Flood-control muting
REMOVE_DELAY = 5
MAX_COUNT = 7
| 16.731707
| 55
| 0.718659
|
3419b62f704f483eefe5c7645563412195382e1f
| 2,255
|
py
|
Python
|
env/lib/python3.6/site-packages/pytests/test_messagequeue.py
|
rogerscristo/BotFWD
|
4f2ab1f4f4543c157ca0a79084536c065f74159f
|
[
"MIT"
] | null | null | null |
env/lib/python3.6/site-packages/pytests/test_messagequeue.py
|
rogerscristo/BotFWD
|
4f2ab1f4f4543c157ca0a79084536c065f74159f
|
[
"MIT"
] | 3
|
2017-09-01T22:18:30.000Z
|
2017-09-01T22:24:57.000Z
|
env/lib/python3.6/site-packages/pytests/test_messagequeue.py
|
rogerscristo/BotFWD
|
4f2ab1f4f4543c157ca0a79084536c065f74159f
|
[
"MIT"
] | 3
|
2018-02-22T22:20:27.000Z
|
2018-04-22T10:58:24.000Z
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
from time import sleep
import telegram.ext.messagequeue as mq
class TestDelayQueue:
N = 128
burst_limit = 30
time_limit_ms = 1000
margin_ms = 0
testtimes = []
def call(self):
self.testtimes.append(mq.curtime())
def test_delayqueue_limits(self):
dsp = mq.DelayQueue(burst_limit=self.burst_limit, time_limit_ms=self.time_limit_ms,
autostart=True)
assert dsp.is_alive() is True
for i in range(self.N):
dsp(self.call)
starttime = mq.curtime()
app_endtime = (
(self.N * self.burst_limit /
(1000 * self.time_limit_ms)) + starttime + 20) # wait up to 20 sec more than needed
while not dsp._queue.empty() and mq.curtime() < app_endtime:
sleep(1)
assert dsp._queue.empty() is True # check loop exit condition
dsp.stop()
assert dsp.is_alive() is False
        assert self.testtimes or self.N == 0
passes, fails = [], []
delta = (self.time_limit_ms - self.margin_ms) / 1000
for start, stop in enumerate(range(self.burst_limit + 1, len(self.testtimes))):
part = self.testtimes[start:stop]
if (part[-1] - part[0]) >= delta:
passes.append(part)
else:
fails.append(part)
assert fails == []
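The test above pins down the contract of `DelayQueue`: no more than `burst_limit` calls are let through per `time_limit_ms`. As a quick orientation, here is a minimal usage sketch built only from the API the test itself exercises (`DelayQueue(...)`, calling the instance with a callable, and `stop()`); the callable and the limits are illustrative.
import telegram.ext.messagequeue as mq
def send_notification():
    # stand-in for whatever rate-limited work needs doing
    print('would send one message here')
dq = mq.DelayQueue(burst_limit=30, time_limit_ms=1000, autostart=True)
for _ in range(100):
    dq(send_notification)  # processed at no more than 30 calls per second
dq.stop()                  # shut the worker thread down when finished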
| 35.234375
| 98
| 0.627051
|
0fe206a22fc7b7cee99aa2baedc9398883baee14
| 1,231
|
py
|
Python
|
Section_01/1.07.py
|
PacktPublishing/Tkinter-GUI-Application-Development-Projects
|
58b49e23e887bf45810666cb1c63d1e06291873b
|
[
"MIT"
] | 11
|
2019-05-24T17:03:55.000Z
|
2021-11-24T23:59:38.000Z
|
Section_01/1.07.py
|
PacktPublishing/Tkinter-GUI-Application-Development-Projects
|
58b49e23e887bf45810666cb1c63d1e06291873b
|
[
"MIT"
] | null | null | null |
Section_01/1.07.py
|
PacktPublishing/Tkinter-GUI-Application-Development-Projects
|
58b49e23e887bf45810666cb1c63d1e06291873b
|
[
"MIT"
] | 9
|
2019-06-19T02:20:15.000Z
|
2022-03-25T01:36:14.000Z
|
from tkinter import *
parent = Tk()
parent.title('Find & Replace')
Label(parent, text="Find:").grid(row=0, column=0, sticky='e')
Entry(parent, width=60).grid(row=0, column=1, padx=2, pady=2, sticky='we', columnspan=9)
Label(parent, text="Replace:").grid(row=1, column=0, sticky='e')
Entry(parent).grid(row=1, column=1, padx=2, pady=2, sticky='we', columnspan=9)
Button(parent, text="Find").grid(row=0, column=10, sticky='e'+'w', padx=2, pady=2)
Button(parent, text="Find All").grid(row=1, column=10, sticky='e'+'w', padx=2)
Button(parent, text="Replace").grid(row=2, column=10, sticky='e'+'w', padx=2)
Button(parent, text="Replace All").grid(row=3, column=10, sticky='e'+'w', padx=2)
Checkbutton(parent, text='Match whole word only ').grid(row =2, column=1, columnspan=4,sticky='w')
Checkbutton(parent, text='Match Case').grid(row =3, column=1, columnspan=4,sticky='w')
Checkbutton(parent, text='Wrap around').grid(row =4, column=1, columnspan=4,sticky='w')
Label(parent, text="Direction:").grid(row=2, column=6,sticky='w')
Radiobutton(parent, text='Up', value=1).grid(row=3, column=6, columnspan=6, sticky='w')
Radiobutton(parent, text='Down', value=2).grid(row=3, column=7,columnspan=2, sticky='e')
parent.mainloop()
| 42.448276
| 98
| 0.688058
|
8da556a25355f16d222bfd00866375e725576035
| 2,585
|
py
|
Python
|
{{cookiecutter.project_slug}}/setup.py
|
gunnarvoet/cookiecutter-research-project-pdoc
|
b430c7c2bacc5d29ef6218ce8ace60b846d75aa6
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.project_slug}}/setup.py
|
gunnarvoet/cookiecutter-research-project-pdoc
|
b430c7c2bacc5d29ef6218ce8ace60b846d75aa6
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.project_slug}}/setup.py
|
gunnarvoet/cookiecutter-research-project-pdoc
|
b430c7c2bacc5d29ef6218ce8ace60b846d75aa6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
requirements = [{%- if cookiecutter.command_line_interface|lower == 'click' %}'Click>=7.0',{%- endif %} ]
setup_requirements = [{%- if cookiecutter.use_pytest == 'y' %}'pytest-runner',{%- endif %} ]
test_requirements = [{%- if cookiecutter.use_pytest == 'y' %}'pytest>=3',{%- endif %} ]
{%- set license_classifiers = {
'MIT license': 'License :: OSI Approved :: MIT License',
'BSD license': 'License :: OSI Approved :: BSD License',
'ISC license': 'License :: OSI Approved :: ISC License (ISCL)',
'Apache Software License 2.0': 'License :: OSI Approved :: Apache Software License',
'GNU General Public License v3': 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
} %}
setup(
author="{{ cookiecutter.full_name.replace('\"', '\\\"') }}",
author_email='{{ cookiecutter.email }}',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
{%- if cookiecutter.open_source_license in license_classifiers %}
'{{ license_classifiers[cookiecutter.open_source_license] }}',
{%- endif %}
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="{{ cookiecutter.project_short_description }}",
{%- if 'no' not in cookiecutter.command_line_interface|lower %}
entry_points={
'console_scripts': [
'{{ cookiecutter.project_slug }}={{ cookiecutter.project_slug }}.cli:main',
],
},
{%- endif %}
install_requires=requirements,
{%- if cookiecutter.open_source_license in license_classifiers %}
license="{{ cookiecutter.open_source_license }}",
{%- endif %}
long_description=readme,
include_package_data=True,
keywords='{{ cookiecutter.project_slug }}',
name='{{ cookiecutter.project_slug }}',
packages=find_packages(include=['{{ cookiecutter.project_slug }}', '{{ cookiecutter.project_slug }}.*'], exclude=["*.tests"]),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.project_slug }}',
version='{{ cookiecutter.version }}',
zip_safe=False,
)
| 39.769231
| 130
| 0.648356
|
3204b1bfc7aa2c388eeafebd8d222adb4e9901e4
| 8,906
|
py
|
Python
|
synergine2_xyz/utils.py
|
buxx/synergine2
|
843988df5e653a413eca8c486ee93f5e9e884f37
|
[
"MIT"
] | 1
|
2021-02-26T15:36:04.000Z
|
2021-02-26T15:36:04.000Z
|
synergine2_xyz/utils.py
|
buxx/synergine2
|
843988df5e653a413eca8c486ee93f5e9e884f37
|
[
"MIT"
] | 182
|
2017-03-06T10:20:19.000Z
|
2021-06-10T14:12:36.000Z
|
synergine2_xyz/utils.py
|
buxx/synergine2
|
843988df5e653a413eca8c486ee93f5e9e884f37
|
[
"MIT"
] | 1
|
2018-01-01T15:38:24.000Z
|
2018-01-01T15:38:24.000Z
|
# coding: utf-8
import collections
import typing
from math import sqrt
import numpy
from synergine2_xyz.xyz import DIRECTION_MODIFIERS
def get_positions_from_str_representation(str_representation):
# TODO: Manage z axis (like ------------ as separator)
lines = str_representation.split("\n") # One item per lines
lines = map(lambda l: l.strip().replace(' ', ''), lines) # Remove spaces
lines = filter(lambda l: bool(l), lines) # Only line with content
lines = list(lines)
width = len(lines[0])
height = len(lines)
if not width % 2 or not height % 2:
raise Exception(
'Width and height of your representation must be odd. '
'Actually it\'s {0}x{1}'.format(
width,
height,
))
items_positions = collections.defaultdict(list)
start_x = - int(width / 2 - 0.5)
start_y = - int(height / 2 - 0.5)
start_z = 0
current_y = start_y
current_z = start_z
for line in lines:
current_x = start_x
for char in line:
items_positions[char].append((
current_x,
current_y,
current_z,
))
current_x += 1
current_y += 1
return items_positions
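To make the coordinate convention of the parser above concrete, here is a small illustrative sketch; the representation string and the expected values are examples, not taken from the project's tests.
from synergine2_xyz.utils import get_positions_from_str_representation
representation = '\n'.join([
    '. X .',
    'X O X',
    '. X .',
])
positions = get_positions_from_str_representation(representation)
# The middle of the (odd-sized) picture maps to the origin:
# positions['O'] == [(0, 0, 0)]
# positions['X'] == [(0, -1, 0), (-1, 0, 0), (1, 0, 0), (0, 1, 0)]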
def get_min_and_max(positions) -> (int, int, int, int, int):
max_x_position = max(positions, key=lambda p: p[0])
min_x_position = min(positions, key=lambda p: p[0])
max_y_position = max(positions, key=lambda p: p[1])
min_y_position = min(positions, key=lambda p: p[1])
max_z_position = max(positions, key=lambda p: p[2])
min_z_position = min(positions, key=lambda p: p[2])
max_x = max_x_position[0]
min_x = min_x_position[0]
max_y = max_y_position[1]
min_y = min_y_position[1]
max_z = max_z_position[2]
min_z = min_z_position[2]
return min_x, max_x, min_y, max_y, min_z, max_z
def get_str_representation_from_positions(
items_positions: dict,
separator='',
tabulation='',
start_with='',
end_with='',
force_items_as=None,
force_positions_as=None,
complete_lines_with=' ',
) -> str:
positions = []
for item_positions in items_positions.values():
positions.extend(item_positions)
positions = sorted(positions, key=lambda p: (p[2], p[1], p[0]))
if complete_lines_with is not None:
min_x, max_x, min_y, max_y, min_z, max_z = get_min_and_max(positions)
all_ = []
for x in range(min_x, max_x+1):
for y in range(min_y, max_y+1):
for z in range(min_z, max_z+1):
all_.append((x, y, z))
pass
for one_of_all in all_:
if one_of_all not in positions:
if complete_lines_with not in items_positions:
items_positions[complete_lines_with] = []
items_positions[complete_lines_with].append(one_of_all)
positions = []
for item_positions in items_positions.values():
positions.extend(item_positions)
positions = sorted(positions, key=lambda p: (p[2], p[1], p[0]))
str_representation = start_with + tabulation
start_x = positions[0][0]
start_y = positions[0][1]
start_z = positions[0][2]
current_y = start_y
current_z = start_z
for position in positions:
item = None
for parsed_item in items_positions:
if position in items_positions[parsed_item]:
item = parsed_item
break
if position[1] != current_y:
str_representation += "\n" + tabulation
if position[2] != current_z:
str_representation += '----' + "\n" + tabulation
str_item = item
if force_items_as:
for force_item_as in force_items_as:
if force_item_as[0] == item:
str_item = force_item_as[1]
break
if force_positions_as:
for force_position_as in force_positions_as:
if position == force_position_as[0]:
str_item = force_position_as[1]
break
added_value = str_item
if position[0] != start_x:
added_value = separator + added_value
str_representation += added_value
current_y = position[1]
current_z = position[2]
return str_representation + end_with
def get_around_positions_of_positions(position, exclude_start_position=True) -> list:
"""
TODO: compute with z (allow or disable with parameter)
Return positions around a point with distance of 1.
:param position: (x, y, z) tuple
:param exclude_start_position: if True, given position will not be
added to result list
:return: list of (x, y, z) positions
:rtype: list
"""
pz = position[2]
px = position[0]
py = position[1]
points = [
(px-1, py-1, pz),
(px, py-1, pz),
(px+1, py+1, pz),
(px-1, py , pz),
(px+1, py , pz),
(px-1, py+1, pz),
(px, py+1, pz),
(px+1, py-1, pz)
]
if not exclude_start_position:
points.append(position)
return points
def get_direct_around_positions_of_position(position, exclude_start_position=True) -> list:
"""
TODO: compute with z (allow or disable with parameter)
Return positions around a point with distance of 1 on left/right top/bottom only.
:param position: (x, y, z) tuple
:param exclude_start_position: if True, given position will not be
added to result list
:return: list of (x, y, z) positions
:rtype: list
"""
pz = position[2]
px = position[0]
py = position[1]
points = [
(px, py-1, pz),
(px-1, py , pz),
(px+1, py , pz),
(px, py+1, pz),
]
if not exclude_start_position:
points.append(position)
return points
def get_around_positions_of(
position,
distance=1,
exclude_start_point=True,
) -> list:
"""
Return positions around a point.
:param position: (x, y, z) tuple
:param distance: Distance to compute
:return: list of (x, y, z) positions
"""
start_x = position[0] - distance
start_y = position[1] - distance
# start_z = position[0] - distance
positions = []
range_distance = (distance * 2) + 1
for dx in range(range_distance):
for dy in range(range_distance):
# for dz in range(range_distance):
# points.append((start_z+dz, start_x+dx, start_y+dy))
positions.append((start_x + dx, start_y + dy, position[2]))
if exclude_start_point:
positions.remove(position)
return positions
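The distance-based helper above generalises the two fixed-radius helpers; a tiny illustrative check (values are examples) of how they relate:
from synergine2_xyz.utils import (
    get_around_positions_of,
    get_direct_around_positions_of_position,
)
ring = get_around_positions_of((0, 0, 0), distance=1)
assert len(ring) == 8 and (0, 0, 0) not in ring  # full square ring, origin excluded
cross = get_direct_around_positions_of_position((0, 0, 0))
assert len(cross) == 4                           # orthogonal neighbours only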
def get_distance_between_points(a: tuple, b: tuple) -> float:
return abs(sqrt((b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2))
def get_position_for_direction(from_position: tuple, direction: int) -> tuple:
modifier = DIRECTION_MODIFIERS[direction]
return (
from_position[0] + modifier[0],
from_position[1] + modifier[1],
from_position[2] + modifier[2],
)
def get_angle(a: typing.Tuple[int, int], b: typing.Tuple[int, int]) -> int:
b = (b[0] - a[0], b[1] - a[1])
a = 0, 1
ang1 = numpy.arctan2(*a[::-1])
ang2 = numpy.arctan2(*b[::-1])
return numpy.rad2deg((ang1 - ang2) % (2 * numpy.pi))
def get_line_xy_path(start, end):
"""
TODO: copied from http://www.roguebasin.com/index.php?title=Bresenham%27s_Line_Algorithm#Python
What is the licence ?
Bresenham's Line Algorithm
Produces a list of tuples from start and end
    >>> points1 = get_line_xy_path((0, 0), (3, 4))
    >>> points2 = get_line_xy_path((3, 4), (0, 0))
    >>> assert(set(points1) == set(points2))
    >>> print(points1)
    [(0, 0), (1, 1), (1, 2), (2, 3), (3, 4)]
    >>> print(points2)
    [(3, 4), (2, 3), (1, 2), (1, 1), (0, 0)]
"""
# Setup initial conditions
x1, y1 = start
x2, y2 = end
dx = x2 - x1
dy = y2 - y1
# Determine how steep the line is
is_steep = abs(dy) > abs(dx)
# Rotate line
if is_steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
# Swap start and end points if necessary and store swap state
swapped = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
swapped = True
# Recalculate differentials
dx = x2 - x1
dy = y2 - y1
# Calculate error
error = int(dx / 2.0)
ystep = 1 if y1 < y2 else -1
# Iterate over bounding box generating points between start and end
y = y1
points = []
for x in range(x1, x2 + 1):
coord = (y, x) if is_steep else (x, y)
points.append(coord)
error -= abs(dy)
if error < 0:
y += ystep
error += dx
# Reverse the list if the coordinates were swapped
if swapped:
points.reverse()
return points
| 28.094637
| 99
| 0.58556
|
f358a1f1b53c02baad1137570c9345afe4d3d725
| 12,071
|
py
|
Python
|
pace/ads/skiplist/accumulo/embeddednode.py
|
LaudateCorpus1/PACE-python
|
eb61250886e51647bd1edb6d8f4fa7f83eb0bc81
|
[
"BSD-2-Clause"
] | 7
|
2016-11-01T17:36:17.000Z
|
2021-03-12T08:54:36.000Z
|
pace/ads/skiplist/accumulo/embeddednode.py
|
LaudateCorpus1/PACE-python
|
eb61250886e51647bd1edb6d8f4fa7f83eb0bc81
|
[
"BSD-2-Clause"
] | 1
|
2016-11-29T00:38:28.000Z
|
2016-12-06T14:10:24.000Z
|
pace/ads/skiplist/accumulo/embeddednode.py
|
LaudateCorpus1/PACE-python
|
eb61250886e51647bd1edb6d8f4fa7f83eb0bc81
|
[
"BSD-2-Clause"
] | 6
|
2020-09-09T08:33:17.000Z
|
2022-01-06T07:02:40.000Z
|
## **************
## Copyright 2015 MIT Lincoln Laboratory
## Project: PACE
## Authors: CS
## Description: Skiplist nodes embedded into Accumulo
## Modifications:
## Date Name Modification
## ---- ---- ------------
## 11 Feb 2015 CS Original file
## **************
import os
import sys
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.join(this_dir, '../../../..')
sys.path.append(base_dir)
# This is the python library implementation of SHA256, which is faster than
# the PyCrypto version in PyCrypto.Hash.SHA256
from hashlib import sha256
from pyaccumulo import Range, Mutation
from collections import deque
import string
from pace.ads.skiplist.authskiplist import AuthSkipList
from pace.ads.skiplist.authnode import AuthNode
from pace.ads.skiplist.elemclass import AccumuloEntry
from pace.common.common_utils import get_single_entry
class EmbeddedNode(AuthNode):
""" An embedding of authenticated skiplist nodes into Accumulo,
attempting to duplicate as little of the complicated code
as possible.
Assumptions:
This class assumes that the user never wants to delete any of
the metadata about a node, just write or overwrite it. In particular,
this means writes of right, down, and parent nodes will always take
a node, not None.
"""
def __init__(self, sl, name):
""" Each node must refer to its parent skiplist, which contains
information about the server and table the embedding resides
in, and the name of the node, which is a unique ID used to
look it up in the relevant table in Accumulo.
Arguments:
sl - the overall embedded skiplist object
name - the unique identifier of this node
"""
self.name = name
self.sl = sl
@staticmethod
def _make_name(down, right, elem):
""" Generate a name for a node with the given children & element.
"""
if down:
# Node is an internal node; make sure its name is (probably) unique
dname = down.name
rname = right.name if right else ''
name = sha256(','.join([dname, rname, elem.serialize()])).digest()
else:
# Node is a leaf; make the hash depend only on the element.
name = sha256(elem.serialize()).digest()
return name
@classmethod
def newnode(cls, sl, down, right, elem, assign_label=False, left=None):
""" Create a new node.
Arguments:
sl - the skiplist this node is a part of.
down - the bottom neighbor of the new node (if any)
right - the right neighbor of the new node (if any)
elem - the element to be stored in the new node.
assign_label - whether to assign a hash label to this node
upon creation (alternative is to defer it to
be called manually by whoever is using this
code). Default: False (do not assign a label
yet)
left - the node to the left of this node, if any. Default: None
Returns:
A new SkipListNode with fields as given.
"""
name = cls._make_name(down, right, elem)
new_node = cls(sl, name)
new_node.elem = elem
if left is not None:
new_node.parent = left, False
if down is not None:
down.parent = (new_node, True)
new_node.down = down
if right is not None:
new_node.right = right
par = right.parent
if par is not None:
parent, from_up = par
if not from_up:
right.parent = (new_node, False)
else:
right.parent = (new_node, False)
if assign_label:
new_node.assign_label()
return new_node
def search(self, elem):
""" Search the SkipList for node containing an element that is
either equal to elem (if elem is in the list) or the closest
element in the list that is less than elem (if elem is not in the
list)
Since this is embedded in an Accumulo table, we can look up the
node in the table first by its hash value. If it is not present,
        we then fall back to the older, slower method in the superclass.
Argument:
elem - the element (from elemclass.py) to be searched for
Returns:
visited - a collection of nodes visited, stored in reverse order
current - the closest node in the base list that is less than or
equal to elem
"""
# Look for the element; if it's found, return it; if not, search.
node = self.sl.from_elem(elem)
if node is not None:
# It was found! Return the iterable object for its 'visited'
# path and the node itself.
return node.path(), node
# It wasn't found! Time to search for it. This may arise when the
# actual element being searched for is not stored in Accumulo, in
# which case we need to find the greatest element less than the one
# being searched for.
current = self
right = current.right
visited = deque([])
while (isinstance(right, type(self)) and
right.elem <= elem):
visited.appendleft((current, False))
current = right
right = current.right
down = current.down
while down is not None:
visited.appendleft((current, True))
current = down
right = current.right
while (isinstance(right, type(self)) and
right.elem <= elem):
visited.appendleft((current, False))
current = right
right = current.right
down = current.down
return visited, current
def path(self):
""" Return a generator for all the nodes between this node
and the root.
"""
node = self
parent = node.parent
while parent is not None:
node, flag = parent
yield node, flag
parent = node.parent
@property
def tower(self):
_, from_above = self.parent
return not from_above
@tower.setter
def tower(self, value):
raise Exception('Cannot set embedded tower values.')
@property
def down(self):
entry = get_single_entry(self.sl.conn, self.sl.table, row=self.name,
cf='child', cq='down')
if entry is not None:
return EmbeddedNode(self.sl, entry.val)
else:
return None
@down.setter
def down(self, value):
""" Set the value of the node underneath this one.
Argument must be another node.
Arguments:
value - the EmbeddedNode object to the right of `self`
"""
assert isinstance(value, EmbeddedNode)
m = Mutation(self.name)
m.put(cf='child', cq='down', val=value.name)
self.sl.conn.write(self.sl.table, m)
@property
def right(self):
entry = get_single_entry(self.sl.conn, self.sl.table, row=self.name,
cf='child', cq='right')
if entry:
return EmbeddedNode(self.sl, entry.val)
else:
return None
@right.setter
def right(self, value):
""" Set the value of the node to the right of this one.
Argument must be another node.
Arguments:
value - the EmbeddedNode object to the right of `self`
"""
assert isinstance(value, EmbeddedNode)
m = Mutation(self.name)
m.put(cf='child', cq='right', val=value.name)
self.sl.conn.write(self.sl.table, m)
@property
def raw_elem(self):
""" Return just the serialization of the element.
"""
entry = get_single_entry(self.sl.conn, self.sl.table, row=self.name,
cf='other', cq='elem')
assert entry is not None
return entry.val
@raw_elem.setter
def raw_elem(self, value):
m = Mutation(self.name)
m.put(cf='other', cq='elem', val=value)
self.sl.conn.write(self.sl.table, m)
@property
def elem(self):
return self.sl.elemclass.deserialize(self.raw_elem)
@elem.setter
def elem(self, value):
self.raw_elem = value.serialize()
@property
def label(self):
entry = get_single_entry(self.sl.conn, self.sl.table, row=self.name,
cf='other', cq='label')
return entry.val
@label.setter
def label(self, value):
# labels are represented as unstructured strings, so we can
# just write them directly to the value field
m = Mutation(self.name)
m.put(cf='other', cq='label', val=value)
self.sl.conn.write(self.sl.table, m)
@property
def parent(self):
""" Return the parent node and a flag denoting whether it is the
node above this one ('True') or to the left ('False').
"""
parent = get_single_entry(self.sl.conn, self.sl.table, row=self.name,
cf='parent')
if parent is None:
return None
path, name = string.split(parent.val, ',', 1)
parent_node = EmbeddedNode(self.sl, name)
if path == 'from_left':
return parent_node, False
elif path == 'from_up':
return parent_node, True
else:
raise Exception('Invalid parent column qualifier: %s' %parent.cq)
@parent.setter
def parent(self, value):
""" Set the value of this node's parent node. Argument must be a
tuple of an EmbeddedNode and a boolean denoting whether the
parent is an upper (as opposed to left) neighbor.
NB: `parent` is only set in `newnode()`
"""
parnode, from_up = value
assert isinstance(parnode, EmbeddedNode)
if from_up:
strval = ','.join(['from_up', parnode.name])
else:
strval = ','.join(['from_left', parnode.name])
m = Mutation(self.name)
m.put(cf='parent', cq='', val=strval)
self.sl.conn.write(self.sl.table, m)
@property
def tower(self):
""" Return whether this node is a tower node. If the parent node in the
path is above this one, then it is a tower node; otherwise it is
a plateau node.
"""
parent = self.parent
if parent:
return self.parent[1]
else:
# Must be the root
return False
@tower.setter
def tower(self, value):
# tower is a derived property now---no need to store it explicitly
pass
def assign_label(self):
""" One possible source of optimization for this function would
be to figure out how to streamline/cache the lookups it
performs.
"""
node = self
right = node.right
if right is None:
node.label = str(0)
return
if node.down is None:
if right.tower:
node.label = AuthNode.chash(
AuthNode._hash(node.raw_elem),
AuthNode._hash(right.raw_elem))
else:
node.label = AuthNode.chash(
AuthNode._hash(node.raw_elem),
right.label)
else:
if right.tower:
node.label = node.down.label
else:
node.label = AuthNode.chash(node.down.label, right.label)
| 32.624324
| 79
| 0.561097
|
d92428ab1c187890863b507ea637900da0b5bf70
| 3,844
|
py
|
Python
|
asdl/runtime.py
|
roryokane/oil
|
6724b2e0b8142ea0d48f0269f402e420861cb3b2
|
[
"Apache-2.0"
] | null | null | null |
asdl/runtime.py
|
roryokane/oil
|
6724b2e0b8142ea0d48f0269f402e420861cb3b2
|
[
"Apache-2.0"
] | null | null | null |
asdl/runtime.py
|
roryokane/oil
|
6724b2e0b8142ea0d48f0269f402e420861cb3b2
|
[
"Apache-2.0"
] | null | null | null |
"""
runtime.py
- Base classes for generated code
- Nodes for pretty printing
"""
from __future__ import print_function
from cStringIO import StringIO
import sys
from typing import List, Tuple, Optional, IO
class Obj(object):
# NOTE: We're using CAPS for these static fields, since they are constant at
# runtime after metaprogramming.
ASDL_TYPE = None # Used for type checking
class SimpleObj(Obj):
"""Base type of simple sum types."""
def __init__(self, enum_id, name):
# type: (int, str) -> None
self.enum_id = enum_id
self.name = name
# TODO: Why is __hash__ needed? Otherwise native/fastlex_test.py fails.
# util.Enum required it too. I thought that instances would hash by
# identity?
#
# Example:
# class bool_arg_type_e(py_meta.SimpleObj):
# pass
# bool_arg_type_e.Undefined = bool_arg_type_e(1, 'Undefined')
def __hash__(self):
# type: () -> int
# Could it be the integer self.enum_id?
return hash(self.__class__.__name__ + self.name)
def __repr__(self):
# type: () -> str
return '<%s %s %s>' % (self.__class__.__name__, self.name, self.enum_id)
class CompoundObj(Obj):
# TODO: Remove tag?
# The tag is always set for constructor types, which are subclasses of sum
# types. Never set for product types.
tag = 0 # TYPED: Changed from None. 0 is invalid!
def PrettyTree(self):
# type: () -> _PrettyBase
raise NotImplementedError(self.__class__.__name__)
def _AbbreviatedTree(self):
# type: () -> _PrettyBase
raise NotImplementedError(self.__class__.__name__)
def AbbreviatedTree(self):
# type: () -> _PrettyBase
raise NotImplementedError(self.__class__.__name__)
def PrettyPrint(self, f=sys.stdout):
# type: (IO[str]) -> None
"""Print abbreviated tree in color, for debugging."""
from asdl import format as fmt
ast_f = fmt.DetectConsoleOutput(f)
tree = self.AbbreviatedTree()
fmt.PrintTree(tree, ast_f)
def __repr__(self):
# type: () -> str
# TODO: Break this circular dependency.
from asdl import format as fmt
ast_f = fmt.TextOutput(StringIO()) # No color by default.
tree = self.PrettyTree()
fmt.PrintTree(tree, ast_f)
s, _ = ast_f.GetRaw()
return s
#
# A Homogeneous Tree for Pretty Printing.
#
class _PrettyBase(object):
pass
class PrettyNode(_PrettyBase):
"""Homogeneous node for pretty-printing."""
def __init__(self, node_type=None):
# type: (Optional[str]) -> None
self.node_type = node_type or '' # type: str
# Gah this signature is complicated.
# Probably should have _PrettyRepeated?
self.fields = [] # type: List[Tuple[str, _PrettyBase]]
# Custom hooks set abbrev = True and use the nodes below.
self.abbrev = False
self.left = '('
self.right = ')'
# Used by abbreviations
self.unnamed_fields = [] # type: List[_PrettyBase]
def __repr__(self):
# type: () -> str
return '<PrettyNode %s %s>' % (self.node_type, self.fields)
class PrettyArray(_PrettyBase):
def __init__(self):
# type: () -> None
self.children = [] # type: List[_PrettyBase]
def __repr__(self):
# type: () -> str
return '<PrettyArray %s>' % (self.children)
# Color token types
Color_TypeName = 1
Color_StringConst = 2
Color_OtherConst = 3 # Int and bool. Green?
Color_UserType = 4 # UserType Id
class PrettyLeaf(_PrettyBase):
"""Colored string for pretty-printing."""
def __init__(self, s, e_color):
# type: (Optional[str], int) -> None
if s is None: # hack for repr of MaybeStrArray, which can have 'None'
self.s = '_'
self.e_color = Color_OtherConst
else:
assert isinstance(s, str), s
self.s = s
self.e_color = e_color
def __repr__(self):
# type: () -> str
return '<PrettyLeaf %s %s>' % (self.s, self.e_color)
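As a minimal sketch of how the homogeneous pretty-printing nodes above compose (assuming the module is importable as `asdl.runtime`, as its path suggests): a `PrettyNode` carries named fields, leaves carry a string plus a color token type, and arrays hold children. Values below are illustrative only.
from asdl.runtime import (
    PrettyNode, PrettyArray, PrettyLeaf, Color_TypeName, Color_StringConst,
)
node = PrettyNode('token')
node.fields.append(('id', PrettyLeaf('Lit_Chars', Color_TypeName)))
node.fields.append(('val', PrettyLeaf('echo', Color_StringConst)))
arr = PrettyArray()
arr.children.append(node)
print(arr)  # nested <PrettyArray ...> / <PrettyNode ...> repr from the classes above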
| 25.626667
| 78
| 0.663111
|
f45c3d06f02a25c298765aecd60e5d97e56aa5a2
| 7,300
|
py
|
Python
|
Algorithm.Python/PL_Stat10/main.py
|
pasztorlacos/Lean
|
ca204c07d9bb390f853eb2f3da0ebc08150fef36
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Python/PL_Stat10/main.py
|
pasztorlacos/Lean
|
ca204c07d9bb390f853eb2f3da0ebc08150fef36
|
[
"Apache-2.0"
] | null | null | null |
Algorithm.Python/PL_Stat10/main.py
|
pasztorlacos/Lean
|
ca204c07d9bb390f853eb2f3da0ebc08150fef36
|
[
"Apache-2.0"
] | null | null | null |
environment = "Lean"
if environment=="Lean":
import sys
sys.path.append('C:/Github/Repos/pasztorlacos/Quantconnect/Libraries/')
sys.path.append('C:/Github/Repos/pasztorlacos/Quantconnect/Strategies/')
#---------------------------------------------------------------------------------------------
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
#from QuantConnect.Algorithm.Framework import *
#from QuantConnect.Algorithm.Framework.Alphas import *
#from QuantConnect.Algorithm.Framework.Execution import *
#from QuantConnect.Algorithm.Framework.Portfolio import *
#from QuantConnect.Algorithm.Framework.Risk import *
#from QuantConnect.Algorithm.Framework.Selection import *
from QuantConnect.Orders import *
from QuantConnect.Orders.Fees import *
from QuantConnect.Securities import *
from QuantConnect.Orders.Fills import *
from QuantConnect.Brokerages import BrokerageName
from QuantConnect import Resolution, SecurityType
import pandas as pd
import numpy as np
import random
from datetime import datetime, timedelta
import time
from System.Drawing import Color
import decimal as d
import math
from pm3 import MyPositionManager
from pmB3 import MyPositionManagerB
from var31 import MyVaR, MyCharts, MyStats
import hp3
from eq2_ai_4 import Eq2_ai_4
from sim1 import MySIMPosition
class MyAlgo(QCAlgorithm):
'''
Multistrategy Framework 1.0
'''
file = __file__
def Initialize(self):
self.Debug(" ---- Initialize() Started")
self.enabled = True
self.twsSynced = False
self.updateSettings = True if self.LiveMode else False
self.settingsURL = 'https://www.dropbox.com/s/thxemetfxuisl27/QCStrategySettings.csv?dl=1'
self.strategySettings = hp3.MyStrategySettings(self)
'''DEBUG/LOG'''
self.debug = False
self.log = 0 #0) only Debug 1) Debug anf Log 2) only Log
self.debugOrderFill = False #self.debug
if self.LiveMode: self.debugOrderFill = True
self.myHelpers = hp3.MyHelpers(self)
'''DATA STORAGE'''
self.myStrategyClassList = []
self.myVaRList = []
self.mySymbolDict = {}
self.openStopMarketOrders = []
self.openLimitOrders = []
self.openMarketOrders = []
self.myVaR = None
self.foreignVaR = None
'''PositionManager instantiation'''
self.myPositionManager = MyPositionManager(self)
self.myPositionManagerB = MyPositionManagerB(self)
self.consistencyStartUpReleaseTime = self.Time - timedelta(hours=20)
'''DataNormalizationMode for Equities'''
self.myDataNormalizationMode = DataNormalizationMode.SplitAdjusted #DataNormalizationMode.Raw, DataNormalizationMode.SplitAdjusted, DataNormalizationMode.Adjusted, DataNormalizationMode.TotalReturn
#This must be before InstallStrategy() as it resets custom models
self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage, AccountType.Margin)
        '''STRATEGIES (the first one would include the benchmark)'''
self.myHelpers.InstallStrategy(Eq2_ai_4, myAllocation=1.00*0.01)
'''BACKTEST DATES and SETTINGS'''
#Start Date
#self.SetStartDate(2000,1,1)
#self.SetStartDate(2002,1,1)
#self.SetStartDate(2003,1,1)
#self.SetStartDate(2004,1,1)
#self.SetStartDate(2007,1,1)
#self.SetStartDate(2009,1,1)
#self.SetStartDate(2016,1,1)
#self.SetStartDate(2017,1,1)
#self.SetStartDate(2018,1,1)
#self.SetStartDate(2019,1,1)
#self.SetStartDate(datetime.now() - timedelta(days=30))
self.simStartYear=2018
self.SetStartDate(self.simStartYear,1,1)
#End Date
#self.SetEndDate(2003,6,30)
#self.SetEndDate(2003,12,31)
#self.SetEndDate(2004,12,31)
#self.SetEndDate(2006,12,31)
#self.SetEndDate(2009,12,31)
#self.SetEndDate(2013,12,31)
#self.SetEndDate(2012,12,31)
#self.SetEndDate(2015,6,24)
#self.SetEndDate(datetime.now())
#self.SetEndDate(2019,10,10) #Last PiData Date
self.simYears = 20
self.simEndDate = datetime(self.simStartYear+self.simYears, 1, 1, 0, 0) #Use None if not applicable
self.SetEndDate(min(self.simEndDate + timedelta(days=30), datetime(2019,10,10)))
#self.Portfolio.SetAccountCurrency("EUR")
self.SetCash(100000)
'''Resolution'''
self.mainResolution = self.myHelpers.MyResolution()
self.UniverseSettings.Resolution = self.mainResolution
'''WarmUp'''
self.SetWarmUp(self.myHelpers.WarUpDays())
#Add chartSymbol and Set Tradable Property!
self.chartTicker = "DIA" #"QQQ"
self.AddEquity(self.chartTicker, self.mainResolution)
self.chartSymbol = self.Securities[self.chartTicker].Symbol
self.Securities[self.chartSymbol].SetDataNormalizationMode(self.myDataNormalizationMode)
self.myHelpers.AddSymbolDict(self.chartSymbol, self.myStrategyClassList[0], self.myVaR)
self.mySymbolDict[self.chartSymbol].posEnabled = False
#Add Benchmark Symbol that is Not Tradable
self.benchmarkTicker = "MDY" #IWV:iShares Russell 3000 ETF, IWM Russell 2000 ETF: small cap part of R3000, MDY S&P MidCap 400 Index
self.AddEquity(self.benchmarkTicker, self.mainResolution)
self.benchmarkSymbol = self.Securities[self.benchmarkTicker].Symbol
self.myHelpers.AddSymbolDict(self.benchmarkSymbol, self.myStrategyClassList[0], self.myVaR)
self.mySymbolDict[self.benchmarkSymbol].posEnabled = False
self.SetBenchmark(self.benchmarkSymbol)
'''Charts and Stats instantiation'''
self.myCharts = MyCharts (self, self.chartSymbol, backtestUpdateHours=1)
self.myStats = MyStats (self)
self.MyDebug(" ---- Initialize() Finished")
return
'''
AFTER WARMUP
'''
def OnWarmupFinished (self):
self.myHelpers.MyOnWarmupFinished()
return
'''
ON DATA
'''
def OnData(self, data):
self.myHelpers.MyOnData(data)
#if self.LiveMode and not self.IsWarmingUp: self.MyDebug(' pendingFlipPositions:' +str(len(self.myPositionManager.pendingFlipPositions)))
return
'''
ORDEREVENT HANDLER
'''
def OnOrderEvent(self, OrderEvent):
self.myPositionManagerB.MyOrderEventHandler(OrderEvent)
return
'''
AFTER BACKTEST
'''
def OnEndOfAlgorithm(self):
if True and environment=="Lean" and not self.LiveMode:
MySIMPosition.SaveData(self)
self.myStats.PrintStrategyTradeStats()
return
'''
DEBUG
'''
def MyDebug(self, debugString):
message = str(self.Time) + debugString
if self.log == 0:
self.Debug(message)
elif self.log == 1:
self.Debug(message)
self.Log(message)
elif self.log == 2:
self.Log(message)
| 37.435897
| 206
| 0.67
|
581fb6e15155360cb62e8226cbb36d61a3ffa745
| 3,096
|
py
|
Python
|
cohesity_management_sdk/models/protection_run_response.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
cohesity_management_sdk/models/protection_run_response.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
cohesity_management_sdk/models/protection_run_response.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.latest_protection_job_run_information
class ProtectionRunResponse(object):
"""Implementation of the 'ProtectionRunResponse' model.
Specifies the information about the Protection Runs across all snapshot
target locations.
Attributes:
archival_runs (list of LatestProtectionJobRunInformation): Specifies
the list of archival job information.
backup_runs (list of LatestProtectionJobRunInformation): Specifies the
list of local backup job information.
replication_runs (list of LatestProtectionJobRunInformation):
Specifies the list of replication job information.
"""
# Create a mapping from Model property names to API property names
_names = {
"archival_runs":'archivalRuns',
"backup_runs":'backupRuns',
"replication_runs":'replicationRuns'
}
def __init__(self,
archival_runs=None,
backup_runs=None,
replication_runs=None):
"""Constructor for the ProtectionRunResponse class"""
# Initialize members of the class
self.archival_runs = archival_runs
self.backup_runs = backup_runs
self.replication_runs = replication_runs
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
archival_runs = None
if dictionary.get('archivalRuns') != None:
archival_runs = list()
for structure in dictionary.get('archivalRuns'):
archival_runs.append(cohesity_management_sdk.models.latest_protection_job_run_information.LatestProtectionJobRunInformation.from_dictionary(structure))
backup_runs = None
if dictionary.get('backupRuns') != None:
backup_runs = list()
for structure in dictionary.get('backupRuns'):
backup_runs.append(cohesity_management_sdk.models.latest_protection_job_run_information.LatestProtectionJobRunInformation.from_dictionary(structure))
replication_runs = None
if dictionary.get('replicationRuns') != None:
replication_runs = list()
for structure in dictionary.get('replicationRuns'):
replication_runs.append(cohesity_management_sdk.models.latest_protection_job_run_information.LatestProtectionJobRunInformation.from_dictionary(structure))
# Return an object of this model
return cls(archival_runs,
backup_runs,
replication_runs)
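A short illustrative round trip for the model above (the payload is a made-up example): `from_dictionary` maps the camelCase API keys onto the Python attributes, and absent or null keys simply stay `None`.
from cohesity_management_sdk.models.protection_run_response import ProtectionRunResponse
payload = {'archivalRuns': None, 'backupRuns': None, 'replicationRuns': None}
response = ProtectionRunResponse.from_dictionary(payload)
assert response.archival_runs is None
assert response.backup_runs is None
assert response.replication_runs is None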
| 37.756098
| 170
| 0.67345
|
c86067c1f0100089dac79cba99527bcb107d4e3b
| 533
|
py
|
Python
|
pos_product_lot/__manifest__.py
|
ShaheenHossain/itpp-labs_pos-addons
|
8c5047af10447eb3d137c84111127fae1a8970b6
|
[
"MIT"
] | null | null | null |
pos_product_lot/__manifest__.py
|
ShaheenHossain/itpp-labs_pos-addons
|
8c5047af10447eb3d137c84111127fae1a8970b6
|
[
"MIT"
] | null | null | null |
pos_product_lot/__manifest__.py
|
ShaheenHossain/itpp-labs_pos-addons
|
8c5047af10447eb3d137c84111127fae1a8970b6
|
[
"MIT"
] | 4
|
2020-08-25T01:49:14.000Z
|
2021-04-04T10:29:04.000Z
|
# -*- coding: utf-8 -*-
{
"name": "Product lot in POS",
"version": "10.0.1.0.3",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "Other OSI approved licence", # MIT
"category": "Point Of Sale",
"website": "https://twitter.com/yelizariev",
"images": ["images/screenshot.png"],
"price": 9.00,
"currency": "EUR",
"depends": ["product_lot", "pos_product_available"],
"data": ["data.xml"],
"qweb": ["static/src/xml/pos.xml"],
"installable": True,
"auto_install": False,
}
| 29.611111
| 56
| 0.575985
|
1a9d751dc6bd16b25e5a010ac031886fc1554bfb
| 61,485
|
py
|
Python
|
venv/Lib/site-packages/plotly/graph_objs/_scattercarpet.py
|
kteegarden/PollGraph
|
c16fbdbd7a1cd46da7d1cd7beb789d681c46ffaa
|
[
"Apache-2.0"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/graph_objs/_scattercarpet.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | null | null | null |
plotly/graph_objs/_scattercarpet.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
from plotly.basedatatypes import BaseTraceType
import copy
class Scattercarpet(BaseTraceType):
# a
# -
@property
def a(self):
"""
Sets the quantity of component `a` in each data point. If `a`,
`b`, and `c` are all provided, they need not be normalized,
only the relative values matter. If only two arrays are
provided they must be normalized to match `ternary<i>.sum`.
The 'a' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['a']
@a.setter
def a(self, val):
self['a'] = val
# asrc
# ----
@property
def asrc(self):
"""
Sets the source reference on plot.ly for a .
The 'asrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['asrc']
@asrc.setter
def asrc(self, val):
self['asrc'] = val
# b
# -
@property
def b(self):
"""
        Sets the quantity of component `b` in each data point. If `a`,
`b`, and `c` are all provided, they need not be normalized,
only the relative values matter. If only two arrays are
provided they must be normalized to match `ternary<i>.sum`.
The 'b' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['b']
@b.setter
def b(self, val):
self['b'] = val
# bsrc
# ----
@property
def bsrc(self):
"""
Sets the source reference on plot.ly for b .
The 'bsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['bsrc']
@bsrc.setter
def bsrc(self, val):
self['bsrc'] = val
# carpet
# ------
@property
def carpet(self):
"""
An identifier for this carpet, so that `scattercarpet` and
`scattercontour` traces can specify a carpet plot on which they
lie
The 'carpet' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['carpet']
@carpet.setter
def carpet(self, val):
self['carpet'] = val
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['connectgaps']
@connectgaps.setter
def connectgaps(self, val):
self['connectgaps'] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['customdata']
@customdata.setter
def customdata(self, val):
self['customdata'] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on plot.ly for customdata .
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['customdatasrc']
@customdatasrc.setter
def customdatasrc(self, val):
self['customdatasrc'] = val
# fill
# ----
@property
def fill(self):
"""
Sets the area to fill with a solid color. Use with `fillcolor`
if not "none". scatterternary has a subset of the options
available to scatter. "toself" connects the endpoints of the
trace (or each segment of the trace if it has gaps) into a
closed shape. "tonext" fills the space between two traces if
one completely encloses the other (eg consecutive contour
lines), and behaves like "toself" if there is no trace before
it. "tonext" should not be used if one trace does not enclose
the other.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'toself', 'tonext']
Returns
-------
Any
"""
return self['fill']
@fill.setter
def fill(self, val):
self['fill'] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['fillcolor']
@fillcolor.setter
def fillcolor(self, val):
self['fillcolor'] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['a', 'b', 'text', 'name'] joined with '+' characters
(e.g. 'a+b')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['hoverinfo']
@hoverinfo.setter
def hoverinfo(self, val):
self['hoverinfo'] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on plot.ly for hoverinfo .
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hoverinfosrc']
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self['hoverinfosrc'] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of plotly.graph_objs.scattercarpet.Hoverlabel
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on plot.ly for
bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on plot.ly for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the length (in number of characters) of
the trace name in the hover labels for this
trace. -1 shows the whole name regardless of
length. 0-3 shows the first 0-3 characters, and
an integer >3 will show the whole name if it is
less than that many characters, but if it is
longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for
namelength .
Returns
-------
plotly.graph_objs.scattercarpet.Hoverlabel
"""
return self['hoverlabel']
@hoverlabel.setter
def hoverlabel(self, val):
self['hoverlabel'] = val
# hoveron
# -------
@property
def hoveron(self):
"""
Do the hover effects highlight individual points (markers or
line points) or do they highlight filled regions? If the fill
is "toself" or "tonext" and there are no markers or text, then
the default is "fills", otherwise it is "points".
The 'hoveron' property is a flaglist and may be specified
as a string containing:
- Any combination of ['points', 'fills'] joined with '+' characters
(e.g. 'points+fills')
Returns
-------
Any
"""
return self['hoveron']
@hoveron.setter
def hoveron(self, val):
self['hoveron'] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array of
        strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['ids']
@ids.setter
def ids(self, val):
self['ids'] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on plot.ly for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['idssrc']
@idssrc.setter
def idssrc(self, val):
self['idssrc'] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['legendgroup']
@legendgroup.setter
def legendgroup(self, val):
self['legendgroup'] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.scattercarpet.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the
lines are drawn using spline interpolation. The
other available values correspond to step-wise
line shapes.
smoothing
Has an effect only if `shape` is set to
"spline" Sets the amount of smoothing. 0
corresponds to no smoothing (equivalent to a
"linear" shape).
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.scattercarpet.Line
"""
return self['line']
@line.setter
def line(self, val):
self['line'] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scattercarpet.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets themarkercolor. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
colorbar
plotly.graph_objs.scattercarpet.marker.ColorBar
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color
space, use`marker.cmin` and `marker.cmax`.
Alternatively, `colorscale` may be a palette
name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R
ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri
c,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
gradient
plotly.graph_objs.scattercarpet.marker.Gradient
instance or dict with compatible properties
line
plotly.graph_objs.scattercarpet.marker.Line
instance or dict with compatible properties
maxdisplayed
Sets a maximum number of points to be drawn on
the graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for
opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
                `marker.color` is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
                Sets the source reference on plot.ly for size.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for
                symbol.
Returns
-------
plotly.graph_objs.scattercarpet.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
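    # Illustrative (hedged) example of a marker dict that this property
    # accepts; all of the values below are hypothetical:
    #   dict(size=8, color=[0.1, 0.5, 0.9], colorscale='Viridis', showscale=True)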
# mode
# ----
@property
def mode(self):
"""
Determines the drawing mode for this scatter trace. If the
provided `mode` includes "text" then the `text` elements appear
at the coordinates. Otherwise, the `text` elements appear on
hover. If there are less than 20 points and the trace is not
stacked then the default is "lines+markers". Otherwise,
"lines".
The 'mode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lines', 'markers', 'text'] joined with '+' characters
(e.g. 'lines+markers')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self['mode']
@mode.setter
def mode(self, val):
self['mode'] = val
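    # Illustrative (hedged) examples of valid `mode` flaglist values:
    #   "markers", "lines", "lines+markers", "lines+markers+text", "none"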
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['name']
@name.setter
def name(self, val):
self['name'] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of plotly.graph_objs.scattercarpet.Selected
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
plotly.graph_objs.scattercarpet.selected.Marker
instance or dict with compatible properties
textfont
plotly.graph_objs.scattercarpet.selected.Textfo
nt instance or dict with compatible properties
Returns
-------
plotly.graph_objs.scattercarpet.Selected
"""
return self['selected']
@selected.setter
def selected(self, val):
self['selected'] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
        turned on for all points, whereas any other non-array value
        means no selection at all, where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self['selectedpoints']
@selectedpoints.setter
def selectedpoints(self, val):
self['selectedpoints'] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlegend']
@showlegend.setter
def showlegend(self, val):
self['showlegend'] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of plotly.graph_objs.scattercarpet.Stream
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
Returns
-------
plotly.graph_objs.scattercarpet.Stream
"""
return self['stream']
@stream.setter
def stream(self, val):
self['stream'] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (a,b,c) point. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order
        to the data points in (a,b,c).
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['text']
@text.setter
def text(self, val):
self['text'] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the text font.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scattercarpet.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
                Sets the source reference on plot.ly for color.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
                installed and supported. These include "Arial",
                "Balto", "Courier New", "Droid Sans", "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
                family.
size
sizesrc
                Sets the source reference on plot.ly for size.
Returns
-------
plotly.graph_objs.scattercarpet.Textfont
"""
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# textposition
# ------------
@property
def textposition(self):
"""
        Sets the positions of the `text` elements with respect to the
(x,y) coordinates.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['textposition']
@textposition.setter
def textposition(self, val):
self['textposition'] = val
# textpositionsrc
# ---------------
@property
def textpositionsrc(self):
"""
        Sets the source reference on plot.ly for textposition.
The 'textpositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['textpositionsrc']
@textpositionsrc.setter
def textpositionsrc(self, val):
self['textpositionsrc'] = val
# textsrc
# -------
@property
def textsrc(self):
"""
        Sets the source reference on plot.ly for text.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['textsrc']
@textsrc.setter
def textsrc(self, val):
self['textsrc'] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['uid']
@uid.setter
def uid(self, val):
self['uid'] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self['uirevision']
@uirevision.setter
def uirevision(self, val):
self['uirevision'] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of plotly.graph_objs.scattercarpet.Unselected
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
plotly.graph_objs.scattercarpet.unselected.Mark
er instance or dict with compatible properties
textfont
plotly.graph_objs.scattercarpet.unselected.Text
font instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.scattercarpet.Unselected
"""
return self['unselected']
@unselected.setter
def unselected(self, val):
self['unselected'] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self['visible']
@visible.setter
def visible(self, val):
self['visible'] = val
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self['xaxis']
@xaxis.setter
def xaxis(self, val):
self['xaxis'] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self['yaxis']
@yaxis.setter
def yaxis(self, val):
self['yaxis'] = val
# type
# ----
@property
def type(self):
return self._props['type']
# property parent name
# --------------------
@property
def _parent_path_str(self):
return ''
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
a
Sets the quantity of component `a` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
asrc
Sets the source reference on plot.ly for a .
b
            Sets the quantity of component `b` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
bsrc
Sets the source reference on plot.ly for b .
carpet
An identifier for this carpet, so that `scattercarpet`
and `scattercontour` traces can specify a carpet plot
on which they lie
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers DOM elements
customdatasrc
Sets the source reference on plot.ly for customdata .
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". scatterternary has a subset
of the options available to scatter. "toself" connects
the endpoints of the trace (or each segment of the
trace if it has gaps) into a closed shape. "tonext"
fills the space between two traces if one completely
encloses the other (eg consecutive contour lines), and
behaves like "toself" if there is no trace before it.
"tonext" should not be used if one trace does not
enclose the other.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.scattercarpet.Hoverlabel instance or
dict with compatible properties
hoveron
Do the hover effects highlight individual points
(markers or line points) or do they highlight filled
regions? If the fill is "toself" or "tonext" and there
are no markers or text, then the default is "fills",
otherwise it is "points".
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
plotly.graph_objs.scattercarpet.Line instance or dict
with compatible properties
marker
plotly.graph_objs.scattercarpet.Marker instance or dict
with compatible properties
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.scattercarpet.Selected instance or
dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
plotly.graph_objs.scattercarpet.Stream instance or dict
with compatible properties
text
Sets text elements associated with each (a,b,c) point.
If a single string, the same string appears over all
the data points. If an array of strings, the items are
            mapped in order to the data points in (a,b,c).
textfont
Sets the text font.
textposition
            Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textpositionsrc
            Sets the source reference on plot.ly for textposition.
textsrc
Sets the source reference on plot.ly for text .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
plotly.graph_objs.scattercarpet.Unselected instance or
dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
"""
def __init__(
self,
arg=None,
a=None,
asrc=None,
b=None,
bsrc=None,
carpet=None,
connectgaps=None,
customdata=None,
customdatasrc=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hoveron=None,
ids=None,
idssrc=None,
legendgroup=None,
line=None,
marker=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
xaxis=None,
yaxis=None,
**kwargs
):
"""
Construct a new Scattercarpet object
Plots a scatter trace on either the first carpet axis or the
carpet axis with a matching `carpet` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.Scattercarpet
a
Sets the quantity of component `a` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
asrc
Sets the source reference on plot.ly for a .
b
            Sets the quantity of component `b` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
bsrc
Sets the source reference on plot.ly for b .
carpet
An identifier for this carpet, so that `scattercarpet`
and `scattercontour` traces can specify a carpet plot
on which they lie
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers DOM elements
customdatasrc
Sets the source reference on plot.ly for customdata .
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". scatterternary has a subset
of the options available to scatter. "toself" connects
the endpoints of the trace (or each segment of the
trace if it has gaps) into a closed shape. "tonext"
fills the space between two traces if one completely
encloses the other (eg consecutive contour lines), and
behaves like "toself" if there is no trace before it.
"tonext" should not be used if one trace does not
enclose the other.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.scattercarpet.Hoverlabel instance or
dict with compatible properties
hoveron
Do the hover effects highlight individual points
(markers or line points) or do they highlight filled
regions? If the fill is "toself" or "tonext" and there
are no markers or text, then the default is "fills",
otherwise it is "points".
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
plotly.graph_objs.scattercarpet.Line instance or dict
with compatible properties
marker
plotly.graph_objs.scattercarpet.Marker instance or dict
with compatible properties
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.scattercarpet.Selected instance or
dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
plotly.graph_objs.scattercarpet.Stream instance or dict
with compatible properties
text
Sets text elements associated with each (a,b,c) point.
If a single string, the same string appears over all
the data points. If an array of strings, the items are
            mapped in order to the data points in (a,b,c).
textfont
Sets the text font.
textposition
            Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textpositionsrc
            Sets the source reference on plot.ly for textposition.
textsrc
Sets the source reference on plot.ly for text .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
plotly.graph_objs.scattercarpet.Unselected instance or
dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
Returns
-------
Scattercarpet
"""
super(Scattercarpet, self).__init__('scattercarpet')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Scattercarpet
constructor must be a dict or
an instance of plotly.graph_objs.Scattercarpet"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators import (scattercarpet as v_scattercarpet)
# Initialize validators
# ---------------------
self._validators['a'] = v_scattercarpet.AValidator()
self._validators['asrc'] = v_scattercarpet.AsrcValidator()
self._validators['b'] = v_scattercarpet.BValidator()
self._validators['bsrc'] = v_scattercarpet.BsrcValidator()
self._validators['carpet'] = v_scattercarpet.CarpetValidator()
self._validators['connectgaps'] = v_scattercarpet.ConnectgapsValidator(
)
self._validators['customdata'] = v_scattercarpet.CustomdataValidator()
self._validators['customdatasrc'
] = v_scattercarpet.CustomdatasrcValidator()
self._validators['fill'] = v_scattercarpet.FillValidator()
self._validators['fillcolor'] = v_scattercarpet.FillcolorValidator()
self._validators['hoverinfo'] = v_scattercarpet.HoverinfoValidator()
self._validators['hoverinfosrc'
] = v_scattercarpet.HoverinfosrcValidator()
self._validators['hoverlabel'] = v_scattercarpet.HoverlabelValidator()
self._validators['hoveron'] = v_scattercarpet.HoveronValidator()
self._validators['ids'] = v_scattercarpet.IdsValidator()
self._validators['idssrc'] = v_scattercarpet.IdssrcValidator()
self._validators['legendgroup'] = v_scattercarpet.LegendgroupValidator(
)
self._validators['line'] = v_scattercarpet.LineValidator()
self._validators['marker'] = v_scattercarpet.MarkerValidator()
self._validators['mode'] = v_scattercarpet.ModeValidator()
self._validators['name'] = v_scattercarpet.NameValidator()
self._validators['opacity'] = v_scattercarpet.OpacityValidator()
self._validators['selected'] = v_scattercarpet.SelectedValidator()
self._validators['selectedpoints'
] = v_scattercarpet.SelectedpointsValidator()
self._validators['showlegend'] = v_scattercarpet.ShowlegendValidator()
self._validators['stream'] = v_scattercarpet.StreamValidator()
self._validators['text'] = v_scattercarpet.TextValidator()
self._validators['textfont'] = v_scattercarpet.TextfontValidator()
self._validators['textposition'
] = v_scattercarpet.TextpositionValidator()
self._validators['textpositionsrc'
] = v_scattercarpet.TextpositionsrcValidator()
self._validators['textsrc'] = v_scattercarpet.TextsrcValidator()
self._validators['uid'] = v_scattercarpet.UidValidator()
self._validators['uirevision'] = v_scattercarpet.UirevisionValidator()
self._validators['unselected'] = v_scattercarpet.UnselectedValidator()
self._validators['visible'] = v_scattercarpet.VisibleValidator()
self._validators['xaxis'] = v_scattercarpet.XAxisValidator()
self._validators['yaxis'] = v_scattercarpet.YAxisValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('a', None)
self['a'] = a if a is not None else _v
_v = arg.pop('asrc', None)
self['asrc'] = asrc if asrc is not None else _v
_v = arg.pop('b', None)
self['b'] = b if b is not None else _v
_v = arg.pop('bsrc', None)
self['bsrc'] = bsrc if bsrc is not None else _v
_v = arg.pop('carpet', None)
self['carpet'] = carpet if carpet is not None else _v
_v = arg.pop('connectgaps', None)
self['connectgaps'] = connectgaps if connectgaps is not None else _v
_v = arg.pop('customdata', None)
self['customdata'] = customdata if customdata is not None else _v
_v = arg.pop('customdatasrc', None)
self['customdatasrc'
] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop('fill', None)
self['fill'] = fill if fill is not None else _v
_v = arg.pop('fillcolor', None)
self['fillcolor'] = fillcolor if fillcolor is not None else _v
_v = arg.pop('hoverinfo', None)
self['hoverinfo'] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop('hoverinfosrc', None)
self['hoverinfosrc'] = hoverinfosrc if hoverinfosrc is not None else _v
_v = arg.pop('hoverlabel', None)
self['hoverlabel'] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop('hoveron', None)
self['hoveron'] = hoveron if hoveron is not None else _v
_v = arg.pop('ids', None)
self['ids'] = ids if ids is not None else _v
_v = arg.pop('idssrc', None)
self['idssrc'] = idssrc if idssrc is not None else _v
_v = arg.pop('legendgroup', None)
self['legendgroup'] = legendgroup if legendgroup is not None else _v
_v = arg.pop('line', None)
self['line'] = line if line is not None else _v
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('mode', None)
self['mode'] = mode if mode is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('selected', None)
self['selected'] = selected if selected is not None else _v
_v = arg.pop('selectedpoints', None)
self['selectedpoints'
] = selectedpoints if selectedpoints is not None else _v
_v = arg.pop('showlegend', None)
self['showlegend'] = showlegend if showlegend is not None else _v
_v = arg.pop('stream', None)
self['stream'] = stream if stream is not None else _v
_v = arg.pop('text', None)
self['text'] = text if text is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
_v = arg.pop('textposition', None)
self['textposition'] = textposition if textposition is not None else _v
_v = arg.pop('textpositionsrc', None)
self['textpositionsrc'
] = textpositionsrc if textpositionsrc is not None else _v
_v = arg.pop('textsrc', None)
self['textsrc'] = textsrc if textsrc is not None else _v
_v = arg.pop('uid', None)
self['uid'] = uid if uid is not None else _v
_v = arg.pop('uirevision', None)
self['uirevision'] = uirevision if uirevision is not None else _v
_v = arg.pop('unselected', None)
self['unselected'] = unselected if unselected is not None else _v
_v = arg.pop('visible', None)
self['visible'] = visible if visible is not None else _v
_v = arg.pop('xaxis', None)
self['xaxis'] = xaxis if xaxis is not None else _v
_v = arg.pop('yaxis', None)
self['yaxis'] = yaxis if yaxis is not None else _v
# Read-only literals
# ------------------
from _plotly_utils.basevalidators import LiteralValidator
self._props['type'] = 'scattercarpet'
self._validators['type'] = LiteralValidator(
plotly_name='type',
parent_name='scattercarpet',
val='scattercarpet'
)
arg.pop('type', None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
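        # Illustrative (hedged) construction of this trace; the data values
        # and the carpet identifier below are hypothetical:
        #
        #   import plotly.graph_objs as go
        #   trace = go.Scattercarpet(a=[4, 4.5, 5], b=[1, 2, 3],
        #                            carpet='carpet1', mode='lines+markers')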
| 36.511283
| 86
| 0.56513
|
75e7e9133d396fb11af2fd6093a957e28ef93f25
| 872
|
py
|
Python
|
adeft_app/__init__.py
|
indralab/adeft_app
|
3f20d04791f598e089bb59ca1ca133d5d51d6c28
|
[
"BSD-2-Clause"
] | null | null | null |
adeft_app/__init__.py
|
indralab/adeft_app
|
3f20d04791f598e089bb59ca1ca133d5d51d6c28
|
[
"BSD-2-Clause"
] | null | null | null |
adeft_app/__init__.py
|
indralab/adeft_app
|
3f20d04791f598e089bb59ca1ca133d5d51d6c28
|
[
"BSD-2-Clause"
] | 2
|
2019-05-10T19:58:13.000Z
|
2019-05-14T14:25:45.000Z
|
import os
from flask import Flask, render_template
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATA=os.path.join(app.instance_path, 'data'),
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
from . import ground, fix
@app.route('/')
def main():
return render_template('index.jinja2')
app.register_blueprint(ground.bp)
app.register_blueprint(fix.bp)
return app
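# A minimal, hedged usage sketch of the application factory above; the
# override values passed as test_config are hypothetical:
#
#   from adeft_app import create_app
#
#   app = create_app({'TESTING': True, 'SECRET_KEY': 'test'})
#   app.run(debug=True)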
| 24.914286
| 66
| 0.661697
|
95877f39fba2595ccdce10d4458a69d2e7ad7093
| 27
|
py
|
Python
|
__init__.py
|
wpm216/fileParser
|
916958f6d544dd748dd0dd8701c585fefb5074c8
|
[
"MIT"
] | null | null | null |
__init__.py
|
wpm216/fileParser
|
916958f6d544dd748dd0dd8701c585fefb5074c8
|
[
"MIT"
] | null | null | null |
__init__.py
|
wpm216/fileParser
|
916958f6d544dd748dd0dd8701c585fefb5074c8
|
[
"MIT"
] | null | null | null |
from .src.File import File
| 13.5
| 26
| 0.777778
|
e00e5bbd17e52cf000b2a9d972913c4c2f597b88
| 2,095
|
py
|
Python
|
main.py
|
lexerpars/odoo_hacking
|
b0eb8edd76657a8c49d8168c250bff81b2819518
|
[
"Apache-2.0"
] | 2
|
2022-02-03T01:54:59.000Z
|
2022-02-13T04:11:49.000Z
|
main.py
|
lexerpars/odoo_hacking
|
b0eb8edd76657a8c49d8168c250bff81b2819518
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
lexerpars/odoo_hacking
|
b0eb8edd76657a8c49d8168c250bff81b2819518
|
[
"Apache-2.0"
] | 2
|
2022-01-19T16:24:57.000Z
|
2022-02-03T01:55:29.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 13 21:03:02 2022
@author: Lexerpars
"""
import argparse
from models import auth
from models import options
from subprocess import call
def parser():
    pars = argparse.ArgumentParser(description='ODOO hacking script')
    pars.add_argument('host', help='URL or HOST of ODOO')
arguments = pars.parse_args()
return arguments
def main(arguments):
call('color a' ,shell=True)
welcome = '''
____ _ _ _ _ _
/ __ \ | | | | | | | | (_)
| | | | __| | ___ ___ ______| |__| | __ _ ___| | ___ _ __ __ _
| | | |/ _` |/ _ \ / _ \______| __ |/ _` |/ __| |/ / | '_ \ / _` |
| |__| | (_| | (_) | (_) | | | | | (_| | (__| <| | | | | (_| |
\____/ \__,_|\___/ \___/ |_| |_|\__,_|\___|_|\_\_|_| |_|\__, |
/ ____| | | __/ |
| | __ _ _ __ _| |_ _____ ____ _ _ __ ___ ___ |___/
| | |_ | | | |/ _` | __/ _ \ \ /\ / / _` | '__/ _ \/ __|
| |__| | |_| | (_| | || __/\ V V / (_| | | | __/\__ \
\_____|\__,_|\__,_|\__\___| \_/\_/ \__,_|_| \___||___/
'''
print(welcome)
print('[*] Target :',arguments.host)
conexion = auth.Conexion(host=arguments.host)
version = conexion.version()
if version:
dbs = conexion.list_db()
registro = conexion.registro_odoo()
apps = False
if not dbs or len(dbs) <= 1:
apps = conexion.apps_default_info()
if dbs:
        op = input('Do you want to try the basic credentials? [Si] or [No] ')
if op == 'Si':
conexion.auth_basic(dbs)
menu = options.Menu(arguments.host)
menu.MenuOpciones(version)
if __name__ == '__main__':
arguments = parser()
main(arguments)
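# A hedged sketch of how the argparse front-end above is typically invoked
# from a shell; the target URL is hypothetical:
#
#   python main.py http://127.0.0.1:8069
#
# parser() then exposes the positional argument as arguments.host.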
| 36.12069
| 120
| 0.41957
|
14418aa329e9224f416b69c1b84e73a94afc4e03
| 92
|
py
|
Python
|
2015/02/pew-brian-williams/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14
|
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2015/02/pew-brian-williams/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2015/02/pew-brian-williams/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7
|
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '1Jzl-3MUzTptF0VJUnu0q_DujYS0cdKjU6PYJgcXlOFA'
| 23
| 68
| 0.836957
|
5216858f859463d3632f25f41c6a062d24091508
| 6,121
|
py
|
Python
|
tests/unit/transport/tcp_test.py
|
silviud/salt
|
9d35ea66428a030d00ef1a2f3b93ecfe90be023c
|
[
"Apache-2.0"
] | 1
|
2018-09-19T22:42:54.000Z
|
2018-09-19T22:42:54.000Z
|
tests/unit/transport/tcp_test.py
|
silviud/salt
|
9d35ea66428a030d00ef1a2f3b93ecfe90be023c
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/transport/tcp_test.py
|
silviud/salt
|
9d35ea66428a030d00ef1a2f3b93ecfe90be023c
|
[
"Apache-2.0"
] | 1
|
2019-07-23T13:42:23.000Z
|
2019-07-23T13:42:23.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Thomas Jackson <jacksontj.89@gmail.com>`
'''
# Import python libs
from __future__ import absolute_import
import os
import threading
import tornado.gen
import tornado.ioloop
from tornado.testing import AsyncTestCase
import salt.config
import salt.ext.six as six
import salt.utils
import salt.transport.server
import salt.transport.client
import salt.exceptions
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
import integration
# Import Salt libs
from unit.transport.req_test import ReqChannelMixin
from unit.transport.pub_test import PubChannelMixin
# TODO: move to a library?
def get_config_file_path(filename):
return os.path.join(integration.TMP, 'config', filename)
class BaseTCPReqCase(TestCase):
'''
Test the req server/client pair
'''
@classmethod
def setUpClass(cls):
cls.master_opts = salt.config.master_config(get_config_file_path('master'))
cls.master_opts.update({
'transport': 'tcp',
'auto_accept': True,
})
cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
cls.minion_opts.update({
'transport': 'tcp',
'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
})
cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
cls.server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
cls.server_channel.pre_fork(cls.process_manager)
cls.io_loop = tornado.ioloop.IOLoop()
cls.io_loop.make_current()
cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(target=cls.io_loop.start)
cls.server_thread.daemon = True
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.io_loop.stop()
cls.server_thread.join()
cls.process_manager.kill_children()
cls.server_channel.close()
del cls.server_channel
@skipIf(salt.utils.is_darwin(), 'hanging test suite on MacOS')
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
'''
Test all of the clear msg stuff
'''
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts, crypt='clear')
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
@skipIf(salt.utils.is_darwin(), 'hanging test suite on MacOS')
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts)
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send'}))
# TODO: make failed returns have a specific framing so we can raise the same exception
# on encrypted channels
def test_badload(self):
'''
Test a variety of bad requests, make sure that we get some sort of error
'''
msgs = ['', [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):
ret = self.channel.send(msg)
class BaseTCPPubCase(AsyncTestCase):
'''
Test the req server/client pair
'''
@classmethod
def setUpClass(cls):
cls.master_opts = salt.config.master_config(get_config_file_path('master'))
cls.master_opts.update({
'transport': 'tcp',
'auto_accept': True,
})
cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
cls.minion_opts.update({
'transport': 'tcp',
'master_ip': '127.0.0.1',
'auth_timeout': 1,
'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
})
cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
cls.server_channel = salt.transport.server.PubServerChannel.factory(cls.master_opts)
cls.server_channel.pre_fork(cls.process_manager)
# we also require req server for auth
cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
cls.req_server_channel.pre_fork(cls.process_manager)
cls._server_io_loop = tornado.ioloop.IOLoop()
cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls._server_io_loop)
cls.server_thread = threading.Thread(target=cls._server_io_loop.start)
cls.server_thread.start()
@classmethod
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
return payload, {'fun': 'send_clear'}
@classmethod
def tearDownClass(cls):
cls._server_io_loop.stop()
cls.server_thread.join()
cls.process_manager.kill_children()
cls.req_server_channel.close()
del cls.req_server_channel
def setUp(self):
super(BaseTCPPubCase, self).setUp()
self._start_handlers = dict(self.io_loop._handlers)
def tearDown(self):
super(BaseTCPPubCase, self).tearDown()
failures = []
for k, v in six.iteritems(self.io_loop._handlers):
if self._start_handlers.get(k) != v:
failures.append((k, v))
if len(failures) > 0:
raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
@skipIf(True, 'Skip until we can devote time to fix this test')
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
'''
Tests around the publish system
'''
if __name__ == '__main__':
from integration import run_tests
run_tests(ClearReqTestCases, needs_daemon=False)
run_tests(AESReqTestCases, needs_daemon=False)
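    # A hedged, standalone sketch of the background-IOLoop pattern used by the
    # setUpClass methods above (no salt objects involved):
    #
    #   import threading
    #   import tornado.ioloop
    #
    #   loop = tornado.ioloop.IOLoop()
    #   thread = threading.Thread(target=loop.start, daemon=True)
    #   thread.start()
    #   # ... schedule work with loop.add_callback(...) ...
    #   loop.add_callback(loop.stop)  # thread-safe way to stop the loop
    #   thread.join()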
| 31.551546
| 96
| 0.670642
|
f911421191950f2e57ad4451197396281ffdfaf2
| 17,212
|
py
|
Python
|
hwt/synthesizer/rtlLevel/extract_part_drivers.py
|
ufo2011/hwt
|
2a32a3fc0c04cf3d6e78dce2dcd0d90dffc7eac1
|
[
"MIT"
] | 134
|
2018-02-28T05:06:31.000Z
|
2022-03-25T23:50:13.000Z
|
hwt/synthesizer/rtlLevel/extract_part_drivers.py
|
ufo2011/hwt
|
2a32a3fc0c04cf3d6e78dce2dcd0d90dffc7eac1
|
[
"MIT"
] | 32
|
2018-03-15T19:01:02.000Z
|
2021-07-02T14:00:06.000Z
|
hwt/synthesizer/rtlLevel/extract_part_drivers.py
|
ufo2011/hwt
|
2a32a3fc0c04cf3d6e78dce2dcd0d90dffc7eac1
|
[
"MIT"
] | 17
|
2018-09-16T12:54:01.000Z
|
2022-01-20T03:16:52.000Z
|
from itertools import islice
from typing import Dict, List, Tuple, Union, Optional, Sequence
from hwt.code import Concat
from hwt.doc_markers import internal
from hwt.hdl.operator import isConst
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.statements.codeBlockContainer import HdlStmCodeBlockContainer
from hwt.hdl.statements.ifContainter import IfContainer
from hwt.hdl.statements.statement import HdlStatement
from hwt.hdl.statements.switchContainer import SwitchContainer
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.bitsVal import BitsVal
from hwt.hdl.types.defs import SLICE
from hwt.hdl.types.sliceVal import HSliceVal
from hwt.hdl.value import HValue
from hwt.pyUtils.uniqList import UniqList
from hwt.serializer.utils import RtlSignal_sort_key, HdlStatement_sort_key
from hwt.synthesizer.rtlLevel.constants import NOT_SPECIFIED
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
def _format_indexes(indexes):
return tuple(
(int(i) + 1, int(i))
if isinstance(i, BitsVal) else
(int(i.val.start), int(i.val.stop))
for i in indexes)
@internal
def construct_tmp_dst_sig_for_slice(dst: RtlSignal,
indexes: List[Union[BitsVal, HSliceVal]],
src: Optional[RtlSignal],
is_signal_needed: bool) -> RtlSignal:
"""
    Construct a tmp signal or value which will be used instead of a slice of the original signal
    :param dst: the signal whose slice we want to generate a tmp signal for
    :param indexes: the indexes specifying the slice of the dst
    :param is_signal_needed: True if we need a signal which we will drive later, else returns an HValue instance
resolved from default and nop value
"""
if is_signal_needed:
name = dst.name
def_val = dst.def_val
nop_val = dst._nop_val
for i in indexes:
def_val = def_val[i]
if nop_val is not NOT_SPECIFIED:
nop_val = nop_val[i]
if is_signal_needed:
dst = dst[i]
if isinstance(i, HSliceVal):
if int(i.val.step) == -1:
stop = int(i.val.stop)
start = int(i.val.start)
name = f"{name}_{start - 1:d}downto{stop:d}"
else:
raise NotImplementedError(i.val.step)
else:
_i = int(i)
name = f"{name:s}_{_i:d}"
if is_signal_needed:
tmp_sig = dst.ctx.sig(name, dst._dtype, def_val=def_val, nop_val=nop_val)
return tmp_sig
elif src is not None:
return src
elif nop_val is not NOT_SPECIFIED:
return nop_val
else:
return def_val
def resolve_splitpoints(s: RtlSignal, parts):
split_points = set()
add_split_point = split_points.add
for i, _, _ in parts:
if len(i) != 1:
raise NotImplementedError(s, i)
i = i[0]
if isinstance(i, BitsVal):
# index is normal integer
i = int(i)
add_split_point(i)
add_split_point(i + 1)
else:
# index is slice
assert isinstance(i, HSliceVal), (s, i)
add_split_point(int(i.val.start))
add_split_point(int(i.val.stop))
if isinstance(s._dtype, Bits):
# add boundary points in the case something is unconnected
add_split_point(0)
add_split_point(s._dtype.bit_length())
else:
raise NotImplementedError(s._dtype)
return split_points
class RtlNetlistPassExtractPartDrivers():
"""
    Split parts of bit vectors so each segment has a unique variable.
.. code-block:: verilog
if (c0)
s[0] <= x;
if (c1)
s[1] <= y;
to
.. code-block:: verilog
wire s_0_tmp;
wire s_1_tmp;
assign s <= {s_1_tmp, s_0_tmp};
if (c0)
s_0_tmp <= x;
if (c1)
s_1_tmp <= y;
"""
@classmethod
def find_independent_slice_drivers(cls, stm: HdlStatement):
if isinstance(stm, HdlAssignmentContainer):
if stm.indexes and len(stm.indexes) == 1 and isinstance(stm.dst._dtype, Bits):
dst = stm.dst
for i in stm.indexes:
if not isConst(i):
return
can_directly_replace_with_src_expr = stm.parentStm is None
yield (
dst,
tuple(stm.indexes),
can_directly_replace_with_src_expr,
stm.src if can_directly_replace_with_src_expr else None
)
else:
for _stm in stm._iter_stms():
yield from cls.find_independent_slice_drivers(_stm)
@classmethod
def find_all_independent_slice_drivers(cls, statements: Sequence[HdlStatement]):
for stm in sorted(statements, key=HdlStatement_sort_key):
for s, indexes, can_directly_replace_with_src_expr, src in cls.find_independent_slice_drivers(stm):
yield s, indexes, can_directly_replace_with_src_expr, src
@classmethod
def _collect_indexes_on_variables(cls, statements: Sequence[HdlStatement]):
signal_parts = {}
for s, indexes, can_directly_replace_with_src_expr, src in cls.find_all_independent_slice_drivers(statements):
signal_parts.setdefault(s, []).append((indexes, can_directly_replace_with_src_expr, src))
return signal_parts
@classmethod
def resolve_final_parts_from_splitpoints_and_parts(cls, signal_parts):
final_signal_parts: Dict[RtlSignal, Dict[Tuple[Tuple[int, int], ...], Union[HValue, RtlSignal]]] = {}
# split part intervals to non-overlapping chunks
for s, parts in sorted(signal_parts.items(), key=lambda x: RtlSignal_sort_key(x[0])):
split_point = resolve_splitpoints(s, parts)
split_point = sorted(split_point)
# prepare part signals
new_parts = []
new_parts_dict = {}
split_i = 0
end = 0
# :attention: parts are likely to contain parts with same indexes
for indexes, can_directly_replace_with_src_expr, src in sorted(parts, key=lambda x: x[0]):
if len(indexes) != 1:
raise NotImplementedError()
i = indexes[0]
split_p = split_point[split_i]
if isinstance(i, BitsVal):
low = int(i)
high = low + 1
index_key = ((high, low),)
else:
assert isinstance(i, HSliceVal), (s, i)
if i.val.step != -1:
raise NotImplementedError(s, i)
high, low = int(i.val.start), int(i.val.stop)
index_key = ((high, low),)
while split_p < low:
                    # some parts at the beginning are skipped,
                    # which means that part is not driven by anything
# and we need to check default and nop value
part_indexes = (SLICE.from_py(slice(low, split_p , -1)),)
_src = construct_tmp_dst_sig_for_slice(s, part_indexes, None, isinstance(s._nop_val, RtlSignal))
new_parts.append(_src)
_index_key = ((low, split_p),)
new_parts_dict[_index_key] = _src, True
split_i += 1
split_p = split_point[split_i]
this_start_split_p_i = split_i
if split_p > low:
# some parts at the beginning were already resolved
                    # This can happen if some earlier part started at a lower index and overlaps with this part.
try:
_, _can_directly_replace_with_src_expr = new_parts_dict[index_key]
assert not _can_directly_replace_with_src_expr, (s, index_key)
# was already resolved and checked no need to check it again
continue
except KeyError:
pass
for i in range(split_i, -1, -1):
_sp = split_point[i]
if _sp == low:
this_start_split_p_i = i
assert split_point[this_start_split_p_i] == low
# just at the start of this slice
next_split_p = split_point[this_start_split_p_i + 1]
assert next_split_p <= high, "The next split point can be at most end of current part"
if next_split_p == high:
assert this_start_split_p_i == split_i, "We should see this part for the first time or the split_i should already be higher"
                    # all bits on this slice are always driven at once, so we can instantiate the whole part
assert split_p == low
_src = construct_tmp_dst_sig_for_slice(s, indexes, src, not can_directly_replace_with_src_expr)
new_parts.append(_src)
assert index_key not in new_parts_dict, (s, index_key)
new_parts_dict[index_key] = _src, can_directly_replace_with_src_expr
split_i += 1
else:
# list of part keys for later search
_split_parts = []
prev_sp = split_point[this_start_split_p_i]
dst_offset = low
assert not can_directly_replace_with_src_expr, (indexes, src)
                    # continue instantiating parts until we reach the end of this part
for sp_i, sp in zip(range(this_start_split_p_i + 1, len(split_point)),
islice(split_point, this_start_split_p_i + 1, None)):
# need to generate sub slice
                        # because this slice actually has multiple individually driven parts
                        # we need to generate all slice parts because there could be a case where only some sub parts are
                        # driven elsewhere and we would otherwise resolve those segments as constantly driven
# but they are in fact driven from this slice
if sp > high:
break
part_key = ((sp, prev_sp),)
if sp_i <= split_i:
# check if the slice is not driven from some top level constant assignment
                            # which would result in multiple drivers of this slice
assert src is None
existing_part, _can_directly_replace_with_src_expr = new_parts_dict[part_key]
assert not _can_directly_replace_with_src_expr, (s, low, high, existing_part)
assert not can_directly_replace_with_src_expr, (s, low, high, existing_part)
assert isinstance(existing_part, RtlSignal), (s, low, high, existing_part)
else:
assert sp_i == split_i + 1, (s, sp_i, split_i)
# get actual input signal
if src is None:
_src = None
else:
_src = src[sp - dst_offset:prev_sp - dst_offset]
part_indexes = (SLICE.from_py(slice(sp, prev_sp, -1)),)
_src = construct_tmp_dst_sig_for_slice(s, part_indexes, _src, True)
new_parts.append(_src)
new_parts_dict[part_key] = _src, can_directly_replace_with_src_expr
split_i += 1
_split_parts.append(part_key)
prev_sp = sp
new_parts_dict[index_key] = _split_parts, False
end = max(end, high)
if end < split_point[-1]:
# something unconnected at the end
high, low = split_point[-1], end
part_indexes = (SLICE.from_py(slice(high, low , -1)),)
_src = construct_tmp_dst_sig_for_slice(s, part_indexes, None, isinstance(s._nop_val, RtlSignal))
new_parts.append(_src)
index_key = ((high, low),)
new_parts_dict[index_key] = _src, True
# construct assignment of concatenation from all parts
assert new_parts, (s, parts)
s(Concat(*reversed(new_parts)))
final_signal_parts[s] = new_parts_dict
return final_signal_parts
@classmethod
def extract_part_drivers_stm(cls, stm: HdlStatement,
signal_parts: Dict[RtlSignal,
List[Tuple[RtlSignal, List[HValue]]]]
) -> bool:
"""
:returns: True if statement was modified
"""
if isinstance(stm, HdlAssignmentContainer):
dst = stm.dst
parts = signal_parts.get(dst, None)
if parts is None:
return False
if stm.indexes and len(stm.indexes) == 1:
indexes = _format_indexes(stm.indexes)
new_dsts, do_remove_stm = parts[indexes]
else:
# collect only parts which do not have sub parts (are primitive parts)
new_dsts = []
for k, d in parts.items():
if not isinstance(d, list):
new_dsts.append(k)
new_dsts.sort()
do_remove_stm = False
if isinstance(new_dsts, list):
if stm.parentStm is None:
return False
assert len(new_dsts) > 1, (dst, new_dsts, stm)
# assert not do_remove_stm, (dst, new_dsts, stm)
# the driven slice was split to multiple sub slices
replacement = []
dst_offset = new_dsts[0][-1][1]
for i in new_dsts:
new_dst = parts[i][0]
new_src = stm.src
for _i in i:
high, low = _i[0] - dst_offset, _i[1] - dst_offset
assert high > 0 and low >= 0, dst_offset
assert high > low, (dst, stm, (high, low))
new_src = new_src[high:low]
a = new_dst(new_src)
replacement.append(a)
# it has to have parent statement because it needs to be nested
                # because otherwise it would not have some overlapping parts driven differently
# under some condition
stm.parentStm._replace_child_statement(stm, replacement, False)
if do_remove_stm:
stm._destroy()
elif do_remove_stm:
# remove current assignment because we are using src directly
# assert stm.parentStm is None, (stm, stm.parentStm)
stm._destroy()
else:
# rewrite the HdlAssignmentContainer instance to use new dst
replacement = [new_dsts(stm.src), ]
stm.parentStm._replace_child_statement(stm, replacement, False)
return True
elif isinstance(stm, (IfContainer, SwitchContainer, HdlStmCodeBlockContainer)):
modified = False
for _stm in stm._iter_stms():
modified |= cls.extract_part_drivers_stm(_stm, signal_parts)
if modified:
assert not stm._enclosed_for, "_enclosed_for is expected not to be initialized yet"
outputs = stm._outputs
inputs = stm._inputs
stm._outputs = UniqList()
stm._inputs = UniqList()
stm._collect_io()
if stm.parentStm is None:
for o in outputs:
if o not in stm._outputs:
o.drivers.remove(stm)
for i in inputs:
if i not in stm._inputs:
i.endpoints.remove(stm)
return True
else:
raise NotImplementedError("Unknown statement ", stm)
return False
def apply(self, netlist: "RtlNetlist"):
signal_parts = self._collect_indexes_on_variables(netlist.statements)
if not signal_parts:
return
final_signal_parts = self.resolve_final_parts_from_splitpoints_and_parts(signal_parts)
for stm in sorted(netlist.statements, key=HdlStatement_sort_key):
self.extract_part_drivers_stm(stm, final_signal_parts)
@internal
def extract_part_drivers(netlist: "RtlNetlist"):
RtlNetlistPassExtractPartDrivers().apply(netlist)
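# A hedged, plain-Python sketch of the split-point idea behind
# resolve_splitpoints(): driven bit ranges are reduced to boundary points and
# then to non-overlapping segments (the ranges below are hypothetical):
#
#   driven = [(0, 4), (2, 6)]          # (low, high) slices of an 8-bit signal
#   points = sorted({0, 8, *[p for lo, hi in driven for p in (lo, hi)]})
#   segments = list(zip(points, points[1:]))
#   # points   -> [0, 2, 4, 6, 8]
#   # segments -> [(0, 2), (2, 4), (4, 6), (6, 8)]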
| 42.709677
| 144
| 0.555775
|
fa1cf06093af69d225e54da543ff564eb1902037
| 142
|
py
|
Python
|
oop_hack/dnd_oop/arena/apps.py
|
Yakov-Varnaev/hack_oop_RPG
|
f68390d9584463ea8a3abb2602bb565fc021acc8
|
[
"MIT"
] | null | null | null |
oop_hack/dnd_oop/arena/apps.py
|
Yakov-Varnaev/hack_oop_RPG
|
f68390d9584463ea8a3abb2602bb565fc021acc8
|
[
"MIT"
] | null | null | null |
oop_hack/dnd_oop/arena/apps.py
|
Yakov-Varnaev/hack_oop_RPG
|
f68390d9584463ea8a3abb2602bb565fc021acc8
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ArenaConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'arena'
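# A hedged note on how a config like this is typically registered; the
# settings module shown is hypothetical:
#
#   # settings.py
#   INSTALLED_APPS = [
#       # ...
#       'arena',
#   ]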
| 20.285714
| 56
| 0.753521
|
c01cf992cc152edf3ceafabf535fe14e5e6a7799
| 400
|
py
|
Python
|
Lib/idlelib/idle.py
|
Victor-Savu/cpython-old
|
87060fc4b043dd0da0a29a0ffb8eb92d8cad8dd3
|
[
"PSF-2.0"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
Lib/idlelib/idle.py
|
Victor-Savu/cpython-old
|
87060fc4b043dd0da0a29a0ffb8eb92d8cad8dd3
|
[
"PSF-2.0"
] | null | null | null |
Lib/idlelib/idle.py
|
Victor-Savu/cpython-old
|
87060fc4b043dd0da0a29a0ffb8eb92d8cad8dd3
|
[
"PSF-2.0"
] | 1
|
2019-04-11T11:27:01.000Z
|
2019-04-11T11:27:01.000Z
|
import os.path
import sys
# If we are working on a development version of IDLE, we need to prepend the
# parent of this idlelib dir to sys.path. Otherwise, importing idlelib gets
# the version installed with the Python used to call this module:
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, idlelib_dir)
import idlelib.pyshell
idlelib.pyshell.main()
| 33.333333 | 76 | 0.7825 |
fd67a29bef76151a5db93f36f6f8a89dcb830b3d | 119,176 | py | Python | gclient.py | bitbeen/depot_tools | 9217ff8b2c57fc7f5f706fef741a2c48efe7c885 | ["BSD-3-Clause"] | null | null | null | gclient.py | bitbeen/depot_tools | 9217ff8b2c57fc7f5f706fef741a2c48efe7c885 | ["BSD-3-Clause"] | null | null | null | gclient.py | bitbeen/depot_tools | 9217ff8b2c57fc7f5f706fef741a2c48efe7c885 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Meta checkout dependency manager for Git."""
# Files
# .gclient : Current client configuration, written by 'config' command.
# Format is a Python script defining 'solutions', a list whose
# entries each are maps binding the strings "name" and "url"
# to strings specifying the name and location of the client
# module, as well as "custom_deps" to a map similar to the
# deps section of the DEPS file below, as well as
# "custom_hooks" to a list similar to the hooks sections of
# the DEPS file below.
# .gclient_entries : A cache constructed by 'update' command. Format is a
# Python script defining 'entries', a list of the names
# of all modules in the client
# <module>/DEPS : Python script defining var 'deps' as a map from each
# requisite submodule name to a URL where it can be found (via
# one SCM)
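#
# Example (illustrative sketch only; the solution name, URLs and dep paths
# below are hypothetical, not taken from any real checkout):
#
#   .gclient:
#     solutions = [
#       { "name"       : "src",
#         "url"        : "https://example.googlesource.com/src.git",
#         "deps_file"  : "DEPS",
#         "managed"    : True,
#         "custom_deps": {},
#       },
#     ]
#
#   src/DEPS:
#     deps = {
#       "src/third_party/foo":
#           "https://example.googlesource.com/foo.git@deadbeef",
#     }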
#
# Hooks
# .gclient and DEPS files may optionally contain a list named "hooks" to
# allow custom actions to be performed based on files that have changed in the
# working copy as a result of a "sync"/"update" or "revert" operation. This
# can be prevented by using --nohooks (hooks run by default). Hooks can also
# be forced to run with the "runhooks" operation. If "sync" is run with
# --force, all known but not suppressed hooks will run regardless of the state
# of the working copy.
#
# Each item in a "hooks" list is a dict, containing these two keys:
# "pattern" The associated value is a string containing a regular
# expression. When a file whose pathname matches the expression
# is checked out, updated, or reverted, the hook's "action" will
# run.
# "action" A list describing a command to run along with its arguments, if
# any. An action command will run at most one time per gclient
# invocation, regardless of how many files matched the pattern.
# The action is executed in the same directory as the .gclient
# file. If the first item in the list is the string "python",
# the current Python interpreter (sys.executable) will be used
# to run the command. If the list contains string
# "$matching_files" it will be removed from the list and the list
# will be extended by the list of matching files.
# "name" An optional string specifying the group to which a hook belongs
# for overriding and organizing.
#
# Example:
# hooks = [
# { "pattern": "\\.(gif|jpe?g|pr0n|png)$",
# "action": ["python", "image_indexer.py", "--all"]},
# { "pattern": ".",
# "name": "gyp",
# "action": ["python", "src/build/gyp_chromium"]},
# ]
#
# Pre-DEPS Hooks
# DEPS files may optionally contain a list named "pre_deps_hooks". These are
# the same as normal hooks, except that they run before the DEPS are
# processed. Pre-DEPS run with "sync" and "revert" unless the --noprehooks
# flag is used.
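#
# Example (sketch; the hook script path is hypothetical):
#   pre_deps_hooks = [
#     { "action": ["python", "src/tools/bootstrap.py"] },
#   ]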
#
# Specifying a target OS
# An optional key named "target_os" may be added to a gclient file to specify
# one or more additional operating systems that should be considered when
# processing the deps_os/hooks_os dict of a DEPS file.
#
# Example:
# target_os = [ "android" ]
#
# If the "target_os_only" key is also present and true, then *only* the
# operating systems listed in "target_os" will be used.
#
# Example:
# target_os = [ "ios" ]
# target_os_only = True
#
# Specifying a target CPU
# To specify a target CPU, the variables target_cpu and target_cpu_only
# are available and are analogous to target_os and target_os_only.
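#
# Example (by analogy with target_os above):
#   target_cpu = [ "arm64" ]
#   target_cpu_only = True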
from __future__ import print_function
__version__ = '0.7'
import collections
import copy
import json
import logging
import optparse
import os
import platform
import posixpath
import pprint
import re
import sys
import time
try:
import urlparse
except ImportError: # For Py3 compatibility
import urllib.parse as urlparse
import detect_host_arch
import fix_encoding
import gclient_eval
import gclient_scm
import gclient_paths
import gclient_utils
import git_cache
import metrics
import metrics_utils
from third_party.repo.progress import Progress
import subcommand
import subprocess2
import setup_color
# TODO(crbug.com/953884): Remove this when python3 migration is done.
try:
basestring
except NameError:
# pylint: disable=redefined-builtin
basestring = str
DEPOT_TOOLS_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
# Singleton object to represent an unset cache_dir (as opposed to a disabled
# one, e.g. if a spec explicitly says `cache_dir = None`.)
UNSET_CACHE_DIR = object()
class GNException(Exception):
pass
def ToGNString(value, allow_dicts = True):
"""Returns a stringified GN equivalent of the Python value.
allow_dicts indicates if this function will allow converting dictionaries
to GN scopes. This is only possible at the top level, you can't nest a
GN scope in a list, so this should be set to False for recursive calls."""
if isinstance(value, basestring):
if value.find('\n') >= 0:
raise GNException("Trying to print a string with a newline in it.")
return '"' + \
value.replace('\\', '\\\\').replace('"', '\\"').replace('$', '\\$') + \
'"'
if sys.version_info.major == 2 and isinstance(value, unicode):
return ToGNString(value.encode('utf-8'))
if isinstance(value, bool):
if value:
return "true"
return "false"
# NOTE: some type handling removed compared to chromium/src copy.
raise GNException("Unsupported type when printing to GN.")
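# Usage sketch for ToGNString (expected results, shown here as comments only):
#   ToGNString("foo")  -> '"foo"'
#   ToGNString(True)   -> 'true'
#   ToGNString(3)      -> raises GNException, since this trimmed-down copy only
#                         handles strings and bools (see the NOTE above).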
class Hook(object):
"""Descriptor of command ran before/after sync or on demand."""
def __init__(self, action, pattern=None, name=None, cwd=None, condition=None,
variables=None, verbose=False, cwd_base=None):
"""Constructor.
Arguments:
action (list of basestring): argv of the command to run
pattern (basestring regex): noop with git; deprecated
name (basestring): optional name; no effect on operation
cwd (basestring): working directory to use
condition (basestring): condition when to run the hook
variables (dict): variables for evaluating the condition
"""
self._action = gclient_utils.freeze(action)
self._pattern = pattern
self._name = name
self._cwd = cwd
self._condition = condition
self._variables = variables
self._verbose = verbose
self._cwd_base = cwd_base
@staticmethod
def from_dict(d, variables=None, verbose=False, conditions=None,
cwd_base=None):
"""Creates a Hook instance from a dict like in the DEPS file."""
# Merge any local and inherited conditions.
gclient_eval.UpdateCondition(d, 'and', conditions)
return Hook(
d['action'],
d.get('pattern'),
d.get('name'),
d.get('cwd'),
d.get('condition'),
variables=variables,
# Always print the header if not printing to a TTY.
verbose=verbose or not setup_color.IS_TTY,
cwd_base=cwd_base)
@property
def action(self):
return self._action
@property
def pattern(self):
return self._pattern
@property
def name(self):
return self._name
@property
def condition(self):
return self._condition
@property
def effective_cwd(self):
cwd = self._cwd_base
if self._cwd:
cwd = os.path.join(cwd, self._cwd)
return cwd
def matches(self, file_list):
"""Returns true if the pattern matches any of files in the list."""
if not self._pattern:
return True
pattern = re.compile(self._pattern)
return bool([f for f in file_list if pattern.search(f)])
def run(self):
"""Executes the hook's command (provided the condition is met)."""
if (self._condition and
not gclient_eval.EvaluateCondition(self._condition, self._variables)):
return
cmd = [arg for arg in self._action]
if cmd[0] == 'python':
# If the hook specified "python" as the first item, the action is a
# Python script. Run it by starting a new copy of the same
# interpreter.
cmd[0] = sys.executable
elif cmd[0] == 'vpython' and _detect_host_os() == 'win':
cmd[0] += '.bat'
try:
start_time = time.time()
gclient_utils.CheckCallAndFilterAndHeader(
cmd, cwd=self.effective_cwd, always=self._verbose)
except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
# Use a discrete exit status code of 2 to indicate that a hook action
# failed. Users of this script may wish to treat hook action failures
# differently from VC failures.
print('Error: %s' % str(e), file=sys.stderr)
sys.exit(2)
finally:
elapsed_time = time.time() - start_time
if elapsed_time > 10:
print("Hook '%s' took %.2f secs" % (
gclient_utils.CommandToStr(cmd), elapsed_time))
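# Sketch of how a DEPS "hooks" entry becomes a Hook instance. The dict contents
# and the `dep`/`hooks_cwd` names are illustrative, not real values:
#   hook_dict = {
#     "name": "fetch_tools",
#     "pattern": ".",
#     "condition": "checkout_linux",
#     "action": ["python", "src/tools/fetch.py"],
#   }
#   hook = Hook.from_dict(hook_dict, variables=dep.get_vars(),
#                         conditions=dep.condition, cwd_base=hooks_cwd)
#   hook.run()  # no-op unless "checkout_linux" evaluates to True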
class DependencySettings(object):
"""Immutable configuration settings."""
def __init__(
self, parent, url, managed, custom_deps, custom_vars,
custom_hooks, deps_file, should_process, relative, condition):
# These are not mutable:
self._parent = parent
self._deps_file = deps_file
self._url = url
# The condition as string (or None). Useful to keep e.g. for flatten.
self._condition = condition
# 'managed' determines whether or not this dependency is synced/updated by
# gclient after gclient checks it out initially. The difference between
# 'managed' and 'should_process' is that the user specifies 'managed' via
# the --unmanaged command-line flag or a .gclient config, where
# 'should_process' is dynamically set by gclient if it goes over its
# recursion limit and controls gclient's behavior so it does not misbehave.
self._managed = managed
self._should_process = should_process
# If this is a recursed-upon sub-dependency, and the parent has
# use_relative_paths set, then this dependency should check out its own
# dependencies relative to that parent's path for this, rather than
# relative to the .gclient file.
self._relative = relative
# This is a mutable value which has the list of 'target_os' OSes listed in
# the current deps file.
self.local_target_os = None
# These are only set in .gclient and not in DEPS files.
self._custom_vars = custom_vars or {}
self._custom_deps = custom_deps or {}
self._custom_hooks = custom_hooks or []
# Post process the url to remove trailing slashes.
if isinstance(self.url, basestring):
# urls are sometimes incorrectly written as proto://host/path/@rev. Replace
# it with proto://host/path@rev.
self.set_url(self.url.replace('/@', '@'))
elif not isinstance(self.url, (None.__class__)):
raise gclient_utils.Error(
('dependency url must be either string or None, '
'instead of %s') % self.url.__class__.__name__)
# Make any deps_file path platform-appropriate.
if self._deps_file:
for sep in ['/', '\\']:
self._deps_file = self._deps_file.replace(sep, os.sep)
@property
def deps_file(self):
return self._deps_file
@property
def managed(self):
return self._managed
@property
def parent(self):
return self._parent
@property
def root(self):
"""Returns the root node, a GClient object."""
if not self.parent:
# This line is to signal pylint that it could be a GClient instance.
return self or GClient(None, None)
return self.parent.root
@property
def should_process(self):
"""True if this dependency should be processed, i.e. checked out."""
return self._should_process
@property
def custom_vars(self):
return self._custom_vars.copy()
@property
def custom_deps(self):
return self._custom_deps.copy()
@property
def custom_hooks(self):
return self._custom_hooks[:]
@property
def url(self):
"""URL after variable expansion."""
return self._url
@property
def condition(self):
return self._condition
@property
def target_os(self):
if self.local_target_os is not None:
return tuple(set(self.local_target_os).union(self.parent.target_os))
else:
return self.parent.target_os
@property
def target_cpu(self):
return self.parent.target_cpu
def set_url(self, url):
self._url = url
def get_custom_deps(self, name, url):
"""Returns a custom deps if applicable."""
if self.parent:
url = self.parent.get_custom_deps(name, url)
# None is a valid return value to disable a dependency.
return self.custom_deps.get(name, url)
class Dependency(gclient_utils.WorkItem, DependencySettings):
"""Object that represents a dependency checkout."""
def __init__(self, parent, name, url, managed, custom_deps,
custom_vars, custom_hooks, deps_file, should_process,
should_recurse, relative, condition, print_outbuf=False):
gclient_utils.WorkItem.__init__(self, name)
DependencySettings.__init__(
self, parent, url, managed, custom_deps, custom_vars,
custom_hooks, deps_file, should_process, relative, condition)
# This is in both .gclient and DEPS files:
self._deps_hooks = []
self._pre_deps_hooks = []
# Calculates properties:
self._dependencies = []
self._vars = {}
# A cache of the files affected by the current operation, necessary for
# hooks.
self._file_list = []
# List of host names from which dependencies are allowed.
# Default is an empty set, meaning unspecified in DEPS file, and hence all
# hosts will be allowed. Non-empty set means whitelist of hosts.
# allowed_hosts var is scoped to its DEPS file, and so it isn't recursive.
self._allowed_hosts = frozenset()
self._gn_args_from = None
# Spec for .gni output to write (if any).
self._gn_args_file = None
self._gn_args = []
# If it is not set to True, the dependency wasn't processed for its child
# dependency, i.e. its DEPS wasn't read.
self._deps_parsed = False
# This dependency has been processed, i.e. checked out
self._processed = False
# This dependency had its pre-DEPS hooks run
self._pre_deps_hooks_ran = False
# This dependency had its hook run
self._hooks_ran = False
# This is the scm used to checkout self.url. It may be used by dependencies
# to get the datetime of the revision we checked out.
self._used_scm = None
self._used_revision = None
# The actual revision we ended up getting, or None if that information is
# unavailable
self._got_revision = None
# recursedeps is a mutable value that selectively overrides the default
# 'no recursion' setting on a dep-by-dep basis.
#
# It will be a dictionary of {deps_name: depfile_name}
self.recursedeps = {}
# Whether we should process this dependency's DEPS file.
self._should_recurse = should_recurse
self._OverrideUrl()
# This is inherited from WorkItem. We want the URL to be a resource.
if self.url and isinstance(self.url, basestring):
# The url is usually given to gclient either as https://blah@123
# or just https://blah. The @123 portion is irrelevant.
self.resources.append(self.url.split('@')[0])
# Controls whether we want to print git's output when we first clone the
# dependency
self.print_outbuf = print_outbuf
if not self.name and self.parent:
raise gclient_utils.Error('Dependency without name')
def _OverrideUrl(self):
"""Resolves the parsed url from the parent hierarchy."""
parsed_url = self.get_custom_deps(self._name, self.url)
if parsed_url != self.url:
logging.info('Dependency(%s)._OverrideUrl(%s) -> %s', self._name,
self.url, parsed_url)
self.set_url(parsed_url)
elif isinstance(self.url, basestring):
parsed_url = urlparse.urlparse(self.url)
if (not parsed_url[0] and
not re.match(r'^\w+\@[\w\.-]+\:[\w\/]+', parsed_url[2])):
path = parsed_url[2]
if not path.startswith('/'):
raise gclient_utils.Error(
'relative DEPS entry \'%s\' must begin with a slash' % self.url)
# A relative url. Get the parent url, strip from the last '/'
# (equivalent to unix basename), and append the relative url.
parent_url = self.parent.url
parsed_url = parent_url[:parent_url.rfind('/')] + self.url
logging.info('Dependency(%s)._OverrideUrl(%s) -> %s', self.name,
self.url, parsed_url)
self.set_url(parsed_url)
elif self.url is None:
logging.info('Dependency(%s)._OverrideUrl(None) -> None', self._name)
else:
raise gclient_utils.Error('Unknown url type')
def PinToActualRevision(self):
"""Updates self.url to the revision checked out on disk."""
if self.url is None:
return
url = None
scm = self.CreateSCM()
if os.path.isdir(scm.checkout_path):
revision = scm.revinfo(None, None, None)
url = '%s@%s' % (gclient_utils.SplitUrlRevision(self.url)[0], revision)
self.set_url(url)
def ToLines(self):
s = []
condition_part = ([' "condition": %r,' % self.condition]
if self.condition else [])
s.extend([
' # %s' % self.hierarchy(include_url=False),
' "%s": {' % (self.name,),
' "url": "%s",' % (self.url,),
] + condition_part + [
' },',
'',
])
return s
@property
def requirements(self):
"""Calculate the list of requirements."""
requirements = set()
# self.parent is implicitly a requirement. This will be recursive by
# definition.
if self.parent and self.parent.name:
requirements.add(self.parent.name)
# For a tree with at least 2 levels*, the leaf node needs to depend
# on the level higher up in an orderly way.
# This becomes messy for >2 depth as the DEPS file format is a dictionary,
# thus unsorted, while the .gclient format is a list thus sorted.
#
# Interestingly enough, the following condition only works in the case we
# want: self is a 2nd level node. 3rd level nodes wouldn't need this since
# they already have their parent as a requirement.
if self.parent and self.parent.parent and not self.parent.parent.parent:
requirements |= set(i.name for i in self.root.dependencies if i.name)
if self.name:
requirements |= set(
obj.name for obj in self.root.subtree(False)
if (obj is not self
and obj.name and
self.name.startswith(posixpath.join(obj.name, ''))))
requirements = tuple(sorted(requirements))
logging.info('Dependency(%s).requirements = %s' % (self.name, requirements))
return requirements
@property
def should_recurse(self):
return self._should_recurse
def verify_validity(self):
"""Verifies that this Dependency is fine to add as a child of another one.
Returns True if this entry should be added, False if it is a duplicate of
another entry.
"""
logging.info('Dependency(%s).verify_validity()' % self.name)
if self.name in [s.name for s in self.parent.dependencies]:
raise gclient_utils.Error(
'The same name "%s" appears multiple times in the deps section' %
self.name)
if not self.should_process:
# Return early, no need to set requirements.
return not any(d.name == self.name for d in self.root.subtree(True))
# This requires a full tree traversal with locks.
siblings = [d for d in self.root.subtree(False) if d.name == self.name]
for sibling in siblings:
# Allow only one of them to be None or ''.
if self.url != sibling.url and bool(self.url) == bool(sibling.url):
raise gclient_utils.Error(
('Dependency %s specified more than once:\n'
' %s [%s]\n'
'vs\n'
' %s [%s]') % (
self.name,
sibling.hierarchy(),
sibling.url,
self.hierarchy(),
self.url))
# In theory we could keep it as a shadow of the other one. In
# practice, simply ignore it.
logging.warn('Won\'t process duplicate dependency %s' % sibling)
return False
return True
def _postprocess_deps(self, deps, rel_prefix):
"""Performs post-processing of deps compared to what's in the DEPS file."""
# Make sure the dict is mutable, e.g. in case it's frozen.
deps = dict(deps)
# If a line is in custom_deps, but not in the solution, we want to append
# this line to the solution.
for dep_name, dep_info in self.custom_deps.items():
if dep_name not in deps:
deps[dep_name] = {'url': dep_info, 'dep_type': 'git'}
# Make child deps conditional on any parent conditions. This ensures that,
# when flattened, recursed entries have the correct restrictions, even if
# not explicitly set in the recursed DEPS file. For instance, if
# "src/ios_foo" is conditional on "checkout_ios=True", then anything
# recursively included by "src/ios_foo/DEPS" should also require
# "checkout_ios=True".
if self.condition:
for value in deps.values():
gclient_eval.UpdateCondition(value, 'and', self.condition)
if rel_prefix:
logging.warning('use_relative_paths enabled.')
rel_deps = {}
for d, url in deps.items():
# normpath is required to allow DEPS to use .. in their
# dependency local path.
rel_deps[os.path.normpath(os.path.join(rel_prefix, d))] = url
logging.warning('Updating deps by prepending %s.', rel_prefix)
deps = rel_deps
return deps
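# Sketch of the transformation above (names are illustrative): with
# rel_prefix = "src", a DEPS entry named "third_party/foo" is rewritten to
# "src/third_party/foo"; custom_deps entries missing from deps are first
# appended as {"url": <value>, "dep_type": "git"} and then renamed the same way.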
def _deps_to_objects(self, deps, use_relative_paths):
"""Convert a deps dict to a dict of Dependency objects."""
deps_to_add = []
for name, dep_value in deps.items():
should_process = self.should_process
if dep_value is None:
continue
condition = dep_value.get('condition')
dep_type = dep_value.get('dep_type')
if condition and not self._get_option('process_all_deps', False):
should_process = should_process and gclient_eval.EvaluateCondition(
condition, self.get_vars())
# The following option is only set by the 'revinfo' command.
if self._get_option('ignore_dep_type', None) == dep_type:
continue
if dep_type == 'cipd':
cipd_root = self.GetCipdRoot()
for package in dep_value.get('packages', []):
deps_to_add.append(
CipdDependency(
parent=self,
name=name,
dep_value=package,
cipd_root=cipd_root,
custom_vars=self.custom_vars,
should_process=should_process,
relative=use_relative_paths,
condition=condition))
else:
url = dep_value.get('url')
deps_to_add.append(
GitDependency(
parent=self,
name=name,
url=url,
managed=True,
custom_deps=None,
custom_vars=self.custom_vars,
custom_hooks=None,
deps_file=self.recursedeps.get(name, self.deps_file),
should_process=should_process,
should_recurse=name in self.recursedeps,
relative=use_relative_paths,
condition=condition))
deps_to_add.sort(key=lambda x: x.name)
return deps_to_add
def ParseDepsFile(self):
"""Parses the DEPS file for this dependency."""
assert not self.deps_parsed
assert not self.dependencies
deps_content = None
# First try to locate the configured deps file. If it's missing, fallback
# to DEPS.
deps_files = [self.deps_file]
if 'DEPS' not in deps_files:
deps_files.append('DEPS')
for deps_file in deps_files:
filepath = os.path.join(self.root.root_dir, self.name, deps_file)
if os.path.isfile(filepath):
logging.info(
'ParseDepsFile(%s): %s file found at %s', self.name, deps_file,
filepath)
break
logging.info(
'ParseDepsFile(%s): No %s file found at %s', self.name, deps_file,
filepath)
if os.path.isfile(filepath):
deps_content = gclient_utils.FileRead(filepath)
logging.debug('ParseDepsFile(%s) read:\n%s', self.name, deps_content)
local_scope = {}
if deps_content:
try:
local_scope = gclient_eval.Parse(
deps_content, self._get_option('validate_syntax', False),
filepath, self.get_vars(), self.get_builtin_vars())
except SyntaxError as e:
gclient_utils.SyntaxErrorToError(filepath, e)
if 'allowed_hosts' in local_scope:
try:
self._allowed_hosts = frozenset(local_scope.get('allowed_hosts'))
except TypeError: # raised if non-iterable
pass
if not self._allowed_hosts:
logging.warning("allowed_hosts is specified but empty %s",
self._allowed_hosts)
raise gclient_utils.Error(
'ParseDepsFile(%s): allowed_hosts must be absent '
'or a non-empty iterable' % self.name)
self._gn_args_from = local_scope.get('gclient_gn_args_from')
self._gn_args_file = local_scope.get('gclient_gn_args_file')
self._gn_args = local_scope.get('gclient_gn_args', [])
# It doesn't make sense to set all of these, since setting gn_args_from to
# another DEPS will make gclient ignore any other local gn_args* settings.
assert not (self._gn_args_from and self._gn_args_file), \
'Only specify one of "gclient_gn_args_from" or ' \
'"gclient_gn_args_file + gclient_gn_args".'
self._vars = local_scope.get('vars', {})
if self.parent:
for key, value in self.parent.get_vars().items():
if key in self._vars:
self._vars[key] = value
# Since we heavily post-process things, freeze ones which should
# reflect original state of DEPS.
self._vars = gclient_utils.freeze(self._vars)
# If use_relative_paths is set in the DEPS file, regenerate
# the dictionary using paths relative to the directory containing
# the DEPS file. Also update recursedeps if use_relative_paths is
# enabled.
# If the deps file doesn't set use_relative_paths, but the parent did
# (and therefore set self.relative on this Dependency object), then we
# want to modify the deps and recursedeps by prepending the parent
# directory of this dependency.
use_relative_paths = local_scope.get('use_relative_paths', False)
rel_prefix = None
if use_relative_paths:
rel_prefix = self.name
elif self._relative:
rel_prefix = os.path.dirname(self.name)
if 'recursion' in local_scope:
logging.warning(
'%s: Ignoring recursion = %d.', self.name, local_scope['recursion'])
if 'recursedeps' in local_scope:
for ent in local_scope['recursedeps']:
if isinstance(ent, basestring):
self.recursedeps[ent] = self.deps_file
else: # (depname, depsfilename)
self.recursedeps[ent[0]] = ent[1]
logging.warning('Found recursedeps %r.', repr(self.recursedeps))
if rel_prefix:
logging.warning('Updating recursedeps by prepending %s.', rel_prefix)
rel_deps = {}
for depname, options in self.recursedeps.items():
rel_deps[
os.path.normpath(os.path.join(rel_prefix, depname))] = options
self.recursedeps = rel_deps
# To get gn_args from another DEPS, that DEPS must be recursed into.
if self._gn_args_from:
assert self.recursedeps and self._gn_args_from in self.recursedeps, \
'The "gclient_gn_args_from" value must be in recursedeps.'
# If present, save 'target_os' in the local_target_os property.
if 'target_os' in local_scope:
self.local_target_os = local_scope['target_os']
deps = local_scope.get('deps', {})
deps_to_add = self._deps_to_objects(
self._postprocess_deps(deps, rel_prefix), use_relative_paths)
# compute which working directory should be used for hooks
use_relative_hooks = local_scope.get('use_relative_hooks', False)
hooks_cwd = self.root.root_dir
if use_relative_hooks:
if not use_relative_paths:
raise gclient_utils.Error(
'ParseDepsFile(%s): use_relative_hooks must be used with '
'use_relative_paths' % self.name)
hooks_cwd = os.path.join(hooks_cwd, self.name)
logging.warning('Updating hook base working directory to %s.',
hooks_cwd)
# override named sets of hooks by the custom hooks
hooks_to_run = []
hook_names_to_suppress = [c.get('name', '') for c in self.custom_hooks]
for hook in local_scope.get('hooks', []):
if hook.get('name', '') not in hook_names_to_suppress:
hooks_to_run.append(hook)
# add the replacements and any additions
for hook in self.custom_hooks:
if 'action' in hook:
hooks_to_run.append(hook)
if self.should_recurse:
self._pre_deps_hooks = [
Hook.from_dict(hook, variables=self.get_vars(), verbose=True,
conditions=self.condition, cwd_base=hooks_cwd)
for hook in local_scope.get('pre_deps_hooks', [])
]
self.add_dependencies_and_close(deps_to_add, hooks_to_run,
hooks_cwd=hooks_cwd)
logging.info('ParseDepsFile(%s) done' % self.name)
def _get_option(self, attr, default):
obj = self
while not hasattr(obj, '_options'):
obj = obj.parent
return getattr(obj._options, attr, default)
def add_dependencies_and_close(self, deps_to_add, hooks, hooks_cwd=None):
"""Adds the dependencies, hooks and mark the parsing as done."""
if hooks_cwd == None:
hooks_cwd = self.root.root_dir
for dep in deps_to_add:
if dep.verify_validity():
self.add_dependency(dep)
self._mark_as_parsed([
Hook.from_dict(
h, variables=self.get_vars(), verbose=self.root._options.verbose,
conditions=self.condition, cwd_base=hooks_cwd)
for h in hooks
])
def findDepsFromNotAllowedHosts(self):
"""Returns a list of dependencies from not allowed hosts.
If allowed_hosts is not set, allows all hosts and returns empty list.
"""
if not self._allowed_hosts:
return []
bad_deps = []
for dep in self._dependencies:
# Don't enforce this for custom_deps.
if dep.name in self._custom_deps:
continue
if isinstance(dep.url, basestring):
parsed_url = urlparse.urlparse(dep.url)
if parsed_url.netloc and parsed_url.netloc not in self._allowed_hosts:
bad_deps.append(dep)
return bad_deps
def FuzzyMatchUrl(self, candidates):
"""Attempts to find this dependency in the list of candidates.
It looks first for the URL of this dependency in the list of
candidates. If it doesn't succeed, and the URL ends in '.git', it will try
looking for the URL minus '.git'. Finally it will try to look for the name
of the dependency.
Args:
candidates: list, dict. The list of candidates in which to look for this
dependency. It can contain URLs as above, or dependency names like
"src/some/dep".
Returns:
If this dependency is not found in the list of candidates, returns None.
Otherwise, it returns under which name did we find this dependency:
- Its parsed url: "https://example.com/src.git"
- Its parsed url minus '.git': "https://example.com/src"
- Its name: "src"
"""
if self.url:
origin, _ = gclient_utils.SplitUrlRevision(self.url)
if origin in candidates:
return origin
if origin.endswith('.git') and origin[:-len('.git')] in candidates:
return origin[:-len('.git')]
if origin + '.git' in candidates:
return origin + '.git'
if self.name in candidates:
return self.name
return None
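# Resolution sketch for FuzzyMatchUrl (hypothetical values): with
#   self.url   == "https://example.com/src.git@abc123"
#   candidates == {"https://example.com/src": ...}
# the revision is stripped, the ".git" form is not found, and the
# ".git"-stripped lookup returns "https://example.com/src".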
# Arguments number differs from overridden method
# pylint: disable=arguments-differ
def run(self, revision_overrides, command, args, work_queue, options,
patch_refs, target_branches):
"""Runs |command| then parse the DEPS file."""
logging.info('Dependency(%s).run()' % self.name)
assert self._file_list == []
if not self.should_process:
return
# When running runhooks, there's no need to consult the SCM.
# All known hooks are expected to run unconditionally regardless of working
# copy state, so skip the SCM status check.
run_scm = command not in (
'flatten', 'runhooks', 'recurse', 'validate', None)
file_list = [] if not options.nohooks else None
revision_override = revision_overrides.pop(
self.FuzzyMatchUrl(revision_overrides), None)
if not revision_override and not self.managed:
revision_override = 'unmanaged'
if run_scm and self.url:
# Create a shallow copy to mutate revision.
options = copy.copy(options)
options.revision = revision_override
self._used_revision = options.revision
self._used_scm = self.CreateSCM(out_cb=work_queue.out_cb)
self._got_revision = self._used_scm.RunCommand(command, options, args,
file_list)
patch_repo = self.url.split('@')[0]
patch_ref = patch_refs.pop(self.FuzzyMatchUrl(patch_refs), None)
target_branch = target_branches.pop(
self.FuzzyMatchUrl(target_branches), None)
if command == 'update' and patch_ref is not None:
self._used_scm.apply_patch_ref(patch_repo, patch_ref, target_branch,
options, file_list)
if file_list:
file_list = [os.path.join(self.name, f.strip()) for f in file_list]
# TODO(phajdan.jr): We should know exactly when the paths are absolute.
# Convert all absolute paths to relative.
for i in range(len(file_list or [])):
# It depends on the command being executed (like runhooks vs sync).
if not os.path.isabs(file_list[i]):
continue
prefix = os.path.commonprefix(
[self.root.root_dir.lower(), file_list[i].lower()])
file_list[i] = file_list[i][len(prefix):]
# Strip any leading path separators.
while file_list[i].startswith(('\\', '/')):
file_list[i] = file_list[i][1:]
if self.should_recurse:
self.ParseDepsFile()
self._run_is_done(file_list or [])
if self.should_recurse:
if command in ('update', 'revert') and not options.noprehooks:
self.RunPreDepsHooks()
# Parse the dependencies of this dependency.
for s in self.dependencies:
if s.should_process:
work_queue.enqueue(s)
if command == 'recurse':
# Skip file only checkout.
scm = self.GetScmName()
if not options.scm or scm in options.scm:
cwd = os.path.normpath(os.path.join(self.root.root_dir, self.name))
# Pass in the SCM type as an env variable. Make sure we don't put
# unicode strings in the environment.
env = os.environ.copy()
if scm:
env['GCLIENT_SCM'] = str(scm)
if self.url:
env['GCLIENT_URL'] = str(self.url)
env['GCLIENT_DEP_PATH'] = str(self.name)
if options.prepend_dir and scm == 'git':
print_stdout = False
def filter_fn(line):
"""Git-specific path marshaling. It is optimized for git-grep."""
def mod_path(git_pathspec):
match = re.match('^(\\S+?:)?([^\0]+)$', git_pathspec)
modified_path = os.path.join(self.name, match.group(2))
branch = match.group(1) or ''
return '%s%s' % (branch, modified_path)
match = re.match('^Binary file ([^\0]+) matches$', line)
if match:
print('Binary file %s matches\n' % mod_path(match.group(1)))
return
items = line.split('\0')
if len(items) == 2 and items[1]:
print('%s : %s' % (mod_path(items[0]), items[1]))
elif len(items) >= 2:
# Multiple null bytes or a single trailing null byte indicate
# git is likely displaying filenames only (such as with -l)
print('\n'.join(mod_path(path) for path in items if path))
else:
print(line)
else:
print_stdout = True
filter_fn = None
if self.url is None:
print('Skipped omitted dependency %s' % cwd, file=sys.stderr)
elif os.path.isdir(cwd):
try:
gclient_utils.CheckCallAndFilter(
args, cwd=cwd, env=env, print_stdout=print_stdout,
filter_fn=filter_fn,
)
except subprocess2.CalledProcessError:
if not options.ignore:
raise
else:
print('Skipped missing %s' % cwd, file=sys.stderr)
def GetScmName(self):
raise NotImplementedError()
def CreateSCM(self, out_cb=None):
raise NotImplementedError()
def HasGNArgsFile(self):
return self._gn_args_file is not None
def WriteGNArgsFile(self):
lines = ['# Generated from %r' % self.deps_file]
variables = self.get_vars()
for arg in self._gn_args:
value = variables[arg]
if isinstance(value, basestring):
value = gclient_eval.EvaluateCondition(value, variables)
lines.append('%s = %s' % (arg, ToGNString(value)))
with open(os.path.join(self.root.root_dir, self._gn_args_file), 'w') as f:
f.write('\n'.join(lines))
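# Output sketch (hypothetical DEPS settings): with
#   gclient_gn_args_file = "build/args.gni" and gclient_gn_args = ["checkout_foo"]
# where checkout_foo evaluates to True, the generated file would contain:
#   # Generated from 'DEPS'
#   checkout_foo = true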
@gclient_utils.lockedmethod
def _run_is_done(self, file_list):
# Both these are kept for hooks that are run as a separate tree traversal.
self._file_list = file_list
self._processed = True
def GetHooks(self, options):
"""Evaluates all hooks, and return them in a flat list.
RunOnDeps() must have been called before to load the DEPS.
"""
result = []
if not self.should_process or not self.should_recurse:
# Don't run the hook when it is above recursion_limit.
return result
# If "--force" was specified, run all hooks regardless of what files have
# changed.
if self.deps_hooks:
# TODO(maruel): If the user is using git, then we don't know
# what files have changed so we always run all hooks. It'd be nice to fix
# that.
result.extend(self.deps_hooks)
for s in self.dependencies:
result.extend(s.GetHooks(options))
return result
def RunHooksRecursively(self, options, progress):
assert self.hooks_ran == False
self._hooks_ran = True
hooks = self.GetHooks(options)
if progress:
progress._total = len(hooks)
for hook in hooks:
if progress:
progress.update(extra=hook.name or '')
hook.run()
if progress:
progress.end()
def RunPreDepsHooks(self):
assert self.processed
assert self.deps_parsed
assert not self.pre_deps_hooks_ran
assert not self.hooks_ran
for s in self.dependencies:
assert not s.processed
self._pre_deps_hooks_ran = True
for hook in self.pre_deps_hooks:
hook.run()
def GetCipdRoot(self):
if self.root is self:
# Let's not infinitely recurse. If this is root and isn't an
# instance of GClient, do nothing.
return None
return self.root.GetCipdRoot()
def subtree(self, include_all):
"""Breadth first recursion excluding root node."""
dependencies = self.dependencies
for d in dependencies:
if d.should_process or include_all:
yield d
for d in dependencies:
for i in d.subtree(include_all):
yield i
@gclient_utils.lockedmethod
def add_dependency(self, new_dep):
self._dependencies.append(new_dep)
@gclient_utils.lockedmethod
def _mark_as_parsed(self, new_hooks):
self._deps_hooks.extend(new_hooks)
self._deps_parsed = True
@property
@gclient_utils.lockedmethod
def dependencies(self):
return tuple(self._dependencies)
@property
@gclient_utils.lockedmethod
def deps_hooks(self):
return tuple(self._deps_hooks)
@property
@gclient_utils.lockedmethod
def pre_deps_hooks(self):
return tuple(self._pre_deps_hooks)
@property
@gclient_utils.lockedmethod
def deps_parsed(self):
"""This is purely for debugging purposes. It's not used anywhere."""
return self._deps_parsed
@property
@gclient_utils.lockedmethod
def processed(self):
return self._processed
@property
@gclient_utils.lockedmethod
def pre_deps_hooks_ran(self):
return self._pre_deps_hooks_ran
@property
@gclient_utils.lockedmethod
def hooks_ran(self):
return self._hooks_ran
@property
@gclient_utils.lockedmethod
def allowed_hosts(self):
return self._allowed_hosts
@property
@gclient_utils.lockedmethod
def file_list(self):
return tuple(self._file_list)
@property
def used_scm(self):
"""SCMWrapper instance for this dependency or None if not processed yet."""
return self._used_scm
@property
@gclient_utils.lockedmethod
def got_revision(self):
return self._got_revision
@property
def file_list_and_children(self):
result = list(self.file_list)
for d in self.dependencies:
result.extend(d.file_list_and_children)
return tuple(result)
def __str__(self):
out = []
for i in ('name', 'url', 'custom_deps',
'custom_vars', 'deps_hooks', 'file_list', 'should_process',
'processed', 'hooks_ran', 'deps_parsed', 'requirements',
'allowed_hosts'):
# First try the native property if it exists.
if hasattr(self, '_' + i):
value = getattr(self, '_' + i, False)
else:
value = getattr(self, i, False)
if value:
out.append('%s: %s' % (i, value))
for d in self.dependencies:
out.extend([' ' + x for x in str(d).splitlines()])
out.append('')
return '\n'.join(out)
def __repr__(self):
return '%s: %s' % (self.name, self.url)
def hierarchy(self, include_url=True):
"""Returns a human-readable hierarchical reference to a Dependency."""
def format_name(d):
if include_url:
return '%s(%s)' % (d.name, d.url)
return d.name
out = format_name(self)
i = self.parent
while i and i.name:
out = '%s -> %s' % (format_name(i), out)
i = i.parent
return out
def hierarchy_data(self):
"""Returns a machine-readable hierarchical reference to a Dependency."""
d = self
out = []
while d and d.name:
out.insert(0, (d.name, d.url))
d = d.parent
return tuple(out)
def get_builtin_vars(self):
return {
'checkout_android': 'android' in self.target_os,
'checkout_chromeos': 'chromeos' in self.target_os,
'checkout_fuchsia': 'fuchsia' in self.target_os,
'checkout_ios': 'ios' in self.target_os,
'checkout_linux': 'unix' in self.target_os,
'checkout_mac': 'mac' in self.target_os,
'checkout_win': 'win' in self.target_os,
'host_os': _detect_host_os(),
'checkout_arm': 'arm' in self.target_cpu,
'checkout_arm64': 'arm64' in self.target_cpu,
'checkout_x86': 'x86' in self.target_cpu,
'checkout_mips': 'mips' in self.target_cpu,
'checkout_mips64': 'mips64' in self.target_cpu,
'checkout_ppc': 'ppc' in self.target_cpu,
'checkout_s390': 's390' in self.target_cpu,
'checkout_x64': 'x64' in self.target_cpu,
'host_cpu': detect_host_arch.HostArch(),
}
def get_vars(self):
"""Returns a dictionary of effective variable values
(DEPS file contents with applied custom_vars overrides)."""
# Variable precedence (last has highest):
# - DEPS vars
# - parents, from first to last
# - built-in
# - custom_vars overrides
result = {}
result.update(self._vars)
if self.parent:
parent_vars = self.parent.get_vars()
result.update(parent_vars)
# Provide some built-in variables.
result.update(self.get_builtin_vars())
result.update(self.custom_vars or {})
return result
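# Precedence sketch (illustrative variable name): if DEPS sets
# vars = {"checkout_foo": False} and the .gclient solution sets
# custom_vars = {"checkout_foo": True}, get_vars() returns True for
# "checkout_foo", because custom_vars is applied last.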
_PLATFORM_MAPPING = {
'cygwin': 'win',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'win',
'aix6': 'aix',
}
def _detect_host_os():
return _PLATFORM_MAPPING[sys.platform]
class GitDependency(Dependency):
"""A Dependency object that represents a single git checkout."""
#override
def GetScmName(self):
"""Always 'git'."""
return 'git'
#override
def CreateSCM(self, out_cb=None):
"""Create a Wrapper instance suitable for handling this git dependency."""
return gclient_scm.GitWrapper(
self.url, self.root.root_dir, self.name, self.outbuf, out_cb,
print_outbuf=self.print_outbuf)
class GClient(GitDependency):
"""Object that represent a gclient checkout. A tree of Dependency(), one per
solution or DEPS entry."""
DEPS_OS_CHOICES = {
"aix6": "unix",
"win32": "win",
"win": "win",
"cygwin": "win",
"darwin": "mac",
"mac": "mac",
"unix": "unix",
"linux": "unix",
"linux2": "unix",
"linux3": "unix",
"android": "android",
"ios": "ios",
"fuchsia": "fuchsia",
"chromeos": "chromeos",
}
DEFAULT_CLIENT_FILE_TEXT = ("""\
solutions = [
{ "name" : "%(solution_name)s",
"url" : "%(solution_url)s",
"deps_file" : "%(deps_file)s",
"managed" : %(managed)s,
"custom_deps" : {
},
"custom_vars": %(custom_vars)r,
},
]
""")
DEFAULT_CLIENT_CACHE_DIR_TEXT = ("""\
cache_dir = %(cache_dir)r
""")
DEFAULT_SNAPSHOT_FILE_TEXT = ("""\
# Snapshot generated with gclient revinfo --snapshot
solutions = %(solution_list)s
""")
def __init__(self, root_dir, options):
# Do not change previous behavior. Only solution level and immediate DEPS
# are processed.
self._recursion_limit = 2
super(GClient, self).__init__(
parent=None,
name=None,
url=None,
managed=True,
custom_deps=None,
custom_vars=None,
custom_hooks=None,
deps_file='unused',
should_process=True,
should_recurse=True,
relative=None,
condition=None,
print_outbuf=True)
self._options = options
if options.deps_os:
enforced_os = options.deps_os.split(',')
else:
enforced_os = [self.DEPS_OS_CHOICES.get(sys.platform, 'unix')]
if 'all' in enforced_os:
enforced_os = self.DEPS_OS_CHOICES.values()
self._enforced_os = tuple(set(enforced_os))
self._enforced_cpu = detect_host_arch.HostArch(),
self._root_dir = root_dir
self._cipd_root = None
self.config_content = None
def _CheckConfig(self):
"""Verify that the config matches the state of the existing checked-out
solutions."""
for dep in self.dependencies:
if dep.managed and dep.url:
scm = dep.CreateSCM()
actual_url = scm.GetActualRemoteURL(self._options)
if actual_url and not scm.DoesRemoteURLMatch(self._options):
mirror = scm.GetCacheMirror()
if mirror:
mirror_string = '%s (exists=%s)' % (mirror.mirror_path,
mirror.exists())
else:
mirror_string = 'not used'
raise gclient_utils.Error(
'''
Your .gclient file seems to be broken. The requested URL is different from what
is actually checked out in %(checkout_path)s.
The .gclient file contains:
URL: %(expected_url)s (%(expected_scm)s)
Cache mirror: %(mirror_string)s
The local checkout in %(checkout_path)s reports:
%(actual_url)s (%(actual_scm)s)
You should ensure that the URL listed in .gclient is correct and either change
it or fix the checkout.
''' % {'checkout_path': os.path.join(self.root_dir, dep.name),
'expected_url': dep.url,
'expected_scm': dep.GetScmName(),
'mirror_string': mirror_string,
'actual_url': actual_url,
'actual_scm': dep.GetScmName()})
def SetConfig(self, content):
assert not self.dependencies
config_dict = {}
self.config_content = content
try:
exec(content, config_dict)
except SyntaxError as e:
gclient_utils.SyntaxErrorToError('.gclient', e)
# Append any target OS that is not already being enforced to the tuple.
target_os = config_dict.get('target_os', [])
if config_dict.get('target_os_only', False):
self._enforced_os = tuple(set(target_os))
else:
self._enforced_os = tuple(set(self._enforced_os).union(target_os))
# Append any target CPU that is not already being enforced to the tuple.
target_cpu = config_dict.get('target_cpu', [])
if config_dict.get('target_cpu_only', False):
self._enforced_cpu = tuple(set(target_cpu))
else:
self._enforced_cpu = tuple(set(self._enforced_cpu).union(target_cpu))
cache_dir = config_dict.get('cache_dir', UNSET_CACHE_DIR)
if cache_dir is not UNSET_CACHE_DIR:
if cache_dir:
cache_dir = os.path.join(self.root_dir, cache_dir)
cache_dir = os.path.abspath(cache_dir)
git_cache.Mirror.SetCachePath(cache_dir)
if not target_os and config_dict.get('target_os_only', False):
raise gclient_utils.Error('Can\'t use target_os_only if target_os is '
'not specified')
if not target_cpu and config_dict.get('target_cpu_only', False):
raise gclient_utils.Error('Can\'t use target_cpu_only if target_cpu is '
'not specified')
deps_to_add = []
for s in config_dict.get('solutions', []):
try:
deps_to_add.append(GitDependency(
parent=self,
name=s['name'],
url=s['url'],
managed=s.get('managed', True),
custom_deps=s.get('custom_deps', {}),
custom_vars=s.get('custom_vars', {}),
custom_hooks=s.get('custom_hooks', []),
deps_file=s.get('deps_file', 'DEPS'),
should_process=True,
should_recurse=True,
relative=None,
condition=None,
print_outbuf=True))
except KeyError:
raise gclient_utils.Error('Invalid .gclient file. Solution is '
'incomplete: %s' % s)
metrics.collector.add(
'project_urls',
[
dep.FuzzyMatchUrl(metrics_utils.KNOWN_PROJECT_URLS)
for dep in deps_to_add
if dep.FuzzyMatchUrl(metrics_utils.KNOWN_PROJECT_URLS)
]
)
self.add_dependencies_and_close(deps_to_add, config_dict.get('hooks', []))
logging.info('SetConfig() done')
def SaveConfig(self):
gclient_utils.FileWrite(os.path.join(self.root_dir,
self._options.config_filename),
self.config_content)
@staticmethod
def LoadCurrentConfig(options):
"""Searches for and loads a .gclient file relative to the current working
dir. Returns a GClient object."""
if options.spec:
client = GClient('.', options)
client.SetConfig(options.spec)
else:
if options.verbose:
print('Looking for %s starting from %s\n' % (
options.config_filename, os.getcwd()))
path = gclient_paths.FindGclientRoot(os.getcwd(), options.config_filename)
if not path:
if options.verbose:
print('Couldn\'t find configuration file.')
return None
client = GClient(path, options)
client.SetConfig(gclient_utils.FileRead(
os.path.join(path, options.config_filename)))
if (options.revisions and
len(client.dependencies) > 1 and
any('@' not in r for r in options.revisions)):
print(
('You must specify the full solution name like --revision %s@%s\n'
'when you have multiple solutions setup in your .gclient file.\n'
'Other solutions present are: %s.') % (
client.dependencies[0].name,
options.revisions[0],
', '.join(s.name for s in client.dependencies[1:])),
file=sys.stderr)
return client
def SetDefaultConfig(self, solution_name, deps_file, solution_url,
managed=True, cache_dir=UNSET_CACHE_DIR,
custom_vars=None):
text = self.DEFAULT_CLIENT_FILE_TEXT
format_dict = {
'solution_name': solution_name,
'solution_url': solution_url,
'deps_file': deps_file,
'managed': managed,
'custom_vars': custom_vars or {},
}
if cache_dir is not UNSET_CACHE_DIR:
text += self.DEFAULT_CLIENT_CACHE_DIR_TEXT
format_dict['cache_dir'] = cache_dir
self.SetConfig(text % format_dict)
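# Usage sketch (hypothetical arguments): SetDefaultConfig("src", "DEPS",
# "https://example.com/src.git") renders DEFAULT_CLIENT_FILE_TEXT into a
# single-solution .gclient and hands it to SetConfig().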
def _SaveEntries(self):
"""Creates a .gclient_entries file to record the list of unique checkouts.
The .gclient_entries file lives in the same directory as .gclient.
"""
# Sometimes pprint.pformat will use {', sometimes it'll use { ' ... It
# makes testing a bit too fun.
result = 'entries = {\n'
for entry in self.root.subtree(False):
result += ' %s: %s,\n' % (pprint.pformat(entry.name),
pprint.pformat(entry.url))
result += '}\n'
file_path = os.path.join(self.root_dir, self._options.entries_filename)
logging.debug(result)
gclient_utils.FileWrite(file_path, result)
def _ReadEntries(self):
"""Read the .gclient_entries file for the given client.
Returns:
A dict mapping solution names to URLs, which will be empty if the
entries file hasn't been created yet.
"""
scope = {}
filename = os.path.join(self.root_dir, self._options.entries_filename)
if not os.path.exists(filename):
return {}
try:
exec(gclient_utils.FileRead(filename), scope)
except SyntaxError as e:
gclient_utils.SyntaxErrorToError(filename, e)
return scope.get('entries', {})
def _EnforceRevisions(self):
"""Checks for revision overrides."""
revision_overrides = {}
if self._options.head:
return revision_overrides
if not self._options.revisions:
return revision_overrides
solutions_names = [s.name for s in self.dependencies]
index = 0
for revision in self._options.revisions:
if '@' not in revision:
# Support for --revision 123
revision = '%s@%s' % (solutions_names[index], revision)
name, rev = revision.split('@', 1)
revision_overrides[name] = rev
index += 1
return revision_overrides
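# Sketch (hypothetical flags, single solution "src"):
#   --revision deadbeef            -> {"src": "deadbeef"}
#   --revision src@refs/tags/v1.0  -> {"src": "refs/tags/v1.0"}
# and --head suppresses all overrides.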
def _EnforcePatchRefsAndBranches(self):
"""Checks for patch refs."""
patch_refs = {}
target_branches = {}
if not self._options.patch_refs:
return patch_refs, target_branches
for given_patch_ref in self._options.patch_refs:
patch_repo, _, patch_ref = given_patch_ref.partition('@')
if not patch_repo or not patch_ref or ':' not in patch_ref:
raise gclient_utils.Error(
'Wrong revision format: %s should be of the form '
'patch_repo@target_branch:patch_ref.' % given_patch_ref)
target_branch, _, patch_ref = patch_ref.partition(':')
target_branches[patch_repo] = target_branch
patch_refs[patch_repo] = patch_ref
return patch_refs, target_branches
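# Sketch (hypothetical flag value):
#   --patch-ref src@main:refs/changes/11/1111/1
# yields patch_refs = {"src": "refs/changes/11/1111/1"} and
# target_branches = {"src": "main"}.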
def _RemoveUnversionedGitDirs(self):
"""Remove directories that are no longer part of the checkout.
Notify the user if there is an orphaned entry in their working copy.
Only delete the directory if there are no changes in it, and
delete_unversioned_trees is set to true.
"""
entries = [i.name for i in self.root.subtree(False) if i.url]
full_entries = [os.path.join(self.root_dir, e.replace('/', os.path.sep))
for e in entries]
for entry, prev_url in self._ReadEntries().items():
if not prev_url:
# entry must have been overridden via .gclient custom_deps
continue
# Fix path separator on Windows.
entry_fixed = entry.replace('/', os.path.sep)
e_dir = os.path.join(self.root_dir, entry_fixed)
# Use entry and not entry_fixed there.
if (entry not in entries and
(not any(path.startswith(entry + '/') for path in entries)) and
os.path.exists(e_dir)):
# The entry has been removed from DEPS.
scm = gclient_scm.GitWrapper(
prev_url, self.root_dir, entry_fixed, self.outbuf)
# Check to see if this directory is now part of a higher-up checkout.
scm_root = None
try:
scm_root = gclient_scm.scm.GIT.GetCheckoutRoot(scm.checkout_path)
except subprocess2.CalledProcessError:
pass
if not scm_root:
logging.warning('Could not find checkout root for %s. Unable to '
'determine whether it is part of a higher-level '
'checkout, so not removing.' % entry)
continue
# This is to handle the case of third_party/WebKit migrating from
# being a DEPS entry to being part of the main project.
# If the subproject is a Git project, we need to remove its .git
# folder. Otherwise git operations on that folder will have different
# effects depending on the current working directory.
if os.path.abspath(scm_root) == os.path.abspath(e_dir):
e_par_dir = os.path.join(e_dir, os.pardir)
if gclient_scm.scm.GIT.IsInsideWorkTree(e_par_dir):
par_scm_root = gclient_scm.scm.GIT.GetCheckoutRoot(e_par_dir)
# rel_e_dir : relative path of entry w.r.t. its parent repo.
rel_e_dir = os.path.relpath(e_dir, par_scm_root)
if gclient_scm.scm.GIT.IsDirectoryVersioned(
par_scm_root, rel_e_dir):
save_dir = scm.GetGitBackupDirPath()
# Remove any stale backup dir left over for the same project.
if os.path.exists(save_dir):
gclient_utils.rmtree(save_dir)
os.rename(os.path.join(e_dir, '.git'), save_dir)
# When switching between the two states (entry/ is a subproject
# -> entry/ is part of the outer project), it is very likely
# that some files are changed in the checkout, unless we are
# jumping *exactly* across the commit which changed just DEPS.
# In that case we want to clean up any stale files
# (coming from the old subproject) in order to end up with a
# clean checkout.
gclient_scm.scm.GIT.CleanupDir(par_scm_root, rel_e_dir)
assert not os.path.exists(os.path.join(e_dir, '.git'))
print('\nWARNING: \'%s\' has been moved from DEPS to a higher '
'level checkout. The git folder containing all the local'
' branches has been saved to %s.\n'
'If you don\'t care about its state you can safely '
'remove that folder to free up space.' % (entry, save_dir))
continue
if scm_root in full_entries:
logging.info('%s is part of a higher level checkout, not removing',
scm.GetCheckoutRoot())
continue
file_list = []
scm.status(self._options, [], file_list)
modified_files = file_list != []
if (not self._options.delete_unversioned_trees or
(modified_files and not self._options.force)):
# There are modified files in this entry. Keep warning until
# removed.
self.add_dependency(
GitDependency(
parent=self,
name=entry,
url=prev_url,
managed=False,
custom_deps={},
custom_vars={},
custom_hooks=[],
deps_file=None,
should_process=True,
should_recurse=False,
relative=None,
condition=None))
print('\nWARNING: \'%s\' is no longer part of this client.\n'
'It is recommended that you manually remove it or use '
'\'gclient sync -D\' next time.' % entry_fixed)
else:
# Delete the entry
print('\n________ deleting \'%s\' in \'%s\'' % (
entry_fixed, self.root_dir))
gclient_utils.rmtree(e_dir)
# record the current list of entries for next time
self._SaveEntries()
def RunOnDeps(self, command, args, ignore_requirements=False, progress=True):
"""Runs a command on each dependency in a client and its dependencies.
Args:
command: The command to use (e.g., 'status' or 'diff')
args: list of str - extra arguments to add to the command line.
"""
if not self.dependencies:
raise gclient_utils.Error('No solution specified')
revision_overrides = {}
patch_refs = {}
target_branches = {}
# It's unnecessary to check for revision overrides for 'recurse'.
# Save a few seconds by not calling _EnforceRevisions() in that case.
if command not in ('diff', 'recurse', 'runhooks', 'status', 'revert',
'validate'):
self._CheckConfig()
revision_overrides = self._EnforceRevisions()
if command == 'update':
patch_refs, target_branches = self._EnforcePatchRefsAndBranches()
# Disable progress for non-tty stdout.
should_show_progress = (
setup_color.IS_TTY and not self._options.verbose and progress)
pm = None
if should_show_progress:
if command in ('update', 'revert'):
pm = Progress('Syncing projects', 1)
elif command in ('recurse', 'validate'):
pm = Progress(' '.join(args), 1)
work_queue = gclient_utils.ExecutionQueue(
self._options.jobs, pm, ignore_requirements=ignore_requirements,
verbose=self._options.verbose)
for s in self.dependencies:
if s.should_process:
work_queue.enqueue(s)
work_queue.flush(revision_overrides, command, args, options=self._options,
patch_refs=patch_refs, target_branches=target_branches)
if revision_overrides:
print('Please fix your script, having invalid --revision flags will soon '
'be considered an error.', file=sys.stderr)
if patch_refs:
raise gclient_utils.Error(
'The following --patch-ref flags were not used. Please fix it:\n%s' %
('\n'.join(
patch_repo + '@' + patch_ref
for patch_repo, patch_ref in patch_refs.items())))
# Once all the dependencies have been processed, it's now safe to write
# out the gn_args_file and run the hooks.
if command == 'update':
gn_args_dep = self.dependencies[0]
if gn_args_dep._gn_args_from:
deps_map = dict([(dep.name, dep) for dep in gn_args_dep.dependencies])
gn_args_dep = deps_map.get(gn_args_dep._gn_args_from)
if gn_args_dep and gn_args_dep.HasGNArgsFile():
gn_args_dep.WriteGNArgsFile()
self._RemoveUnversionedGitDirs()
# Sync CIPD dependencies once removed deps are deleted. In case a git
# dependency was moved to CIPD, we want to remove the old git directory
# first and then sync the CIPD dep.
if self._cipd_root:
self._cipd_root.run(command)
if not self._options.nohooks:
if should_show_progress:
pm = Progress('Running hooks', 1)
self.RunHooksRecursively(self._options, pm)
return 0
def PrintRevInfo(self):
if not self.dependencies:
raise gclient_utils.Error('No solution specified')
# Load all the settings.
work_queue = gclient_utils.ExecutionQueue(
self._options.jobs, None, False, verbose=self._options.verbose)
for s in self.dependencies:
if s.should_process:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=self._options, patch_refs=None,
target_branches=None)
def ShouldPrintRevision(dep):
return (not self._options.filter
or dep.FuzzyMatchUrl(self._options.filter))
if self._options.snapshot:
json_output = []
# First level at .gclient
for d in self.dependencies:
entries = {}
def GrabDeps(dep):
"""Recursively grab dependencies."""
for d in dep.dependencies:
d.PinToActualRevision()
if ShouldPrintRevision(d):
entries[d.name] = d.url
GrabDeps(d)
GrabDeps(d)
json_output.append({
'name': d.name,
'solution_url': d.url,
'deps_file': d.deps_file,
'managed': d.managed,
'custom_deps': entries,
})
if self._options.output_json == '-':
print(json.dumps(json_output, indent=2, separators=(',', ': ')))
elif self._options.output_json:
with open(self._options.output_json, 'w') as f:
json.dump(json_output, f)
else:
# Print the snapshot configuration file
print(self.DEFAULT_SNAPSHOT_FILE_TEXT % {
'solution_list': pprint.pformat(json_output, indent=2),
})
else:
entries = {}
for d in self.root.subtree(False):
if self._options.actual:
d.PinToActualRevision()
if ShouldPrintRevision(d):
entries[d.name] = d.url
if self._options.output_json:
json_output = {
name: {
'url': rev.split('@')[0] if rev else None,
'rev': rev.split('@')[1] if rev and '@' in rev else None,
}
for name, rev in entries.items()
}
if self._options.output_json == '-':
print(json.dumps(json_output, indent=2, separators=(',', ': ')))
else:
with open(self._options.output_json, 'w') as f:
json.dump(json_output, f)
else:
keys = sorted(entries.keys())
for x in keys:
print('%s: %s' % (x, entries[x]))
logging.info(str(self))
def ParseDepsFile(self):
"""No DEPS to parse for a .gclient file."""
raise gclient_utils.Error('Internal error')
def PrintLocationAndContents(self):
# Print out the .gclient file. This is longer than if we just printed the
# client dict, but more legible, and it might contain helpful comments.
print('Loaded .gclient config in %s:\n%s' % (
self.root_dir, self.config_content))
def GetCipdRoot(self):
if not self._cipd_root:
self._cipd_root = gclient_scm.CipdRoot(
self.root_dir,
# TODO(jbudorick): Support other service URLs as necessary.
# Service URLs should be constant over the scope of a cipd
# root, so a var per DEPS file specifying the service URL
# should suffice.
'https://chrome-infra-packages.appspot.com')
return self._cipd_root
@property
def root_dir(self):
"""Root directory of gclient checkout."""
return self._root_dir
@property
def enforced_os(self):
"""What deps_os entries that are to be parsed."""
return self._enforced_os
@property
def target_os(self):
return self._enforced_os
@property
def target_cpu(self):
return self._enforced_cpu
class CipdDependency(Dependency):
"""A Dependency object that represents a single CIPD package."""
def __init__(
self, parent, name, dep_value, cipd_root,
custom_vars, should_process, relative, condition):
package = dep_value['package']
version = dep_value['version']
url = urlparse.urljoin(
cipd_root.service_url, '%s@%s' % (package, version))
super(CipdDependency, self).__init__(
parent=parent,
name=name + ':' + package,
url=url,
managed=None,
custom_deps=None,
custom_vars=custom_vars,
custom_hooks=None,
deps_file=None,
should_process=should_process,
should_recurse=False,
relative=relative,
condition=condition)
self._cipd_package = None
self._cipd_root = cipd_root
# CIPD wants /-separated paths, even on Windows.
native_subdir_path = os.path.relpath(
os.path.join(self.root.root_dir, name), cipd_root.root_dir)
self._cipd_subdir = posixpath.join(*native_subdir_path.split(os.sep))
self._package_name = package
self._package_version = version
#override
def run(self, revision_overrides, command, args, work_queue, options,
patch_refs, target_branches):
"""Runs |command| then parse the DEPS file."""
logging.info('CipdDependency(%s).run()' % self.name)
if not self.should_process:
return
self._CreatePackageIfNecessary()
super(CipdDependency, self).run(revision_overrides, command, args,
work_queue, options, patch_refs,
target_branches)
def _CreatePackageIfNecessary(self):
# We lazily create the CIPD package to make sure that only packages
# that we want (as opposed to all packages defined in all DEPS files
# we parse) get added to the root and subsequently ensured.
if not self._cipd_package:
self._cipd_package = self._cipd_root.add_package(
self._cipd_subdir, self._package_name, self._package_version)
def ParseDepsFile(self):
"""CIPD dependencies are not currently allowed to have nested deps."""
self.add_dependencies_and_close([], [])
#override
def verify_validity(self):
"""CIPD dependencies allow duplicate name for packages in same directory."""
logging.info('Dependency(%s).verify_validity()' % self.name)
return True
#override
def GetScmName(self):
"""Always 'cipd'."""
return 'cipd'
#override
def CreateSCM(self, out_cb=None):
"""Create a Wrapper instance suitable for handling this CIPD dependency."""
self._CreatePackageIfNecessary()
return gclient_scm.CipdWrapper(
self.url, self.root.root_dir, self.name, self.outbuf, out_cb,
root=self._cipd_root, package=self._cipd_package)
def hierarchy(self, include_url=False):
return self.parent.hierarchy(include_url) + ' -> ' + self._cipd_subdir
def ToLines(self):
"""Return a list of lines representing this in a DEPS file."""
def escape_cipd_var(package):
return package.replace('{', '{{').replace('}', '}}')
s = []
self._CreatePackageIfNecessary()
if self._cipd_package.authority_for_subdir:
condition_part = ([' "condition": %r,' % self.condition]
if self.condition else [])
s.extend([
' # %s' % self.hierarchy(include_url=False),
' "%s": {' % (self.name.split(':')[0],),
' "packages": [',
])
for p in sorted(
self._cipd_root.packages(self._cipd_subdir),
cmp=lambda x, y: cmp(x.name, y.name)):
s.extend([
' {',
' "package": "%s",' % escape_cipd_var(p.name),
' "version": "%s",' % p.version,
' },',
])
s.extend([
' ],',
' "dep_type": "cipd",',
] + condition_part + [
' },',
'',
])
return s
#### gclient commands.
@subcommand.usage('[command] [args ...]')
@metrics.collector.collect_metrics('gclient recurse')
def CMDrecurse(parser, args):
"""Operates [command args ...] on all the dependencies.
Runs a shell command on all entries.
Sets the GCLIENT_DEP_PATH environment variable to the dependency's location
relative to the root directory of the checkout.
"""
# Stop parsing at the first non-arg so that these go through to the command
parser.disable_interspersed_args()
parser.add_option('-s', '--scm', action='append', default=[],
help='Choose scm types to operate upon.')
parser.add_option('-i', '--ignore', action='store_true',
help='Ignore non-zero return codes from subcommands.')
parser.add_option('--prepend-dir', action='store_true',
help='Prepend relative dir for use with git <cmd> --null.')
parser.add_option('--no-progress', action='store_true',
help='Disable progress bar that shows sub-command updates')
options, args = parser.parse_args(args)
if not args:
print('Need to supply a command!', file=sys.stderr)
return 1
root_and_entries = gclient_utils.GetGClientRootAndEntries()
if not root_and_entries:
print(
'You need to run gclient sync at least once to use \'recurse\'.\n'
'This is because .gclient_entries needs to exist and be up to date.',
file=sys.stderr)
return 1
# Normalize options.scm to a set()
scm_set = set()
for scm in options.scm:
scm_set.update(scm.split(','))
options.scm = scm_set
options.nohooks = True
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
return client.RunOnDeps('recurse', args, ignore_requirements=True,
progress=not options.no_progress)
@subcommand.usage('[args ...]')
@metrics.collector.collect_metrics('gclient fetch')
def CMDfetch(parser, args):
"""Fetches upstream commits for all modules.
Completely git-specific. Simply runs 'git fetch [args ...]' for each module.
"""
(options, args) = parser.parse_args(args)
return CMDrecurse(OptionParser(), [
'--jobs=%d' % options.jobs, '--scm=git', 'git', 'fetch'] + args)
class Flattener(object):
"""Flattens a gclient solution."""
def __init__(self, client, pin_all_deps=False):
"""Constructor.
Arguments:
client (GClient): client to flatten
pin_all_deps (bool): whether to pin all deps, even if they're not pinned
in DEPS
"""
self._client = client
self._deps_string = None
self._deps_files = set()
self._allowed_hosts = set()
self._deps = {}
self._hooks = []
self._pre_deps_hooks = []
self._vars = {}
self._flatten(pin_all_deps=pin_all_deps)
@property
def deps_string(self):
assert self._deps_string is not None
return self._deps_string
@property
def deps_files(self):
return self._deps_files
def _pin_dep(self, dep):
"""Pins a dependency to specific full revision sha.
Arguments:
dep (Dependency): dependency to process
"""
if dep.url is None:
return
# Make sure the revision is always fully specified (a hash),
# as opposed to refs or tags which might change. Similarly,
# shortened shas might become ambiguous; make sure to always
# use full one for pinning.
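# Illustrative examples of the distinction drawn above (URLs are made up):
#   'https://host/repo.git@refs/heads/main'   -> branch ref, re-pinned below
#   'https://host/repo.git@deadbeef'          -> shortened sha, re-pinned below
#   'https://host/repo.git@<full 40-hex sha>' -> already pinned, left unchanged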
revision = gclient_utils.SplitUrlRevision(dep.url)[1]
if not revision or not gclient_utils.IsFullGitSha(revision):
dep.PinToActualRevision()
def _flatten(self, pin_all_deps=False):
"""Runs the flattener. Saves resulting DEPS string.
Arguments:
pin_all_deps (bool): whether to pin all deps, even if they're not pinned
in DEPS
"""
for solution in self._client.dependencies:
self._add_dep(solution)
self._flatten_dep(solution)
if pin_all_deps:
for dep in self._deps.itervalues():
self._pin_dep(dep)
def add_deps_file(dep):
# Only include DEPS files referenced by recursedeps.
if not dep.should_recurse:
return
deps_file = dep.deps_file
deps_path = os.path.join(self._client.root_dir, dep.name, deps_file)
if not os.path.exists(deps_path):
# gclient has a fallback that if deps_file doesn't exist, it'll try
# DEPS. Do the same here.
deps_file = 'DEPS'
deps_path = os.path.join(self._client.root_dir, dep.name, deps_file)
if not os.path.exists(deps_path):
return
assert dep.url
self._deps_files.add((dep.url, deps_file, dep.hierarchy_data()))
for dep in self._deps.itervalues():
add_deps_file(dep)
gn_args_dep = self._deps.get(self._client.dependencies[0]._gn_args_from,
self._client.dependencies[0])
self._deps_string = '\n'.join(
_GNSettingsToLines(gn_args_dep._gn_args_file, gn_args_dep._gn_args) +
_AllowedHostsToLines(self._allowed_hosts) +
_DepsToLines(self._deps) +
_HooksToLines('hooks', self._hooks) +
_HooksToLines('pre_deps_hooks', self._pre_deps_hooks) +
_VarsToLines(self._vars) +
['# %s, %s' % (url, deps_file)
for url, deps_file, _ in sorted(self._deps_files)] +
['']) # Ensure newline at end of file.
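# Rough shape of the flattened DEPS string assembled above (section contents
# are illustrative; the helper functions below emit the actual lines):
#   gclient_gn_args_file = "..."  /  gclient_gn_args = [...]
#   allowed_hosts = [...]
#   deps = {...}
#   hooks = [...]
#   pre_deps_hooks = [...]
#   vars = {...}
#   # <url>, <deps_file>   (one comment line per DEPS file that was read)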
def _add_dep(self, dep):
"""Helper to add a dependency to flattened DEPS.
Arguments:
dep (Dependency): dependency to add
"""
assert dep.name not in self._deps or self._deps.get(dep.name) == dep, (
dep.name, self._deps.get(dep.name))
if dep.url:
self._deps[dep.name] = dep
def _flatten_dep(self, dep):
"""Visits a dependency in order to flatten it (see CMDflatten).
Arguments:
dep (Dependency): dependency to process
"""
logging.debug('_flatten_dep(%s)', dep.name)
assert dep.deps_parsed, (
"Attempted to flatten %s but it has not been processed." % dep.name)
self._allowed_hosts.update(dep.allowed_hosts)
# Only include vars explicitly listed in the DEPS files or gclient solution,
# not automatic, local overrides (i.e. not all of dep.get_vars()).
hierarchy = dep.hierarchy(include_url=False)
for key, value in dep._vars.iteritems():
# Make sure there are no conflicting variables. It is fine however
# to use same variable name, as long as the value is consistent.
assert key not in self._vars or self._vars[key][1] == value, (
"dep:%s key:%s value:%s != %s" % (
dep.name, key, value, self._vars[key][1]))
self._vars[key] = (hierarchy, value)
# Override explicit custom variables.
for key, value in dep.custom_vars.iteritems():
# Do custom_vars that don't correspond to DEPS vars ever make sense? DEPS
# conditionals shouldn't be using vars that aren't also defined in the
# DEPS (presubmit actually disallows this), so any new custom_var must be
# unused in the DEPS, so no need to add it to the flattened output either.
if key not in self._vars:
continue
# Don't "override" existing vars if it's actually the same value.
elif self._vars[key][1] == value:
continue
# Anything else is overriding a default value from the DEPS.
self._vars[key] = (hierarchy + ' [custom_var override]', value)
self._pre_deps_hooks.extend([(dep, hook) for hook in dep.pre_deps_hooks])
self._hooks.extend([(dep, hook) for hook in dep.deps_hooks])
for sub_dep in dep.dependencies:
self._add_dep(sub_dep)
for d in dep.dependencies:
if d.should_recurse:
self._flatten_dep(d)
@metrics.collector.collect_metrics('gclient flatten')
def CMDflatten(parser, args):
"""Flattens the solutions into a single DEPS file."""
parser.add_option('--output-deps', help='Path to the output DEPS file')
parser.add_option(
'--output-deps-files',
help=('Path to the output metadata about DEPS files referenced by '
'recursedeps.'))
parser.add_option(
'--pin-all-deps', action='store_true',
help=('Pin all deps, even if not pinned in DEPS. CAVEAT: only does so '
'for checked out deps, NOT deps_os.'))
options, args = parser.parse_args(args)
options.nohooks = True
options.process_all_deps = True
client = GClient.LoadCurrentConfig(options)
# Only print progress if we're writing to a file. Otherwise, progress updates
# could obscure intended output.
code = client.RunOnDeps('flatten', args, progress=options.output_deps)
if code != 0:
return code
flattener = Flattener(client, pin_all_deps=options.pin_all_deps)
if options.output_deps:
with open(options.output_deps, 'w') as f:
f.write(flattener.deps_string)
else:
print(flattener.deps_string)
deps_files = [{'url': d[0], 'deps_file': d[1], 'hierarchy': d[2]}
for d in sorted(flattener.deps_files)]
if options.output_deps_files:
with open(options.output_deps_files, 'w') as f:
json.dump(deps_files, f)
return 0
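# Illustrative invocation (output file names are example values):
#   gclient flatten --pin-all-deps --output-deps=flattened.DEPS \
#       --output-deps-files=flattened_deps_files.json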
def _GNSettingsToLines(gn_args_file, gn_args):
s = []
if gn_args_file:
s.extend([
'gclient_gn_args_file = "%s"' % gn_args_file,
'gclient_gn_args = %r' % gn_args,
])
return s
def _AllowedHostsToLines(allowed_hosts):
"""Converts |allowed_hosts| set to list of lines for output."""
if not allowed_hosts:
return []
s = ['allowed_hosts = [']
for h in sorted(allowed_hosts):
s.append(' "%s",' % h)
s.extend([']', ''])
return s
def _DepsToLines(deps):
"""Converts |deps| dict to list of lines for output."""
if not deps:
return []
s = ['deps = {']
for _, dep in sorted(deps.iteritems()):
s.extend(dep.ToLines())
s.extend(['}', ''])
return s
def _DepsOsToLines(deps_os):
"""Converts |deps_os| dict to list of lines for output."""
if not deps_os:
return []
s = ['deps_os = {']
for dep_os, os_deps in sorted(deps_os.iteritems()):
s.append(' "%s": {' % dep_os)
for name, dep in sorted(os_deps.iteritems()):
condition_part = ([' "condition": %r,' % dep.condition]
if dep.condition else [])
s.extend([
' # %s' % dep.hierarchy(include_url=False),
' "%s": {' % (name,),
' "url": "%s",' % (dep.url,),
] + condition_part + [
' },',
'',
])
s.extend([' },', ''])
s.extend(['}', ''])
return s
def _HooksToLines(name, hooks):
"""Converts |hooks| list to list of lines for output."""
if not hooks:
return []
s = ['%s = [' % name]
for dep, hook in hooks:
s.extend([
' # %s' % dep.hierarchy(include_url=False),
' {',
])
if hook.name is not None:
s.append(' "name": "%s",' % hook.name)
if hook.pattern is not None:
s.append(' "pattern": "%s",' % hook.pattern)
if hook.condition is not None:
s.append(' "condition": %r,' % hook.condition)
# Flattened hooks need to be written relative to the root gclient dir
cwd = os.path.relpath(os.path.normpath(hook.effective_cwd))
s.extend(
[' "cwd": "%s",' % cwd] +
[' "action": ['] +
[' "%s",' % arg for arg in hook.action] +
[' ]', ' },', '']
)
s.extend([']', ''])
return s
def _HooksOsToLines(hooks_os):
"""Converts |hooks| list to list of lines for output."""
if not hooks_os:
return []
s = ['hooks_os = {']
for hook_os, os_hooks in hooks_os.iteritems():
s.append(' "%s": [' % hook_os)
for dep, hook in os_hooks:
s.extend([
' # %s' % dep.hierarchy(include_url=False),
' {',
])
if hook.name is not None:
s.append(' "name": "%s",' % hook.name)
if hook.pattern is not None:
s.append(' "pattern": "%s",' % hook.pattern)
if hook.condition is not None:
s.append(' "condition": %r,' % hook.condition)
# Flattened hooks need to be written relative to the root gclient dir
cwd = os.path.relpath(os.path.normpath(hook.effective_cwd))
s.extend(
[' "cwd": "%s",' % cwd] +
[' "action": ['] +
[' "%s",' % arg for arg in hook.action] +
[' ]', ' },', '']
)
s.extend([' ],', ''])
s.extend(['}', ''])
return s
def _VarsToLines(variables):
"""Converts |variables| dict to list of lines for output."""
if not variables:
return []
s = ['vars = {']
for key, tup in sorted(variables.iteritems()):
hierarchy, value = tup
s.extend([
' # %s' % hierarchy,
' "%s": %r,' % (key, value),
'',
])
s.extend(['}', ''])
return s
@metrics.collector.collect_metrics('gclient grep')
def CMDgrep(parser, args):
"""Greps through git repos managed by gclient.
Runs 'git grep [args...]' for each module.
"""
# We can't use optparse because it will try to parse arguments sent
# to git grep and throw an error. :-(
if not args or re.match('(-h|--help)$', args[0]):
print(
'Usage: gclient grep [-j <N>] git-grep-args...\n\n'
'Example: "gclient grep -j10 -A2 RefCountedBase" runs\n"git grep '
'-A2 RefCountedBase" on each of gclient\'s git\nrepos with up to '
'10 jobs.\n\nBonus: page output by appending "|& less -FRSX" to the'
' end of your query.',
file=sys.stderr)
return 1
jobs_arg = ['--jobs=1']
if re.match(r'(-j|--jobs=)\d+$', args[0]):
jobs_arg, args = args[:1], args[1:]
elif re.match(r'(-j|--jobs)$', args[0]):
jobs_arg, args = args[:2], args[2:]
return CMDrecurse(
parser,
jobs_arg + ['--ignore', '--prepend-dir', '--no-progress', '--scm=git',
'git', 'grep', '--null', '--color=Always'] + args)
@metrics.collector.collect_metrics('gclient root')
def CMDroot(parser, args):
"""Outputs the solution root (or current dir if there isn't one)."""
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if client:
print(os.path.abspath(client.root_dir))
else:
print(os.path.abspath('.'))
@subcommand.usage('[url]')
@metrics.collector.collect_metrics('gclient config')
def CMDconfig(parser, args):
"""Creates a .gclient file in the current directory.
This specifies the configuration for further commands. After update/sync,
top-level DEPS files in each module are read to determine dependent
modules to operate on as well. If optional [url] parameter is
provided, then configuration is read from a specified Subversion server
URL.
"""
# We do a little dance with the --gclientfile option. 'gclient config' is the
# only command where it's acceptable to have both '--gclientfile' and '--spec'
# arguments. So, we temporarily stash any --gclientfile parameter into
# options.output_config_file until after the (gclientfile xor spec) error
# check.
parser.remove_option('--gclientfile')
parser.add_option('--gclientfile', dest='output_config_file',
help='Specify an alternate .gclient file')
parser.add_option('--name',
help='overrides the default name for the solution')
parser.add_option('--deps-file', default='DEPS',
help='overrides the default name for the DEPS file for the '
'main solutions and all sub-dependencies')
parser.add_option('--unmanaged', action='store_true', default=False,
help='overrides the default behavior to make it possible '
'to have the main solution untouched by gclient '
'(gclient will check out unmanaged dependencies but '
'will never sync them)')
parser.add_option('--cache-dir', default=UNSET_CACHE_DIR,
help='Cache all git repos into this dir and do shared '
'clones from the cache, instead of cloning directly '
'from the remote. Pass "None" to disable cache, even '
'if globally enabled due to $GIT_CACHE_PATH.')
parser.add_option('--custom-var', action='append', dest='custom_vars',
default=[],
help='overrides variables; key=value syntax')
parser.set_defaults(config_filename=None)
(options, args) = parser.parse_args(args)
if options.output_config_file:
setattr(options, 'config_filename', getattr(options, 'output_config_file'))
if ((options.spec and args) or len(args) > 2 or
(not options.spec and not args)):
parser.error('Inconsistent arguments. Use either --spec or one or 2 args')
if (options.cache_dir is not UNSET_CACHE_DIR
and options.cache_dir.lower() == 'none'):
options.cache_dir = None
custom_vars = {}
for arg in options.custom_vars:
kv = arg.split('=', 1)
if len(kv) != 2:
parser.error('Invalid --custom-var argument: %r' % arg)
custom_vars[kv[0]] = gclient_eval.EvaluateCondition(kv[1], {})
client = GClient('.', options)
if options.spec:
client.SetConfig(options.spec)
else:
base_url = args[0].rstrip('/')
if not options.name:
name = base_url.split('/')[-1]
if name.endswith('.git'):
name = name[:-4]
else:
# specify an alternate relpath for the given URL.
name = options.name
if not os.path.abspath(os.path.join(os.getcwd(), name)).startswith(
os.getcwd()):
parser.error('Do not pass a relative path for --name.')
if any(x in ('..', '.', '/', '\\') for x in name.split(os.sep)):
parser.error('Do not include relative path components in --name.')
deps_file = options.deps_file
client.SetDefaultConfig(name, deps_file, base_url,
managed=not options.unmanaged,
cache_dir=options.cache_dir,
custom_vars=custom_vars)
client.SaveConfig()
return 0
@subcommand.epilog("""Example:
gclient pack > patch.txt
generate a simple patch for the configured client and dependencies
""")
@metrics.collector.collect_metrics('gclient pack')
def CMDpack(parser, args):
"""Generates a patch which can be applied at the root of the tree.
Internally, runs 'git diff' on each checked out module and
dependencies, and performs minimal postprocessing of the output. The
resulting patch is printed to stdout and can be applied to a freshly
checked out tree via 'patch -p0 < patchfile'.
"""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.remove_option('--jobs')
(options, args) = parser.parse_args(args)
# Force jobs to 1 so the stdout is not annotated with the thread ids
options.jobs = 1
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
return client.RunOnDeps('pack', args)
@metrics.collector.collect_metrics('gclient status')
def CMDstatus(parser, args):
"""Shows modification status for every dependencies."""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
return client.RunOnDeps('status', args)
@subcommand.epilog("""Examples:
gclient sync
update files from SCM according to current configuration,
*for modules which have changed since last update or sync*
gclient sync --force
update files from SCM according to current configuration, for
all modules (useful for recovering files deleted from local copy)
gclient sync --revision src@31000
update src directory to r31000
JSON output format:
If the --output-json option is specified, the following document structure will
be emitted to the provided file. 'null' entries may occur for subprojects which
are present in the gclient solution, but were not processed (due to custom_deps,
os_deps, etc.)
{
"solutions" : {
"<name>": { # <name> is the posix-normalized path to the solution.
"revision": [<git id hex string>|null],
"scm": ["git"|null],
}
}
}
""")
@metrics.collector.collect_metrics('gclient sync')
def CMDsync(parser, args):
"""Checkout/update all modules."""
parser.add_option('-f', '--force', action='store_true',
help='force update even for unchanged modules')
parser.add_option('-n', '--nohooks', action='store_true',
help='don\'t run hooks after the update is complete')
parser.add_option('-p', '--noprehooks', action='store_true',
help='don\'t run pre-DEPS hooks', default=False)
parser.add_option('-r', '--revision', action='append',
dest='revisions', metavar='REV', default=[],
help='Enforces revision/hash for the solutions with the '
'format src@rev. The src@ part is optional and can be '
'skipped. You can also specify URLs instead of paths '
'and gclient will find the solution corresponding to '
'the given URL. If a path is also specified, the URL '
'takes precedence. -r can be used multiple times when '
'.gclient has multiple solutions configured, and will '
'work even if the src@ part is skipped.')
parser.add_option('--patch-ref', action='append',
dest='patch_refs', metavar='GERRIT_REF', default=[],
help='Patches the given reference with the format '
'dep@target-ref:patch-ref. '
'For |dep|, you can specify URLs as well as paths, '
'with URLs taking preference. '
'|patch-ref| will be applied to |dep|, rebased on top '
'of what |dep| was synced to, and a soft reset will '
'be done. Use --no-rebase-patch-ref and '
'--no-reset-patch-ref to disable this behavior. '
'|target-ref| is the target branch against which a '
'patch was created, it is used to determine which '
'commits from the |patch-ref| actually constitute a '
'patch.')
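# Illustrative --patch-ref value following the dep@target-ref:patch-ref format
# described above (all names and numbers are example values):
#   --patch-ref 'src/third_party/dep@refs/heads/main:refs/changes/56/123456/7'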
parser.add_option('--with_branch_heads', action='store_true',
help='Clone git "branch_heads" refspecs in addition to '
'the default refspecs. This adds about 1/2GB to a '
'full checkout. (git only)')
parser.add_option('--with_tags', action='store_true',
help='Clone git tags in addition to the default refspecs.')
parser.add_option('-H', '--head', action='store_true',
help='DEPRECATED: only made sense with safesync urls.')
parser.add_option('-D', '--delete_unversioned_trees', action='store_true',
help='Deletes from the working copy any dependencies that '
'have been removed since the last sync, as long as '
'there are no local modifications. When used with '
'--force, such dependencies are removed even if they '
'have local modifications. When used with --reset, '
'all untracked directories are removed from the '
'working copy, excluding those which are explicitly '
'ignored in the repository.')
parser.add_option('-R', '--reset', action='store_true',
help='resets any local changes before updating (git only)')
parser.add_option('-M', '--merge', action='store_true',
help='merge upstream changes instead of trying to '
'fast-forward or rebase')
parser.add_option('-A', '--auto_rebase', action='store_true',
help='Automatically rebase repositories against local '
'checkout during update (git only).')
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('--process-all-deps', action='store_true',
help='Check out all deps, even for different OS-es, '
'or with conditions evaluating to false')
parser.add_option('--upstream', action='store_true',
help='Make repo state match upstream branch.')
parser.add_option('--output-json',
help='Output a json document to this path containing '
'summary information about the sync.')
parser.add_option('--no-history', action='store_true',
help='GIT ONLY - Reduces the size/time of the checkout at '
'the cost of no history. Requires Git 1.9+')
parser.add_option('--shallow', action='store_true',
help='GIT ONLY - Do a shallow clone into the cache dir. '
'Requires Git 1.9+')
parser.add_option('--no_bootstrap', '--no-bootstrap',
action='store_true',
help='Don\'t bootstrap from Google Storage.')
parser.add_option('--ignore_locks', action='store_true',
help='GIT ONLY - Ignore cache locks.')
parser.add_option('--break_repo_locks', action='store_true',
help='GIT ONLY - Forcibly remove repo locks (e.g. '
'index.lock). This should only be used if you know for '
'certain that this invocation of gclient is the only '
'thing operating on the git repos (e.g. on a bot).')
parser.add_option('--lock_timeout', type='int', default=5000,
help='GIT ONLY - Deadline (in seconds) to wait for git '
'cache lock to become available. Default is %default.')
# TODO(agable): Remove these when the oldest CrOS release milestone is M56.
parser.add_option('-t', '--transitive', action='store_true',
help='DEPRECATED: This is a no-op.')
parser.add_option('-m', '--manually_grab_svn_rev', action='store_true',
help='DEPRECATED: This is a no-op.')
# TODO(phajdan.jr): Remove validation options once default (crbug/570091).
parser.add_option('--validate-syntax', action='store_true', default=True,
help='Validate the .gclient and DEPS syntax')
parser.add_option('--disable-syntax-validation', action='store_false',
dest='validate_syntax',
help='Disable validation of .gclient and DEPS syntax.')
parser.add_option('--no-rebase-patch-ref', action='store_false',
dest='rebase_patch_ref', default=True,
help='Bypass rebase of the patch ref after checkout.')
parser.add_option('--no-reset-patch-ref', action='store_false',
dest='reset_patch_ref', default=True,
help='Bypass calling reset after patching the ref.')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.revisions and options.head:
# TODO(maruel): Make it a parser.error if it doesn't break any builder.
print('Warning: you cannot use both --head and --revision')
if options.verbose:
client.PrintLocationAndContents()
ret = client.RunOnDeps('update', args)
if options.output_json:
slns = {}
for d in client.subtree(True):
normed = d.name.replace('\\', '/').rstrip('/') + '/'
slns[normed] = {
'revision': d.got_revision,
'scm': d.used_scm.name if d.used_scm else None,
'url': str(d.url) if d.url else None,
'was_processed': d.should_process,
}
with open(options.output_json, 'wb') as f:
json.dump({'solutions': slns}, f)
return ret
CMDupdate = CMDsync
@metrics.collector.collect_metrics('gclient validate')
def CMDvalidate(parser, args):
"""Validates the .gclient and DEPS syntax."""
options, args = parser.parse_args(args)
options.validate_syntax = True
client = GClient.LoadCurrentConfig(options)
rv = client.RunOnDeps('validate', args)
if rv == 0:
print('validate: SUCCESS')
else:
print('validate: FAILURE')
return rv
@metrics.collector.collect_metrics('gclient diff')
def CMDdiff(parser, args):
"""Displays local diff for every dependencies."""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
return client.RunOnDeps('diff', args)
@metrics.collector.collect_metrics('gclient revert')
def CMDrevert(parser, args):
"""Reverts all modifications in every dependencies.
That's the nuclear option to get back to a 'clean' state. It removes anything
that shows up in git status."""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('-n', '--nohooks', action='store_true',
help='don\'t run hooks after the revert is complete')
parser.add_option('-p', '--noprehooks', action='store_true',
help='don\'t run pre-DEPS hooks', default=False)
parser.add_option('--upstream', action='store_true',
help='Make repo state match upstream branch.')
parser.add_option('--break_repo_locks', action='store_true',
help='GIT ONLY - Forcibly remove repo locks (e.g. '
'index.lock). This should only be used if you know for '
'certain that this invocation of gclient is the only '
'thing operating on the git repos (e.g. on a bot).')
(options, args) = parser.parse_args(args)
# --force is implied.
options.force = True
options.reset = False
options.delete_unversioned_trees = False
options.merge = False
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
return client.RunOnDeps('revert', args)
@metrics.collector.collect_metrics('gclient runhooks')
def CMDrunhooks(parser, args):
"""Runs hooks for files that have been modified in the local working copy."""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('-f', '--force', action='store_true', default=True,
help='Deprecated. No effect.')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
options.force = True
options.nohooks = False
return client.RunOnDeps('runhooks', args)
@metrics.collector.collect_metrics('gclient revinfo')
def CMDrevinfo(parser, args):
"""Outputs revision info mapping for the client and its dependencies.
This allows the capture of an overall 'revision' for the source tree that
can be used to reproduce the same tree in the future. It is only useful for
'unpinned dependencies', i.e. DEPS/deps references without a git hash.
A git branch name isn't 'pinned' since the actual commit can change.
"""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('-a', '--actual', action='store_true',
help='gets the actual checked out revisions instead of the '
'ones specified in the DEPS and .gclient files')
parser.add_option('-s', '--snapshot', action='store_true',
help='creates a snapshot .gclient file of the current '
'version of all repositories to reproduce the tree, '
'implies -a')
parser.add_option('--filter', action='append', dest='filter',
help='Display revision information only for the specified '
'dependencies (filtered by URL or path).')
parser.add_option('--output-json',
help='Output a json document to this path containing '
'information about the revisions.')
parser.add_option('--ignore-dep-type', choices=['git', 'cipd'],
help='Specify to skip processing of a certain type of dep.')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
client.PrintRevInfo()
return 0
@metrics.collector.collect_metrics('gclient getdep')
def CMDgetdep(parser, args):
"""Gets revision information and variable values from a DEPS file."""
parser.add_option('--var', action='append',
dest='vars', metavar='VAR', default=[],
help='Gets the value of a given variable.')
parser.add_option('-r', '--revision', action='append',
dest='getdep_revisions', metavar='DEP', default=[],
help='Gets the revision/version for the given dependency. '
'If it is a git dependency, dep must be a path. If it '
'is a CIPD dependency, dep must be of the form '
'path:package.')
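# Illustrative usage of the options above (paths, packages and variable names
# are example values):
#   gclient getdep --var=checkout_foo
#   gclient getdep -r src/third_party/foo
#   gclient getdep -r src/tools/bar:infra/tools/bar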
parser.add_option('--deps-file', default='DEPS',
# TODO(ehmaldonado): Try to find the DEPS file pointed by
# .gclient first.
help='The DEPS file to be edited. Defaults to the DEPS '
'file in the current directory.')
(options, args) = parser.parse_args(args)
if not os.path.isfile(options.deps_file):
raise gclient_utils.Error(
'DEPS file %s does not exist.' % options.deps_file)
with open(options.deps_file) as f:
contents = f.read()
client = GClient.LoadCurrentConfig(options)
if client is not None:
builtin_vars = client.get_builtin_vars()
else:
logging.warn(
'Couldn\'t find a valid gclient config. Will attempt to parse the DEPS '
'file without support for built-in variables.')
builtin_vars = None
local_scope = gclient_eval.Exec(contents, options.deps_file,
builtin_vars=builtin_vars)
for var in options.vars:
print(gclient_eval.GetVar(local_scope, var))
for name in options.getdep_revisions:
if ':' in name:
name, _, package = name.partition(':')
if not name or not package:
parser.error(
'Wrong CIPD format: %s:%s should be of the form path:pkg.'
% (name, package))
print(gclient_eval.GetCIPD(local_scope, name, package))
else:
print(gclient_eval.GetRevision(local_scope, name))
@metrics.collector.collect_metrics('gclient setdep')
def CMDsetdep(parser, args):
"""Modifies dependency revisions and variable values in a DEPS file"""
parser.add_option('--var', action='append',
dest='vars', metavar='VAR=VAL', default=[],
help='Sets a variable to the given value with the format '
'name=value.')
parser.add_option('-r', '--revision', action='append',
dest='setdep_revisions', metavar='DEP@REV', default=[],
help='Sets the revision/version for the dependency with '
'the format dep@rev. If it is a git dependency, dep '
'must be a path and rev must be a git hash or '
'reference (e.g. src/dep@deadbeef). If it is a CIPD '
'dependency, dep must be of the form path:package and '
'rev must be the package version '
'(e.g. src/pkg:chromium/pkg@2.1-cr0).')
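# Illustrative usage of the options above (all values are examples):
#   gclient setdep --var=foo_revision=deadbeef0000000000000000000000000000dead
#   gclient setdep -r src/third_party/foo@deadbeef0000000000000000000000000000dead
#   gclient setdep -r src/pkg:chromium/pkg@2.1-cr1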
parser.add_option('--deps-file', default='DEPS',
# TODO(ehmaldonado): Try to find the DEPS file pointed by
# .gclient first.
help='The DEPS file to be edited. Defaults to the DEPS '
'file in the current directory.')
(options, args) = parser.parse_args(args)
if args:
parser.error('Unused arguments: "%s"' % '" "'.join(args))
if not options.setdep_revisions and not options.vars:
parser.error(
'You must specify at least one variable or revision to modify.')
if not os.path.isfile(options.deps_file):
raise gclient_utils.Error(
'DEPS file %s does not exist.' % options.deps_file)
with open(options.deps_file) as f:
contents = f.read()
client = GClient.LoadCurrentConfig(options)
if client is not None:
builtin_vars = client.get_builtin_vars()
else:
logging.warn(
'Couldn\'t find a valid gclient config. Will attempt to parse the DEPS '
'file without support for built-in variables.')
builtin_vars = None
local_scope = gclient_eval.Exec(contents, options.deps_file,
builtin_vars=builtin_vars)
for var in options.vars:
name, _, value = var.partition('=')
if not name or not value:
parser.error(
'Wrong var format: %s should be of the form name=value.' % var)
if name in local_scope['vars']:
gclient_eval.SetVar(local_scope, name, value)
else:
gclient_eval.AddVar(local_scope, name, value)
for revision in options.setdep_revisions:
name, _, value = revision.partition('@')
if not name or not value:
parser.error(
'Wrong dep format: %s should be of the form dep@rev.' % revision)
if ':' in name:
name, _, package = name.partition(':')
if not name or not package:
parser.error(
'Wrong CIPD format: %s:%s should be of the form path:pkg@version.'
% (name, package))
gclient_eval.SetCIPD(local_scope, name, package, value)
else:
gclient_eval.SetRevision(local_scope, name, value)
with open(options.deps_file, 'w') as f:
f.write(gclient_eval.RenderDEPSFile(local_scope))
@metrics.collector.collect_metrics('gclient verify')
def CMDverify(parser, args):
"""Verifies the DEPS file deps are only from allowed_hosts."""
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
client.RunOnDeps(None, [])
# Look at each first-level dependency of this gclient only.
for dep in client.dependencies:
bad_deps = dep.findDepsFromNotAllowedHosts()
if not bad_deps:
continue
print("There are deps from not allowed hosts in file %s" % dep.deps_file)
for bad_dep in bad_deps:
print("\t%s at %s" % (bad_dep.name, bad_dep.url))
print("allowed_hosts:", ', '.join(dep.allowed_hosts))
sys.stdout.flush()
raise gclient_utils.Error(
'dependencies from disallowed hosts; check your DEPS file.')
return 0
@subcommand.epilog("""For more information on what metrics are we collecting and
why, please read metrics.README.md or visit https://bit.ly/2ufRS4p""")
@metrics.collector.collect_metrics('gclient metrics')
def CMDmetrics(parser, args):
"""Reports, and optionally modifies, the status of metric collection."""
parser.add_option('--opt-in', action='store_true', dest='enable_metrics',
help='Opt-in to metrics collection.',
default=None)
parser.add_option('--opt-out', action='store_false', dest='enable_metrics',
help='Opt-out of metrics collection.')
options, args = parser.parse_args(args)
if args:
parser.error('Unused arguments: "%s"' % '" "'.join(args))
if not metrics.collector.config.is_googler:
print("You're not a Googler. Metrics collection is disabled for you.")
return 0
if options.enable_metrics is not None:
metrics.collector.config.opted_in = options.enable_metrics
if metrics.collector.config.opted_in is None:
print("You haven't opted in or out of metrics collection.")
elif metrics.collector.config.opted_in:
print("You have opted in. Thanks!")
else:
print("You have opted out. Please consider opting in.")
return 0
class OptionParser(optparse.OptionParser):
gclientfile_default = os.environ.get('GCLIENT_FILE', '.gclient')
def __init__(self, **kwargs):
optparse.OptionParser.__init__(
self, version='%prog ' + __version__, **kwargs)
# Some arm boards have issues with parallel sync.
if platform.machine().startswith('arm'):
jobs = 1
else:
jobs = max(8, gclient_utils.NumLocalCpus())
self.add_option(
'-j', '--jobs', default=jobs, type='int',
help='Specify how many SCM commands can run in parallel; defaults to '
'%default on this machine')
self.add_option(
'-v', '--verbose', action='count', default=0,
help='Produces additional output for diagnostics. Can be used up to '
'three times for more logging info.')
self.add_option(
'--gclientfile', dest='config_filename',
help='Specify an alternate %s file' % self.gclientfile_default)
self.add_option(
'--spec',
help='create a gclient file containing the provided string. Due to '
'Cygwin/Python brokenness, it can\'t contain any newlines.')
self.add_option(
'--no-nag-max', default=False, action='store_true',
help='Ignored for backwards compatibility.')
def parse_args(self, args=None, _values=None):
"""Integrates standard options processing."""
# Create an optparse.Values object that will store only the actual passed
# options, without the defaults.
actual_options = optparse.Values()
_, args = optparse.OptionParser.parse_args(self, args, actual_options)
# Create an optparse.Values object with the default options.
options = optparse.Values(self.get_default_values().__dict__)
# Update it with the options passed by the user.
options._update_careful(actual_options.__dict__)
# Store the options passed by the user in an _actual_options attribute.
# We store only the keys, and not the values, since the values can contain
# arbitrary information, which might be PII.
metrics.collector.add('arguments', actual_options.__dict__.keys())
levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(
level=levels[min(options.verbose, len(levels) - 1)],
format='%(module)s(%(lineno)d) %(funcName)s:%(message)s')
if options.config_filename and options.spec:
self.error('Cannot specify both --gclientfile and --spec')
if (options.config_filename and
options.config_filename != os.path.basename(options.config_filename)):
self.error('--gclientfile target must be a filename, not a path')
if not options.config_filename:
options.config_filename = self.gclientfile_default
options.entries_filename = options.config_filename + '_entries'
if options.jobs < 1:
self.error('--jobs must be 1 or higher')
# These hacks need to die.
if not hasattr(options, 'revisions'):
# GClient.RunOnDeps expects it even if not applicable.
options.revisions = []
if not hasattr(options, 'head'):
options.head = None
if not hasattr(options, 'nohooks'):
options.nohooks = True
if not hasattr(options, 'noprehooks'):
options.noprehooks = True
if not hasattr(options, 'deps_os'):
options.deps_os = None
if not hasattr(options, 'force'):
options.force = None
return (options, args)
def disable_buffering():
# Make stdout auto-flush so buildbot doesn't kill us during lengthy
# operations. Python has a strong tendency to buffer sys.stdout.
sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
# Make stdout annotated with the thread ids.
sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout)
def path_contains_tilde():
for element in os.environ['PATH'].split(os.pathsep):
if element.startswith('~') and os.path.abspath(
os.path.realpath(os.path.expanduser(element))) == DEPOT_TOOLS_DIR:
return True
return False
def can_run_gclient_and_helpers():
if sys.hexversion < 0x02060000:
print(
'\nYour python version %s is unsupported, please upgrade.\n' %
sys.version.split(' ', 1)[0],
file=sys.stderr)
return False
if not sys.executable:
print(
'\nPython cannot find the location of its own executable.\n',
file=sys.stderr)
return False
if path_contains_tilde():
print(
'\nYour PATH contains a literal "~", which works in some shells ' +
'but will break when python tries to run subprocesses. ' +
'Replace the "~" with $HOME.\n' +
'See https://crbug.com/952865.\n',
file=sys.stderr)
return False
return True
def main(argv):
"""Doesn't parse the arguments here, just find the right subcommand to
execute."""
if not can_run_gclient_and_helpers():
return 2
fix_encoding.fix_encoding()
disable_buffering()
setup_color.init()
dispatcher = subcommand.CommandDispatcher(__name__)
try:
return dispatcher.execute(OptionParser(), argv)
except KeyboardInterrupt:
gclient_utils.GClientChildren.KillAllRemainingChildren()
raise
except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
print('Error: %s' % str(e), file=sys.stderr)
return 1
finally:
gclient_utils.PrintWarnings()
return 0
if '__main__' == __name__:
with metrics.collector.print_notice_and_exit():
sys.exit(main(sys.argv[1:]))
# vim: ts=2:sw=2:tw=80:et:
avg_line_length: 37.761724 | max_line_length: 80 | alphanum_fraction: 0.63642
hexsha: 627d25238f29603bcdfa819d6b247c4bd9abacd6 | size: 6,125 | ext: py | lang: Python
max_stars_repo_path: reboot/settings.py | max_stars_repo_name: leesw98/reBOOT | max_stars_repo_head_hexsha: 41eaf3ceb9ed21263482022668fbd8e44e90a0fa | max_stars_repo_licenses: ["MIT"] | max_stars_count: 10 | stars_event: 2017-10-17T04:35:44.000Z .. 2021-03-19T21:12:15.000Z
max_issues_repo_path: reboot/settings.py | max_issues_repo_name: leesw98/reBOOT | max_issues_repo_head_hexsha: 41eaf3ceb9ed21263482022668fbd8e44e90a0fa | max_issues_repo_licenses: ["MIT"] | max_issues_count: 224 | issues_event: 2017-10-18T18:33:48.000Z .. 2022-02-02T03:33:04.000Z
max_forks_repo_path: reboot/settings.py | max_forks_repo_name: leesw98/reBOOT | max_forks_repo_head_hexsha: 41eaf3ceb9ed21263482022668fbd8e44e90a0fa | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks_event: 2018-08-02T03:10:25.000Z .. 2018-08-02T03:10:25.000Z
content:
"""
Django settings for reboot project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from decouple import config, Csv
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'rangefilter',
'app.apps.AppConfig',
'admin_reorder',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'admin_reorder.middleware.ModelAdminReorder',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ADMIN_REORDER = (
# Keep original label and models
'sites',
# Rename app
{'app': 'app', 'models': (
'app.Donation',
'app.Donor',
'app.Item',
{'model': 'app.ItemDevice', 'label': 'Item Devices'},
{'model': 'app.ItemDeviceType', 'label': 'Item Device Types'}
)},
# Reorder app models
{'app': 'auth', 'models': ('auth.User', 'auth.Group')},
)
ROOT_URLCONF = 'reboot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR), 'app.templates'],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
]
WSGI_APPLICATION = 'reboot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Reboot Development Database
DATABASES = {
'live': dj_database_url.config(),
'local': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME', default='reboot'),
'USER': config('DB_USER', default='root'),
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
default_database = config('DJANGO_DATABASE', default='local')
print('Using ' + default_database + ' database')
DATABASES['default'] = DATABASES[default_database]
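# Illustrative selection of the database at run time (environment values are
# examples; the 'live' entry relies on dj_database_url reading DATABASE_URL):
#   DJANGO_DATABASE=local python manage.py runserver
#   DJANGO_DATABASE=live  python manage.py migrate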
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Toronto'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
ADMINS = [config('ADMIN', cast=Csv(post_process=tuple))]
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': ('%(asctime)s [%(process)d] [%(levelname)s] ' +
'pathname=%(pathname)s lineno=%(lineno)s ' +
'funcname=%(funcName)s %(message)s'),
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
}
},
'loggers': {
'app': {
'handlers': ['console'],
'level': 'INFO',
},
'app.views': {
'handlers': ['console'],
'level': 'INFO',
}
}
}
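# Illustrative use of the 'app' logger configured above (logger name matches
# the config; the message and variable are example values):
#   import logging
#   logging.getLogger('app').info('donation %s processed', donation_id)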
avg_line_length: 28.356481 | max_line_length: 91 | alphanum_fraction: 0.639837
hexsha: b21e0ef24397a99629eb2fec16c1582d4585db75 | size: 875 | ext: py | lang: Python
max_stars_repo_path: parsl/tests/sites/test_cooley_singularity.py | max_stars_repo_name: benclifford/parsl | max_stars_repo_head_hexsha: 21f8681882779050d2e074591e95ada43789748f | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 2 | stars_event: 2019-02-25T16:43:30.000Z .. 2019-03-04T17:25:00.000Z
max_issues_repo_path: parsl/tests/sites/test_cooley_singularity.py | max_issues_repo_name: benclifford/parsl | max_issues_repo_head_hexsha: 21f8681882779050d2e074591e95ada43789748f | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | issues_event: null .. null
max_forks_repo_path: parsl/tests/sites/test_cooley_singularity.py | max_forks_repo_name: benclifford/parsl | max_forks_repo_head_hexsha: 21f8681882779050d2e074591e95ada43789748f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 2 | forks_event: 2019-04-30T13:46:23.000Z .. 2019-06-04T16:14:46.000Z
content:
import parsl
from parsl.app.app import App
from parsl.tests.configs.cooley_local_single_node import config
parsl.clear()
parsl.load(config)
parsl.set_stream_logger()
@App("bash")
def freesurfer(stdout=None, stderr=None):
return """singularity exec ~madduri/freesurfer.simg recon-all
"""
if __name__ == "__main__":
N = 4
results = {}
for i in range(0, N):
results[i] = freesurfer(stdout="freesurfer.{}.out".format(i),
stderr="freesurfer.{}.err".format(i))
for i in range(0, N):
results[i].result()
print("Waiting ....")
try:
print(results[0].result())
except Exception:
print("Caught an exception, but this is not a problem")
pass
print("STDOUT from 0th run :")
print(open(results[0].stdout, 'r').read())
print(open(results[0].stderr, 'r').read())
avg_line_length: 24.305556 | max_line_length: 69 | alphanum_fraction: 0.614857
hexsha: f335c068030e35e367c0b0f81c8116b85f59cc3a | size: 1,748 | ext: py | lang: Python
max_stars_repo_path: user_test.py | max_stars_repo_name: EsauKip/Password-locker | max_stars_repo_head_hexsha: 94cc650175bef48a07639720144a5ebf6fe2af97 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event: null .. null
max_issues_repo_path: user_test.py | max_issues_repo_name: EsauKip/Password-locker | max_issues_repo_head_hexsha: 94cc650175bef48a07639720144a5ebf6fe2af97 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo_path: user_test.py | max_forks_repo_name: EsauKip/Password-locker | max_forks_repo_head_hexsha: 94cc650175bef48a07639720144a5ebf6fe2af97 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_event: null .. null
content:
#!/usr/bin/env python3.9
import unittest  # import the unittest module
from user import User  # import the User class
class TestUser(unittest.TestCase):
def setUp(self):
'''
method to run before each test
'''
self.new_user=User("EsauKip","#Youcannotfind5") #new User created
def test__init(self):
self.assertEqual(self.new_user.username,"EsauKip")
self.assertEqual(self.new_user.password,"#Youcannotfind5")
##second test
def test_save_user(self):
'''
check whether the user information can be saved
in the user list
'''
self.new_user.save_user()
self.assertEqual(len(User.user_list), 1)
def tearDown(self):
'''
clean up after each test to prevent errors
'''
User.user_list = []
#save many users
def test_save_multiple_users(self):
self.new_user.save_user()
test_user = User("test","trickysana")
test_user.save_user()
self.assertEqual(len(User.user_list),2)
#4th test
def test_delete_user(self):
'''
check whether one can delete a user account
'''
self.new_user.save_user()
test_user = User("try", "trickysana")
test_user.save_user()
self.new_user.delete_user()
self.assertEqual(len(User.user_list), 1)
#5th test
def test_find_user(self):
'''
find a user using username
'''
self.new_user.save_user()
test_user = User("try", "trickysana")
test_user.save_user()
found_user = User.find_user("EsauKip")
self.assertEqual(found_user.username, self.new_user.username)
if __name__ == "__main__":
unittest.main()
avg_line_length: 32.37037 | max_line_length: 73 | alphanum_fraction: 0.610984
hexsha: db2ad11a93de029d9827073cd1cfdc9717384dea | size: 49,853 | ext: py | lang: Python
max_stars_repo_path: src/capabilities/server.py | max_stars_repo_name: dfautomation/capabilities | max_stars_repo_head_hexsha: ef5c68bde5299a48782a6021f37045f5bda2f58d | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | stars_event: null .. null
max_issues_repo_path: src/capabilities/server.py | max_issues_repo_name: dfautomation/capabilities | max_issues_repo_head_hexsha: ef5c68bde5299a48782a6021f37045f5bda2f58d | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | issues_event: null .. null
max_forks_repo_path: src/capabilities/server.py | max_forks_repo_name: dfautomation/capabilities | max_forks_repo_head_hexsha: ef5c68bde5299a48782a6021f37045f5bda2f58d | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | forks_event: null .. null
content:
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: Tully Foote <tfoote@osrfoundation.org>
# Author: William Woodall <william@osrfoundation.org>
"""
This module implements the Capability server.
The Capability server provides access to queries and services related
to capabilities.
"""
from __future__ import print_function
import argparse
import logging
import os
import sys
import threading
import traceback
import uuid
import rospy
from bondpy.bondpy import Bond
from std_srvs.srv import Empty
from std_srvs.srv import EmptyResponse
from capabilities.srv import EstablishBond
from capabilities.srv import EstablishBondResponse
from capabilities.srv import GetCapabilitySpec
from capabilities.srv import GetCapabilitySpecResponse
from capabilities.srv import FreeCapability
from capabilities.srv import FreeCapabilityResponse
from capabilities.srv import GetCapabilitySpecs
from capabilities.srv import GetCapabilitySpecsResponse
from capabilities.srv import GetInterfaces
from capabilities.srv import GetInterfacesResponse
from capabilities.srv import GetNodeletManagerName
from capabilities.srv import GetNodeletManagerNameResponse
from capabilities.srv import GetProviders
from capabilities.srv import GetProvidersResponse
from capabilities.srv import GetSemanticInterfaces
from capabilities.srv import GetSemanticInterfacesResponse
from capabilities.srv import GetRemappings
from capabilities.srv import GetRemappingsResponse
from capabilities.srv import GetRunningCapabilities
from capabilities.srv import GetRunningCapabilitiesResponse
from capabilities.srv import StartCapability
from capabilities.srv import StartCapabilityResponse
from capabilities.srv import StopCapability
from capabilities.srv import StopCapabilityResponse
from capabilities.srv import UseCapability
from capabilities.srv import UseCapabilityResponse
from capabilities.discovery import package_index_from_package_path
from capabilities.discovery import spec_file_index_from_package_index
from capabilities.discovery import spec_index_from_spec_file_index
from capabilities.launch_manager import _special_nodelet_manager_capability
from capabilities.launch_manager import LaunchManager
from capabilities.msg import Capability
from capabilities.msg import CapabilityEvent
from capabilities.msg import CapabilitySpec
from capabilities.msg import Remapping
from capabilities.msg import RunningCapability
from capabilities.specs.interface import capability_interface_from_string
from capabilities.specs.semantic_interface import semantic_capability_interface_from_string
USER_SERVICE_REASON = 'user service call'
## Hack to squelch output from Service call failure ##
from rospy.impl import tcpros_service
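# The stock handler logs a full "Error processing request" traceback whenever a
# service handler raises. This server intentionally raises exceptions (relayed by
# __catch_and_log below) to report failures back to service callers, so the
# replacement handler below only writes the error onto the transport instead of
# also spamming the ROS log.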
def custom__handle_request(self, transport, request): # pragma: no cover
import struct
from rospy.impl.tcpros_service import convert_return_to_response
from rospy.service import ServiceException
try:
# convert return type to response Message instance
response = convert_return_to_response(self.handler(request), self.response_class)
self.seq += 1
# ok byte
transport.write_buff.write(struct.pack('<B', 1))
transport.send_message(response, self.seq)
except ServiceException as e:
rospy.core.rospydebug("handler raised ServiceException: %s" % e)
self._write_service_error(transport, "service cannot process request: %s" % e)
except Exception as e:
# rospy.logerr("Error processing request: %s\n%s" % (e, traceback.print_exc()))
self._write_service_error(transport, "error processing request: %s" % e)
tcpros_service.ServiceImpl._handle_request = custom__handle_request
## End hacks ##
class CapabilityInstance(object):
"""Encapsulates the state of an instance of a Capability Provider
This class encapsulates the state of the capability instance and
provides methods for changing the states of the instance.
"""
def __init__(self, provider, provider_path, started_by='unknown'):
self.__state = 'waiting'
self.name = provider.name
self.provider = provider
self.provider_path = provider_path
self.interface = provider.implements
self.pid = None
self.depends_on = [x for x in provider.dependencies]
self.canceled = False
self.started_by = started_by
self.bonds = {} # {bond_id: reference_count}
@property
def reference_count(self):
return sum(list(self.bonds.values()))
@property
def state(self):
"""Get the current state of the CapabilityInstance"""
return self.__state
def launch(self):
"""Change to the 'launching' state
Fails to transition if the current state is not 'waiting'.
:returns: True if transition is successful, False otherwise
:rtype: bool
"""
if self.state != 'waiting':
rospy.logerr(
"Capability Provider '{0}' ".format(self.name) +
"cannot transition to 'launching' from anything but " +
"'waiting', current state is '{0}'".format(self.state))
return False
self.__state = 'launching'
return True
def cancel(self):
"""Cancels the instance, which can only be done while it is still 'waiting'
Fails to cancel if the current state is not 'waiting'.
"Canceling" is achieved by setting the canceled member variable to True.
:returns: True if canceling is successful, False otherwise
:rtype: bool
"""
if self.state != 'waiting':
rospy.logerr(
"Capability Instance '{0}' ".format(self.name) +
"cannot be canceled from anything but " +
"'waiting', current state is '{0}'".format(self.state))
return False
self.canceled = True
return True
def launched(self, pid):
"""Called once the instance is "launched", changes state to 'running'
Fails to transition if the current state is not 'launching'.
If successful, the state changes to 'running'.
:param pid: process ID of the instance being tracked
:type pid: int
:returns: True if transition is successful, False otherwise
:rtype: bool
"""
self.pid = pid
if self.state != 'launching':
rospy.logerr(
"Capability Instance '{0}' ".format(self.name) +
"cannot transition to 'running' from anything but " +
"'launching', current state is '{0}'".format(self.state))
return False
self.__state = 'running'
return True
def stopped(self):
"""Change to the 'stopping' state
Fails to transition if the current state is not either 'running' or 'launching'.
:returns: True if transition is successful, False otherwise
:rtype: bool
"""
if self.state not in ['running', 'launching']:
rospy.logerr(
"Capability Instance '{0}' ".format(self.name) +
"cannot transition to 'stopping' from anything but " +
"'launching' or 'running', " +
"current state is '{0}'".format(self.state))
return False
self.__state = 'stopping'
return True
def terminated(self):
"""Called when the instance has terminated, transitions to the 'terminated' state
Fails to transition if the current state is not 'stopping'.
:returns: True if transition is successful, False otherwise
:rtype: bool
"""
result = True
if self.state != 'stopping':
rospy.logerr(
"Capability Instance '{0}' ".format(self.name) +
"terminated unexpectedly, it was previously in the " +
"'{0}' state.".format(self.state))
result = False
self.__state = 'terminated'
return result
def get_reverse_depends(name, capability_instances):
"""Gets the reverse dependencies of a given Capability
:param name: Name of the Capability which the instances might depend on
:type name: str
:param capability_instances: list of instances to search for having a
dependency on the given Capability
:type capability_instances: :py:obj:`list` of :py:class:`CapabilityInstance`
:returns: A list of :py:class:`CapabilityInstance`'s which depend on the
given Capability name
:rtype: :py:obj:`list` of :py:class:`CapabilityInstance`
"""
rdepends = []
for instance in capability_instances:
if name in instance.depends_on:
rdepends.append(instance)
return rdepends
class CapabilityServer(object):
"""A class to expose the :py:class:`discovery.SpecIndex` over a ROS API
"""
def __init__(self, package_paths, screen=None):
self.__package_paths = package_paths
self.__spec_index = None
self.__graph_lock = threading.Lock()
self.__capability_instances = {}
self.__launch_manager = LaunchManager(
screen=bool(rospy.get_param('~use_screen', screen)),
nodelet_manager_name=rospy.get_param('~nodelet_manager_name', None)
)
self.__debug = False
self.__package_whitelist = None
self.__package_blacklist = None
self.__whitelist = None
self.__blacklist = None
self.__default_providers = {}
self.__missing_default_provider_is_an_error = rospy.get_param('~missing_default_provider_is_an_error', False)
self.__bonds = {}
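        # self.__bonds maps bond_id -> bondpy Bond; entries are created by
        # ~establish_bond and removed by the on_broken callback.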
def spin(self):
"""Starts the capability server by setting up ROS comms, then spins"""
self.__package_whitelist = rospy.get_param('~package_whitelist', None)
if not isinstance(self.__package_whitelist, (list, tuple, type(None))):
msg = "~package_whitelist must be a list or null, got a '{0}'".format(type(self.__whitelist))
rospy.logerr(msg)
self.__package_whitelist = None
self.__package_blacklist = rospy.get_param('~package_blacklist', None)
if not isinstance(self.__package_blacklist, (list, tuple, type(None))):
msg = "~package_blacklist must be a list or null, got a '{0}'".format(type(self.__whitelist))
rospy.logerr(msg)
self.__package_blacklist = None
self.__whitelist = rospy.get_param('~whitelist', None)
if not isinstance(self.__whitelist, (list, tuple, type(None))):
msg = "~whitelist must be a list or null, got a '{0}'".format(type(self.__whitelist))
rospy.logerr(msg)
self.__whitelist = None
self.__blacklist = rospy.get_param('~blacklist', None)
if not isinstance(self.__blacklist, (list, tuple, type(None))):
msg = "~blacklist must be a list or null, got a '{0}'".format(type(self.__blacklist))
rospy.logerr(msg)
self.__blacklist = None
self.__debug = rospy.get_param('~debug', False)
if self.__debug:
logger = logging.getLogger('rosout')
logger.setLevel(logging.DEBUG)
rospy.logdebug('Debug messages enabled.')
self.__load_capabilities()
self.__bond_topic = rospy.get_name() + "/bonds"
# Collect default arguments
self.__populate_default_providers()
rospy.Subscriber(
'~events', CapabilityEvent, self.handle_capability_events)
self.__start_capability_service = rospy.Service(
'~start_capability', StartCapability, self.handle_start_capability)
self.__stop_capability_service = rospy.Service(
'~stop_capability', StopCapability, self.handle_stop_capability)
self.__establish_bond_service = rospy.Service(
'~establish_bond', EstablishBond, self.handle_establish_bond)
self.__free_capability_service = rospy.Service(
'~free_capability', FreeCapability, self.handle_free_capability)
self.__use_capability_service = rospy.Service(
'~use_capability', UseCapability, self.handle_use_capability)
self.__reload_service = rospy.Service(
'~reload_capabilities', Empty, self.handle_reload_request)
self.__interfaces_service = rospy.Service(
'~get_interfaces', GetInterfaces, self.handle_get_interfaces)
self.__providers_service = rospy.Service(
'~get_providers', GetProviders, self.handle_get_providers)
self.__semantic_interfaces_service = rospy.Service(
'~get_semantic_interfaces', GetSemanticInterfaces,
self.handle_get_semantic_interfaces)
self.__running_capabilities = rospy.Service(
'~get_running_capabilities', GetRunningCapabilities,
self.handle_get_running_capabilities)
self.__capability_specs = rospy.Service(
'~get_capability_specs', GetCapabilitySpecs,
self.handle_get_capability_specs)
self.__capability_spec = rospy.Service(
'~get_capability_spec', GetCapabilitySpec,
self.handle_get_capability_spec)
self.__get_nodelet_manager_name_service = rospy.Service(
'~get_nodelet_manager_name', GetNodeletManagerName,
self.handle_get_nodelet_manager_name)
self.__get_remappings_service = rospy.Service(
'~get_remappings', GetRemappings,
self.handle_get_remappings)
rospy.loginfo("Capability Server Ready")
rospy.Publisher("~events", CapabilityEvent, queue_size=1000).publish(
CapabilityEvent(type=CapabilityEvent.SERVER_READY))
rospy.spin()
def shutdown(self):
"""Stops the capability server and cleans up any running processes"""
for instance in self.__capability_instances.values(): # pragma: no cover
if instance.state in ['running', 'launching']:
instance.stopped()
if instance.state == 'waiting':
instance.cancel()
self.__launch_manager.stop()
def __load_capabilities(self):
package_index = package_index_from_package_path(self.__package_paths)
self.spec_file_index = spec_file_index_from_package_index(package_index)
# Prune packages by black and white list
for package in self.spec_file_index.keys():
if self.__package_whitelist and package not in self.__package_whitelist:
rospy.loginfo("Package '{0}' not in whitelist, skipping.".format(package))
del self.spec_file_index[package]
elif self.__package_blacklist and package in self.__package_blacklist:
rospy.loginfo("Package '{0}' in blacklist, skipping.".format(package))
del self.spec_file_index[package]
# Generate spec_index from spec file index
spec_index, errors = spec_index_from_spec_file_index(self.spec_file_index)
if errors:
rospy.logerr("Errors were encountered loading capabilities:")
for error in errors:
rospy.logerr(" " + str(error.__class__.__name__) + ": " + str(error))
# Prune specific capabilities based on black and white lists
removed_interfaces = []
for specs, remove_func in [
(spec_index.interfaces, spec_index.remove_interface),
(spec_index.semantic_interfaces, spec_index.remove_semantic_interface),
(spec_index.providers, spec_index.remove_provider)
]:
for spec in specs.keys():
if self.__whitelist and spec not in self.__whitelist:
removed_interfaces.append(spec)
remove_func(spec)
rospy.loginfo("Spec '{0}' is not in the whitelist, skipping.".format(spec))
elif self.__blacklist and spec in self.__blacklist:
removed_interfaces.append(spec)
remove_func(spec)
rospy.loginfo("Spec '{0}' is in the blacklist, skipping.".format(spec))
# Remove providers which no longer have an interface
for interface in removed_interfaces:
for provider in spec_index.providers.values():
if provider.implements == interface:
spec_index.remove_provider(provider.name)
self.__spec_index = spec_index
# Prune spec_file_index
spec_paths = spec_index.interface_paths.values() + \
spec_index.semantic_interface_paths.values() + \
spec_index.provider_paths.values()
for package_name, package_dict in self.spec_file_index.items():
for spec_type in ['capability_interface', 'semantic_capability_interface', 'capability_provider']:
package_dict[spec_type][:] = [path for path in package_dict[spec_type] if path in spec_paths]
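    # Default provider resolution: for each interface with at least one provider,
    # use the '~defaults/<interface>' parameter if set, fall back to the provider
    # itself when it is the only one, and otherwise warn or abort depending on
    # '~missing_default_provider_is_an_error'.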
def __populate_default_providers(self):
# Collect available interfaces
interfaces = self.__spec_index.interface_names + self.__spec_index.semantic_interface_names
for interface in interfaces:
# Collect the providers for each interface
providers = [n
for n, p in self.__spec_index.providers.items()
if p.implements == interface]
if not providers:
                # If an interface has no providers, ignore it
rospy.logwarn("No providers for capability interface '{0}', not checking for default provider."
.format(interface))
continue
try:
# Try to get the default provider from the corresponding ros parameter
self.__default_providers[interface] = rospy.get_param('~defaults/' + interface)
except KeyError:
# No ros parameter set for this capability interface
rospy.logwarn("No default provider given for capability interface '{0}'. ".format(interface))
if len(providers) == 1:
# If there is only one provider, allow it to be the default
rospy.logwarn("'{0}' has only one provider, '{1}', using that as the default."
.format(interface, providers[0]))
self.__default_providers[interface] = providers[0]
else:
# Otherwise we can't decide
if self.__missing_default_provider_is_an_error:
rospy.logerr("Could not determine a default provider for capability interface '{0}', aborting."
.format(interface))
sys.exit(-1)
else:
rospy.logwarn("Could not determine a default provider for capability interface '{0}'."
.format(interface))
continue
# Make sure the given default provider exists
if self.__default_providers[interface] not in self.__spec_index.provider_names:
if self.__missing_default_provider_is_an_error:
rospy.logerr("Given default provider '{0}' for interface '{1}' does not exist."
.format(self.__default_providers[interface], interface))
sys.exit(-1)
else:
rospy.logwarn("Given default provider '{0}' for interface '{1}' does not exist."
.format(self.__default_providers[interface], interface))
del self.__default_providers[interface]
continue
# Make sure the given provider implements this interface
if self.__default_providers[interface] not in providers:
if self.__missing_default_provider_is_an_error:
rospy.logerr("Given default provider '{0}' does not implment interface '{1}'."
.format(self.__default_providers[interface], interface))
sys.exit(-1)
else:
rospy.logwarn("Given default provider '{0}' does not implment interface '{1}'."
.format(self.__default_providers[interface], interface))
del self.__default_providers[interface]
continue
# Update the interface object with the default provider
iface = self.__spec_index.interfaces.get(
interface,
self.__spec_index.semantic_interfaces.get(interface, None))
iface.default_provider = self.__default_providers[interface]
# Summarize defaults
if self.__default_providers:
rospy.loginfo("For each available interface, the default provider:")
for interface, provider in self.__default_providers.items():
rospy.loginfo("'{0}'".format(interface))
rospy.loginfo(" => '{0}'".format(provider))
rospy.loginfo("")
else: # pragma: no cover
rospy.logwarn("No runnable Capabilities loaded.")
def __catch_and_log(self, func, *args, **kwargs):
warning_level_exceptions = ['because it is not running']
try:
return func(*args, **kwargs)
except Exception as exc:
msg = "{0}".format(exc)
log_func = rospy.logerr
if [x for x in warning_level_exceptions if x in msg]:
log_func = rospy.logwarn
rospy.logdebug(traceback.format_exc())
log_func('{0}: {1}'.format(exc.__class__.__name__, msg))
raise
def handle_capability_events(self, event):
"""Callback for handling messages (events) from the /events topic
        This callback only processes events generated by this node.
:param event: ROS message encapsulating an event
:type event: :py:class:`capabilities.msgs.CapabilityEvent`
"""
return self.__catch_and_log(self._handle_capability_events, event)
def _handle_capability_events(self, event):
# Ignore any publications which we did not send (external publishers)
if event._connection_header['callerid'] != rospy.get_name():
return # pragma: no cover
# Ignore the `server_ready` event
if event.type == event.SERVER_READY:
return
# Specially handle the nodelet manager
if event.capability == _special_nodelet_manager_capability:
if event.type == event.TERMINATED:
if not rospy.is_shutdown():
rospy.logerr("Capability server's nodelet manager terminated unexpectedly.")
self.shutdown()
return
# Update the capability
capability = event.capability
with self.__graph_lock:
if capability not in self.__capability_instances.keys():
rospy.logerr("Unknown capability instance: '{0}'"
.format(capability))
return
instance = self.__capability_instances[capability]
if event.type == event.LAUNCHED:
if instance.canceled: # pragma: no cover
# This is defensive programming, it should not happen
self.__stop_capability(instance.name)
else:
instance.launched(event.pid)
elif event.type == event.TERMINATED:
instance.terminated()
rospy.loginfo(
"Capability Provider '{0}' for Capability '{1}' "
.format(event.provider, event.capability) +
"has terminated.")
# Update the graph
self.__update_graph()
def __remove_terminated_capabilities(self):
# collect all of the terminated capabilities
terminated = [x
for x in self.__capability_instances.values()
if x.state == 'terminated']
# Remove terminated instances
for instance in terminated:
del self.__capability_instances[instance.interface]
# Shutdown unused capabilities
self.__cleanup_graph()
def __cleanup_graph(self):
"""Iterate over the running capabilities and shutdown ones which are no longer needed
For each running capability, if it was not started by the user then look at who depends on it.
If no other capabilities depend on it, then shut it down.
"""
# Collect all running capabilities
running_capabilities = [x
for x in self.__capability_instances.values()
if x.state == 'running']
for cap in running_capabilities:
if cap.started_by == USER_SERVICE_REASON:
# Started by user, do not garbage collect this
continue
rdepends = get_reverse_depends(cap.interface, self.__capability_instances.values())
if rdepends:
# Someone depends on me, do not garbage collect this
rospy.logdebug("Keeping the '{0}' provider of the '{1}' interface, ".format(cap.name, cap.interface) +
"because other running capabilities depend on it.")
continue
if cap.state == 'running':
rospy.loginfo("Stopping the '{0}' provider of the '{1}' interface, because it has no dependents left."
.format(cap.name, cap.interface))
self.__stop_capability(cap.interface)
elif cap.state == 'waiting': # pragma: no cover
rospy.loginfo("Canceling the '{0}' provider of the '{1}' interface, because it has no dependents left."
.format(cap.name, cap.interface))
cap.cancel()
# Else the state is launching, stopping, or terminated
# In which case launching will be caught on the next cleanup
# and the latter two will get cleared out also.
def __update_graph(self):
# collect all of the waiting capabilities
waiting = [x
for x in self.__capability_instances.values()
if x.state == 'waiting']
# If any of the waiting have no blocking dependencies start them
for instance in waiting:
blocking_dependencies = []
for dependency_name in instance.depends_on:
if dependency_name not in self.__capability_instances: # pragma: no cover
rospy.logerr(
"Inconsistent capability run graph, '{0}' depends on "
.format(instance.name) + "'{0}', ".format(dependency_name) +
"which is not in the list of capability instances.")
return
dependency = self.__capability_instances[dependency_name]
if dependency.state != 'running':
blocking_dependencies.append(dependency)
if not blocking_dependencies:
instance.launch()
self.__launch_manager.run_capability_provider(
instance.provider, instance.provider_path
)
# Remove any terminated capabilities
self.__remove_terminated_capabilities()
def __stop_capability(self, name):
if name not in self.__capability_instances:
rospy.logerr("Inconsistent capability run graph, asked to stop " +
"capability '{0}', ".format(name) +
"which is not in the list of capability instances.")
return
capability = self.__capability_instances[name]
rdepends = get_reverse_depends(name, self.__capability_instances.values())
for cap in rdepends:
if cap.state in ['stopping', 'terminated']: # pragma: no cover
# It is possible that this cap was stopped by another cap in this list
# This is purely defensive
continue
rospy.loginfo(
"Capability '{0}' being stopped because its dependency '{1}' is being stopped.".format(cap.name, name))
self.__stop_capability(cap.interface)
capability.stopped()
self.__launch_manager.stop_capability_provider(capability.pid)
def __get_provider_dependencies(self, provider):
result = []
for interface, dep in provider.dependencies.items():
provider_name = dep.provider or self.__default_providers[interface]
if provider_name not in self.__spec_index.providers:
# This is the case where a provider depends on another interface,
# but the preferred provider does not exist
raise RuntimeError("Capability Provider '{0}' not found"
.format(provider_name))
dep_provider = self.__spec_index.providers[provider_name]
result.append((dep_provider, provider.name))
return result
def __get_capability_instances_from_provider(self, provider):
instances = []
providers = [(provider, USER_SERVICE_REASON)]
while providers:
curr, reason = providers.pop()
providers.extend(self.__get_provider_dependencies(curr))
curr_path = self.__spec_index.provider_paths[curr.name]
instances.append(CapabilityInstance(curr, curr_path, started_by=reason))
return instances
def __get_providers_for_interface(self, interface, allow_semantic=False):
valid_interfaces = [interface]
if allow_semantic:
# Add semantic interfaces which redefine this one
valid_interfaces.extend(
[k for k, v in self.__spec_index.semantic_interfaces.items()
if v.redefines == interface]
)
providers = dict([(n, p)
for n, p in self.__spec_index.providers.items()
if p.implements in valid_interfaces])
return providers # Could be empty
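    # Starting a capability: resolve the preferred (or default) provider, expand its
    # recursive provider dependencies into CapabilityInstance objects, and let
    # __update_graph() launch them once their dependencies are running.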
def __start_capability(self, capability, preferred_provider):
if capability not in self.__spec_index.interfaces.keys() + self.__spec_index.semantic_interfaces.keys():
raise RuntimeError("Capability '{0}' not found.".format(capability))
# If no preferred provider is given, use the default
preferred_provider = preferred_provider or self.__default_providers[capability]
providers = self.__get_providers_for_interface(capability, allow_semantic=True)
if preferred_provider not in providers:
raise RuntimeError(
"Capability Provider '{0}' not found for Capability '{1}'"
.format(preferred_provider, capability))
provider = providers[preferred_provider]
instances = self.__get_capability_instances_from_provider(provider)
with self.__graph_lock:
# If the requested capability has an existing instance, we don't start it
            # again. Return a result that lets the caller know this happened.
requested_instance = instances[0]
if requested_instance.interface in self.__capability_instances:
requested_instance_state = self.__capability_instances[
requested_instance.interface].state
if requested_instance_state in ['running']:
# Current instance is running (or will be soon)
return StartCapabilityResponse.RESULT_CURRENTLY_RUNNING
elif requested_instance_state in ['waiting', 'launching']:
return StartCapabilityResponse.RESULT_CURRENTLY_STARTING
elif requested_instance_state in ['stopping', 'terminated']:
# Current instance is in the process of stopping
return StartCapabilityResponse.RESULT_CURRENTLY_STOPPING
else:
raise RuntimeError(
"Instance for capability '{0}' has improper state '{1}'"
.format(capability, requested_instance_state))
for x in instances:
if x.interface not in self.__capability_instances:
self.__capability_instances[x.interface] = x
self.__update_graph()
return StartCapabilityResponse.RESULT_SUCCESS
def handle_get_capability_specs(self, req):
return self.__catch_and_log(self._handle_get_capability_specs, req)
def _handle_get_capability_specs(self, req):
rospy.loginfo("Servicing request for capability specs...")
response = GetCapabilitySpecsResponse()
for package_name, package_dict in self.spec_file_index.items():
for spec_type in ['capability_interface', 'semantic_capability_interface', 'capability_provider']:
for path in package_dict[spec_type]:
with open(path, 'r') as f:
raw = f.read()
default_provider = ''
# If a capability interface, try to lookup the default provider
iface = None
if spec_type == 'capability_interface':
iface = capability_interface_from_string(raw, path)
if spec_type == 'semantic_capability_interface':
iface = semantic_capability_interface_from_string(raw, path)
if spec_type in ['capability_interface', 'semantic_capability_interface']:
iface.name = '{package}/{name}'.format(package=package_name, name=iface.name)
if iface.name not in self.__default_providers:
default_provider = ''
else:
default_provider = self.__default_providers[iface.name]
cs = CapabilitySpec(package_name, spec_type, raw, default_provider)
response.capability_specs.append(cs)
return response
def handle_get_capability_spec(self, req):
return self.__catch_and_log(self._handle_get_capability_spec, req)
def _handle_get_capability_spec(self, req):
rospy.loginfo("Servicing request for get capability spec '{0}'...".format(req.capability_spec))
response = GetCapabilitySpecResponse()
for package_name, package_dict in self.spec_file_index.items():
for spec_type in ['capability_interface', 'semantic_capability_interface', 'capability_provider']:
for path in package_dict[spec_type]:
with open(path, 'r') as f:
raw = f.read()
default_provider = ''
# If a capability interface, try to lookup the default provider
iface = None
if spec_type == 'capability_interface':
iface = capability_interface_from_string(raw, path)
if spec_type == 'semantic_capability_interface':
iface = semantic_capability_interface_from_string(raw, path)
if spec_type in ['capability_interface', 'semantic_capability_interface']:
iface.name = '{package}/{name}'.format(package=package_name, name=iface.name)
if iface.name not in self.__default_providers:
default_provider = ''
else:
default_provider = self.__default_providers[iface.name]
if iface and iface.name == req.capability_spec:
response.capability_spec = CapabilitySpec(package_name, spec_type, raw, default_provider)
return response
raise RuntimeError("Could not find requested spec '{0}'".format(req.capability_spec))
def handle_start_capability(self, req):
return self.__catch_and_log(self._handle_start_capability, req)
def _handle_start_capability(self, req):
msg = "Request to start capability '{0}'".format(req.capability)
if req.preferred_provider:
msg += " with provider '{0}'".format(req.preferred_provider)
rospy.loginfo(msg)
ret = self.__start_capability(req.capability, req.preferred_provider)
return StartCapabilityResponse(ret)
def handle_stop_capability(self, req):
return self.__catch_and_log(self._handle_stop_capability, req)
def _handle_stop_capability(self, req):
rospy.loginfo("Request to stop capability '{0}'".format(req.capability))
capability = req.capability
if capability not in self.__capability_instances:
raise RuntimeError("No Capability '{0}' running".format(capability))
self.__stop_capability(capability)
return StopCapabilityResponse(True)
def handle_establish_bond(self, req):
return self.__catch_and_log(self._handle_establish_bond, req)
def _handle_establish_bond(self, req):
rospy.loginfo("Request to establish a bond")
bond_id = str(uuid.uuid1())
def on_formed():
rospy.loginfo("Bond formed with bond_id of '{0}'"
.format(bond_id))
def on_broken():
# if bond_id in self.__bonds:
rospy.loginfo("Bond with bond id '{0}' was broken, freeing associated capabilities"
.format(bond_id))
self.__free_capabilities_by_bond_id(bond_id)
del self.__bonds[bond_id]
self.__bonds[bond_id] = Bond(self.__bond_topic, bond_id, on_broken=on_broken, on_formed=on_formed)
self.__bonds[bond_id].start()
return EstablishBondResponse(bond_id)
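    # Reference counting: ~use_capability increments capability.bonds[bond_id] on the
    # running instance and ~free_capability decrements it; a broken bond drops all of
    # its references at once. When an instance reaches zero references its started_by
    # marker is cleared so __cleanup_graph() can stop it if nothing else depends on it.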
def __free_capabilities_by_bond_id(self, bond_id):
if bond_id in self.__bonds:
for capability in self.__capability_instances.values():
if bond_id in capability.bonds:
del capability.bonds[bond_id]
if capability.reference_count == 0:
rospy.loginfo("Capability '{0}' being stopped because it has zero references"
.format(capability.interface))
self.__free_capability_instance(capability)
def __free_capability(self, capability_name, bond_id):
if capability_name not in self.__capability_instances:
# If you update this exception's message, then update the corresponding code
# in capabilities.client.CapabilitiesClient.free_capability()
raise RuntimeError("Cannot free Capability '{0}', because it is not running".format(capability_name))
capability = self.__capability_instances[capability_name]
if bond_id not in capability.bonds:
raise RuntimeError("Given bond_id '{0}' not associated with given capability '{1}'"
.format(bond_id, capability_name))
if capability.bonds[bond_id] == 0: # pragma: no cover
# this is defensive, it should never happen
raise RuntimeError("Cannot free capability '{0}' for bond_id '{1}', it already has a reference count of 0"
.format(capability_name, bond_id))
capability.bonds[bond_id] -= 1
if capability.reference_count == 0:
rospy.loginfo("Capability '{0}' being stopped because it has zero references"
.format(capability.interface))
self.__free_capability_instance(capability)
def __free_capability_instance(self, capability_instance):
capability_instance.started_by = 'unknown'
self.__cleanup_graph()
def handle_free_capability(self, req):
return self.__catch_and_log(self._handle_free_capability, req)
def _handle_free_capability(self, req):
rospy.loginfo("Request to free usage of capability '{0}' (bond id '{1}')"
.format(req.capability, req.bond_id))
self.__free_capability(req.capability, req.bond_id)
return FreeCapabilityResponse()
def handle_use_capability(self, req):
return self.__catch_and_log(self._handle_use_capability, req)
def _handle_use_capability(self, req):
msg = "Request to use capability '{0}'".format(req.capability)
if req.preferred_provider:
msg += " with provider '{0}'".format(req.preferred_provider)
rospy.loginfo(msg)
# Make sure the bond_id is valid
if req.bond_id not in self.__bonds:
raise RuntimeError("Invalid bond_id given to ~use_capability: '{0}'".format(req.bond_id))
# Start the capability if it is not already running
if req.capability not in self.__capability_instances:
            # This will raise if it fails to start the capability
self.__start_capability(req.capability, req.preferred_provider)
assert req.capability in self.__capability_instances # Should be true
        # Get a handle on the capability
capability = self.__capability_instances[req.capability]
if req.preferred_provider and capability.name != req.preferred_provider:
raise RuntimeError("Requested to use capability '{0}' with preferred provider '{1}', "
.format(capability.interface, req.preferred_provider) +
"but the capability is already running with provider '{0}'"
.format(capability.name))
if req.bond_id not in capability.bonds:
capability.bonds[req.bond_id] = 0
capability.bonds[req.bond_id] += 1
return UseCapabilityResponse()
def handle_reload_request(self, req):
return self.__catch_and_log(self._handle_reload_request, req)
def _handle_reload_request(self, req):
rospy.loginfo("Reloading capabilities...")
self.__load_capabilities()
return EmptyResponse()
def handle_get_interfaces(self, req):
return self.__catch_and_log(self._handle_get_interfaces, req)
def _handle_get_interfaces(self, req):
return GetInterfacesResponse(self.__spec_index.interface_names)
def handle_get_providers(self, req):
return self.__catch_and_log(self._handle_get_providers, req)
def _handle_get_providers(self, req):
if req.interface:
if req.interface not in self.__spec_index.interfaces.keys() + self.__spec_index.semantic_interfaces.keys():
raise RuntimeError("Capability Interface '{0}' not found.".format(req.interface))
providers = self.__get_providers_for_interface(req.interface, allow_semantic=req.include_semantic).keys()
default_provider = self.__default_providers.get(req.interface, '')
else:
providers = self.__spec_index.provider_names
default_provider = ''
return GetProvidersResponse(providers, default_provider)
def handle_get_semantic_interfaces(self, req):
return self.__catch_and_log(self._handle_get_semantic_interfaces, req)
def _handle_get_semantic_interfaces(self, req):
if req.interface:
sifaces = [si.name
for si in self.__spec_index.semantic_interfaces.values()
if si.redefines == req.interface]
else:
sifaces = self.__spec_index.semantic_interface_names
return GetSemanticInterfacesResponse(sifaces)
def handle_get_running_capabilities(self, req):
return self.__catch_and_log(self._handle_get_running_capabilities, req)
def _handle_get_running_capabilities(self, req):
resp = GetRunningCapabilitiesResponse()
for instance in self.__capability_instances.values():
if instance.state not in ['running']: # pragma: no cover
continue
running_capability = RunningCapability()
running_capability.capability = Capability(instance.interface, instance.name)
running_capability.started_by = instance.started_by
running_capability.pid = instance.pid
rdepends = get_reverse_depends(instance.interface, self.__capability_instances.values())
for dep in rdepends:
running_capability.dependent_capabilities.append(Capability(dep.interface, dep.name))
resp.running_capabilities.append(running_capability)
return resp
def handle_get_nodelet_manager_name(self, req):
return self.__catch_and_log(self._handle_get_nodelet_manager_name, req)
def _handle_get_nodelet_manager_name(self, req):
resp = GetNodeletManagerNameResponse()
resp.nodelet_manager_name = self.__launch_manager.nodelet_manager_name
return resp
def handle_get_remappings(self, req):
return self.__catch_and_log(self._handle_get_remappings, req)
def _handle_get_remappings(self, req):
interface = None
if req.spec in self.__capability_instances.keys():
interface = self.__capability_instances[req.spec]
else:
providers = dict([(i.provider.name, i) for i in self.__capability_instances.values()])
if req.spec not in providers:
raise RuntimeError("Spec '{0}' is neither a running Interface nor a running Provider."
.format(req.spec))
interface = providers[req.spec]
resp = GetRemappingsResponse()
remappings = {
'topics': {},
'services': {},
'actions': {},
'parameters': {}
}
# Iterate this instance and its recursive dependencies
for iface in reversed(self.__get_capability_instances_from_provider(interface.provider)):
# For each iterate over their remappings and add them to the combined remappings,
# flattening the remappings as you go
for map_type, mapping in iface.provider.remappings_by_type.items():
assert map_type in remappings
remappings[map_type].update(mapping)
# Collapse remapping chains
for mapping in remappings.values():
for key, value in mapping.items():
if value in mapping:
mapping[key] = mapping[value]
del mapping[value]
for map_type, mapping in remappings.items():
resp_mapping = getattr(resp, map_type)
for key, value in mapping.items():
remapping = Remapping()
remapping.key = key
remapping.value = value
resp_mapping.append(remapping)
return resp
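# Example interaction from the command line (a sketch; 'my_pkg/Navigation' is a
# placeholder interface name, and the node name matches init_node() in main() below):
#   rosrun capabilities capability_server
#   rosservice call /capability_server/start_capability \
#       "{capability: 'my_pkg/Navigation', preferred_provider: ''}"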
def create_parser():
parser = argparse.ArgumentParser(description="Runs the capability server")
add = parser.add_argument
add('package_path', nargs='?',
help="Overrides ROS_PACKAGE_PATH when discovering capabilities")
add('--screen', '-s', action='store_true', default=False,
help="Passes `--screen` down to roslaunch, `use_screen` rosparam takes priority.")
return parser
def main(sysargv=None):
sys.argv = rospy.myargv(argv=sys.argv)
parser = create_parser()
args = parser.parse_args(sysargv)
ros_package_path = args.package_path or os.getenv('ROS_PACKAGE_PATH', '')
ros_package_path = [x for x in ros_package_path.split(':') if x]
if not ros_package_path:
sys.exit('No package paths specified, set ROS_PACKAGE_PATH or '
'pass them as an argument')
# Extend the ROS_PACKAGE_PATH
os.environ['ROS_PACKAGE_PATH'] = ':'.join(
os.getenv('ROS_PACKAGE_PATH', '').split(':') + ros_package_path)
rospy.init_node('capability_server')
capability_server = CapabilityServer(ros_package_path, screen=args.screen)
capability_server.spin()
capability_server.shutdown()
| 47.615091
| 119
| 0.641406
|
8828e21551a7961c23b57e220e159956415aa031
| 1,001
|
py
|
Python
|
setup.py
|
caiyunapp/rb3
|
bcf6b5c7a1f1ea73a284feedddd33c18d4ced3f6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
caiyunapp/rb3
|
bcf6b5c7a1f1ea73a284feedddd33c18d4ced3f6
|
[
"Apache-2.0"
] | 1
|
2019-08-13T08:30:25.000Z
|
2019-08-13T08:30:25.000Z
|
setup.py
|
caiyunapp/rb3
|
bcf6b5c7a1f1ea73a284feedddd33c18d4ced3f6
|
[
"Apache-2.0"
] | null | null | null |
import re
import ast
from setuptools import setup
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('rb/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
f = open('README.md')
try:
README = f.read()
finally:
f.close()
setup(
name='rb3',
author='Beijing ColorfulClouds Technology Co.,Ltd.',
author_email='admin@caiyunapp.com',
# original author: Functional Software Inc.
# original email: hello@getsentry.com
version=version,
url='https://github.com/caiyunapp/rb3',
packages=['rb'],
    description='rb3, the redis blaster which supports Python 3.7',
long_description=README,
long_description_content_type="text/markdown",
keywords='Redis rb python3',
install_requires=[
'redis>=2.6',
'six>=1.12.0'
],
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
],
)
| 24.414634
| 66
| 0.643357
|
412c555eb6a2a42960a9d4f97958671d1f6bd8e5
| 761
|
py
|
Python
|
create_nn_model.py
|
shehand/FlowAPI
|
bc8f519c92163e08ac80dce791365b13b369f1fa
|
[
"Apache-2.0"
] | 1
|
2019-04-16T03:27:32.000Z
|
2019-04-16T03:27:32.000Z
|
create_nn_model.py
|
shehand/FlowAPI
|
bc8f519c92163e08ac80dce791365b13b369f1fa
|
[
"Apache-2.0"
] | null | null | null |
create_nn_model.py
|
shehand/FlowAPI
|
bc8f519c92163e08ac80dce791365b13b369f1fa
|
[
"Apache-2.0"
] | 1
|
2019-04-11T07:21:20.000Z
|
2019-04-11T07:21:20.000Z
|
from keras.layers import Conv2D, MaxPooling2D
import tensorflow as tf
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Sequential
def create_nn_model(input_shape, img_width, img_height):
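    # Three Conv2D(3x3) + ReLU + 2x2 max-pooling blocks followed by a small dense
    # head with a sigmoid output, i.e. a binary classifier. Note that only
    # input_shape is used to size the network; img_width and img_height are
    # accepted but not referenced here.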
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
return model
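# A minimal usage sketch (the 150x150 RGB shape is an assumption, not part of this module):
#   model = create_nn_model((150, 150, 3), 150, 150)
#   model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])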
| 26.241379
| 60
| 0.731932
|
42955bd3ed45737f17b0ef5973651914a8b25a3c
| 621
|
py
|
Python
|
test_generators/ssz_generic/ssz_test_case.py
|
osirusgroup/eth2.0-specs
|
88e954a9c73dff63436350b9cd530c8c9f44a94b
|
[
"CC0-1.0"
] | 1
|
2021-04-06T23:29:39.000Z
|
2021-04-06T23:29:39.000Z
|
test_generators/ssz_generic/ssz_test_case.py
|
osirusgroup/eth2.0-specs
|
88e954a9c73dff63436350b9cd530c8c9f44a94b
|
[
"CC0-1.0"
] | null | null | null |
test_generators/ssz_generic/ssz_test_case.py
|
osirusgroup/eth2.0-specs
|
88e954a9c73dff63436350b9cd530c8c9f44a94b
|
[
"CC0-1.0"
] | 1
|
2021-12-25T16:41:24.000Z
|
2021-12-25T16:41:24.000Z
|
from eth2spec.utils.ssz.ssz_impl import serialize, hash_tree_root
from eth2spec.debug.encode import encode
from eth2spec.utils.ssz.ssz_typing import SSZValue, Container
from typing import Callable
def valid_test_case(value_fn: Callable[[], SSZValue]):
def case_fn():
value = value_fn()
yield "value", "data", encode(value)
yield "serialized", "ssz", serialize(value)
yield "root", "meta", '0x' + hash_tree_root(value).hex()
return case_fn
def invalid_test_case(bytez_fn: Callable[[], bytes]):
def case_fn():
yield "serialized", "ssz", bytez_fn()
return case_fn
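# Both decorators return a zero-argument case function that yields (name, kind, data)
# tuples: a valid case yields the encoded value, its SSZ serialization, and its hash
# tree root, while an invalid case only yields raw bytes that must fail to deserialize.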
| 31.05
| 65
| 0.694042
|
b9a95587ac5cc42fd9cdf6d8d80a0369e8bdd10a
| 12,479
|
py
|
Python
|
tests/st/model_zoo_tests/DeepFM/src/dataset.py
|
HappyKL/mindspore
|
479cb89e8b5c9d859130891567038bb849a30bce
|
[
"Apache-2.0"
] | 1
|
2020-10-18T12:27:45.000Z
|
2020-10-18T12:27:45.000Z
|
tests/st/model_zoo_tests/DeepFM/src/dataset.py
|
ReIadnSan/mindspore
|
c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5
|
[
"Apache-2.0"
] | null | null | null |
tests/st/model_zoo_tests/DeepFM/src/dataset.py
|
ReIadnSan/mindspore
|
c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Create train or eval dataset.
"""
import os
import math
from enum import Enum
import pandas as pd
import numpy as np
import mindspore.dataset.engine as de
import mindspore.common.dtype as mstype
from .config import DataConfig
class DataType(Enum):
"""
    Enumerate supported dataset formats.
"""
MINDRECORD = 1
TFRECORD = 2
H5 = 3
class H5Dataset():
"""
Create dataset with H5 format.
Args:
data_path (str): Dataset directory.
train_mode (bool): Whether dataset is used for train or eval (default=True).
        train_num_of_parts (int): The number of train data files (default=21).
        test_num_of_parts (int): The number of test data files (default=3).
"""
max_length = 39
def __init__(self, data_path, train_mode=True,
train_num_of_parts=DataConfig.train_num_of_parts,
test_num_of_parts=DataConfig.test_num_of_parts):
self._hdf_data_dir = data_path
self._is_training = train_mode
if self._is_training:
self._file_prefix = 'train'
self._num_of_parts = train_num_of_parts
else:
self._file_prefix = 'test'
self._num_of_parts = test_num_of_parts
self.data_size = self._bin_count(self._hdf_data_dir, self._file_prefix, self._num_of_parts)
print("data_size: {}".format(self.data_size))
def _bin_count(self, hdf_data_dir, file_prefix, num_of_parts):
size = 0
for part in range(num_of_parts):
_y = pd.read_hdf(os.path.join(hdf_data_dir, f'{file_prefix}_output_part_{str(part)}.h5'))
size += _y.shape[0]
return size
def _iterate_hdf_files_(self, num_of_parts=None,
shuffle_block=False):
"""
iterate among hdf files(blocks). when the whole data set is finished, the iterator restarts
from the beginning, thus the data stream will never stop
        :param train_mode: True or False; False is eval mode,
this file iterator will go through the train set
:param num_of_parts: number of files
:param shuffle_block: shuffle block files at every round
:return: input_hdf_file_name, output_hdf_file_name, finish_flag
"""
parts = np.arange(num_of_parts)
while True:
if shuffle_block:
for _ in range(int(shuffle_block)):
np.random.shuffle(parts)
for i, p in enumerate(parts):
yield os.path.join(self._hdf_data_dir, f'{self._file_prefix}_input_part_{str(p)}.h5'), \
os.path.join(self._hdf_data_dir, f'{self._file_prefix}_output_part_{str(p)}.h5'), \
i + 1 == len(parts)
def _generator(self, X, y, batch_size, shuffle=True):
"""
        should only be accessed internally (private helper)
:param X:
:param y:
:param batch_size:
:param shuffle:
:return:
"""
number_of_batches = np.ceil(1. * X.shape[0] / batch_size)
counter = 0
finished = False
sample_index = np.arange(X.shape[0])
if shuffle:
for _ in range(int(shuffle)):
np.random.shuffle(sample_index)
assert X.shape[0] > 0
while True:
batch_index = sample_index[batch_size * counter: batch_size * (counter + 1)]
X_batch = X[batch_index]
y_batch = y[batch_index]
counter += 1
yield X_batch, y_batch, finished
if counter == number_of_batches:
counter = 0
finished = True
def batch_generator(self, batch_size=1000,
random_sample=False, shuffle_block=False):
"""
        :param train_mode: True or False; False is eval mode,
:param batch_size
:param num_of_parts: number of files
:param random_sample: if True, will shuffle
:param shuffle_block: shuffle file blocks at every round
:return:
"""
for hdf_in, hdf_out, _ in self._iterate_hdf_files_(self._num_of_parts,
shuffle_block):
start = stop = None
X_all = pd.read_hdf(hdf_in, start=start, stop=stop).values
y_all = pd.read_hdf(hdf_out, start=start, stop=stop).values
data_gen = self._generator(X_all, y_all, batch_size,
shuffle=random_sample)
finished = False
while not finished:
X, y, finished = data_gen.__next__()
X_id = X[:, 0:self.max_length]
X_va = X[:, self.max_length:]
yield np.array(X_id.astype(dtype=np.int32)), \
np.array(X_va.astype(dtype=np.float32)), \
np.array(y.astype(dtype=np.float32))
def _get_h5_dataset(directory, train_mode=True, epochs=1, batch_size=1000):
"""
Get dataset with h5 format.
Args:
directory (str): Dataset directory.
        train_mode (bool): Whether dataset is used for train or eval (default=True).
epochs (int): Dataset epoch size (default=1).
batch_size (int): Dataset batch size (default=1000)
Returns:
Dataset.
"""
data_para = {'batch_size': batch_size}
if train_mode:
data_para['random_sample'] = True
data_para['shuffle_block'] = True
h5_dataset = H5Dataset(data_path=directory, train_mode=train_mode)
numbers_of_batch = math.ceil(h5_dataset.data_size / batch_size)
def _iter_h5_data():
train_eval_gen = h5_dataset.batch_generator(**data_para)
for _ in range(0, numbers_of_batch, 1):
yield train_eval_gen.__next__()
ds = de.GeneratorDataset(_iter_h5_data, ["ids", "weights", "labels"], num_samples=3000)
ds = ds.repeat(epochs)
return ds
def _get_mindrecord_dataset(directory, train_mode=True, epochs=1, batch_size=1000,
line_per_sample=1000, rank_size=None, rank_id=None):
"""
Get dataset with mindrecord format.
Args:
directory (str): Dataset directory.
        train_mode (bool): Whether dataset is used for train or eval (default=True).
epochs (int): Dataset epoch size (default=1).
batch_size (int): Dataset batch size (default=1000).
line_per_sample (int): The number of sample per line (default=1000).
rank_size (int): The number of device, not necessary for single device (default=None).
rank_id (int): Id of device, not necessary for single device (default=None).
Returns:
Dataset.
"""
file_prefix_name = 'train_input_part.mindrecord' if train_mode else 'test_input_part.mindrecord'
file_suffix_name = '00' if train_mode else '0'
shuffle = train_mode
if rank_size is not None and rank_id is not None:
ds = de.MindDataset(os.path.join(directory, file_prefix_name + file_suffix_name),
columns_list=['feat_ids', 'feat_vals', 'label'],
num_shards=rank_size, shard_id=rank_id, shuffle=shuffle,
num_parallel_workers=8)
else:
ds = de.MindDataset(os.path.join(directory, file_prefix_name + file_suffix_name),
columns_list=['feat_ids', 'feat_vals', 'label'],
shuffle=shuffle, num_parallel_workers=8)
ds = ds.batch(int(batch_size / line_per_sample), drop_remainder=True)
ds = ds.map(operations=(lambda x, y, z: (np.array(x).flatten().reshape(batch_size, 39),
np.array(y).flatten().reshape(batch_size, 39),
np.array(z).flatten().reshape(batch_size, 1))),
input_columns=['feat_ids', 'feat_vals', 'label'],
columns_order=['feat_ids', 'feat_vals', 'label'],
num_parallel_workers=8)
ds = ds.repeat(epochs)
return ds
def _get_tf_dataset(directory, train_mode=True, epochs=1, batch_size=1000,
line_per_sample=1000, rank_size=None, rank_id=None):
"""
Get dataset with tfrecord format.
Args:
directory (str): Dataset directory.
        train_mode (bool): Whether dataset is used for train or eval (default=True).
epochs (int): Dataset epoch size (default=1).
batch_size (int): Dataset batch size (default=1000).
line_per_sample (int): The number of sample per line (default=1000).
rank_size (int): The number of device, not necessary for single device (default=None).
rank_id (int): Id of device, not necessary for single device (default=None).
Returns:
Dataset.
"""
dataset_files = []
    file_prefix_name = 'train' if train_mode else 'test'
shuffle = train_mode
for (dir_path, _, filenames) in os.walk(directory):
for filename in filenames:
            if file_prefix_name in filename and 'tfrecord' in filename:
dataset_files.append(os.path.join(dir_path, filename))
schema = de.Schema()
schema.add_column('feat_ids', de_type=mstype.int32)
schema.add_column('feat_vals', de_type=mstype.float32)
schema.add_column('label', de_type=mstype.float32)
if rank_size is not None and rank_id is not None:
ds = de.TFRecordDataset(dataset_files=dataset_files, shuffle=shuffle,
schema=schema, num_parallel_workers=8,
num_shards=rank_size, shard_id=rank_id,
shard_equal_rows=True, num_samples=3000)
else:
ds = de.TFRecordDataset(dataset_files=dataset_files, shuffle=shuffle,
schema=schema, num_parallel_workers=8, num_samples=3000)
ds = ds.batch(int(batch_size / line_per_sample), drop_remainder=True)
ds = ds.map(operations=(lambda x, y, z: (
np.array(x).flatten().reshape(batch_size, 39),
np.array(y).flatten().reshape(batch_size, 39),
np.array(z).flatten().reshape(batch_size, 1))),
input_columns=['feat_ids', 'feat_vals', 'label'],
column_order=['feat_ids', 'feat_vals', 'label'],
num_parallel_workers=8)
ds = ds.repeat(epochs)
return ds
def create_dataset(directory, train_mode=True, epochs=1, batch_size=1000,
data_type=DataType.TFRECORD, line_per_sample=1000,
rank_size=None, rank_id=None):
"""
Get dataset.
Args:
directory (str): Dataset directory.
        train_mode (bool): Whether dataset is used for train or eval (default=True).
epochs (int): Dataset epoch size (default=1).
batch_size (int): Dataset batch size (default=1000).
        data_type (DataType): The type of dataset which is one of H5, TFRECORD, MINDRECORD (default=TFRECORD).
line_per_sample (int): The number of sample per line (default=1000).
rank_size (int): The number of device, not necessary for single device (default=None).
rank_id (int): Id of device, not necessary for single device (default=None).
Returns:
Dataset.
"""
if data_type == DataType.MINDRECORD:
return _get_mindrecord_dataset(directory, train_mode, epochs,
batch_size, line_per_sample,
rank_size, rank_id)
if data_type == DataType.TFRECORD:
return _get_tf_dataset(directory, train_mode, epochs, batch_size,
line_per_sample, rank_size=rank_size, rank_id=rank_id)
if rank_size is not None and rank_size > 1:
raise ValueError('Please use mindrecord dataset.')
return _get_h5_dataset(directory, train_mode, epochs, batch_size)
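# A usage sketch (the directory path is a placeholder):
#   ds = create_dataset('/path/to/criteo_mindrecord', train_mode=True, epochs=1,
#                       batch_size=1000, data_type=DataType.MINDRECORD)
#   for feat_ids, feat_vals, label in ds.create_tuple_iterator():
#       ...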
| 41.735786
| 110
| 0.617277
|
b22dd4e29d4f61296191ad20fe4bfae5bbc9c793
| 370
|
py
|
Python
|
output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_enumeration_4_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_enumeration_4_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_enumeration_4_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.nist_data.atomic.any_uri.schema_instance.nistschema_sv_iv_atomic_any_uri_enumeration_4_xsd.nistschema_sv_iv_atomic_any_uri_enumeration_4 import (
NistschemaSvIvAtomicAnyUriEnumeration4,
NistschemaSvIvAtomicAnyUriEnumeration4Type,
)
__all__ = [
"NistschemaSvIvAtomicAnyUriEnumeration4",
"NistschemaSvIvAtomicAnyUriEnumeration4Type",
]
| 37
| 164
| 0.864865
|
fe4a5d48f6a15deb87188013f0a7718f973f4b26
| 6,792
|
py
|
Python
|
setup/ingest_data.py
|
j12y/predix-volcano-app
|
69b3fd038ba3fc752b62c4546586e4826f861b16
|
[
"BSD-3-Clause"
] | 1
|
2019-04-25T18:50:20.000Z
|
2019-04-25T18:50:20.000Z
|
setup/ingest_data.py
|
jpruiz114/predix-volcano-app
|
69b3fd038ba3fc752b62c4546586e4826f861b16
|
[
"BSD-3-Clause"
] | null | null | null |
setup/ingest_data.py
|
jpruiz114/predix-volcano-app
|
69b3fd038ba3fc752b62c4546586e4826f861b16
|
[
"BSD-3-Clause"
] | 2
|
2019-11-24T00:38:36.000Z
|
2021-08-06T17:47:21.000Z
|
#!/usr/bin/env python
import csv
import time
import logging
import predix.app
import predix.data.timeseries  # used below for TimeSeries quality constants
import assetmodel
manifest_path = '../manifest.yml'
def read_csv(csvfile):
"""
Read CSV file and index by the unique id.
"""
index = {}
with open(csvfile, 'r') as data:
for row in csv.DictReader(data):
index[row['id']] = row
return index
def index_datasets():
"""
Read CSV and return an index of the datasets where
relationships can be referenced by id.
"""
datasets = [
'volcanos', #id,description,location,name,status
'nodes', # id,description,location,name,status,volcano_id
'sensors', # id,data_frequency,data_type_id,description,node_id,status
'datatypes', # id,si_unit,type,type_id
'datapoints', # id,sensor_id,timestamp,value
]
index = {}
for dataset in datasets:
index[dataset] = read_csv("./data/%s.csv" % (dataset))
return index
def utc_to_epoch(utc):
"""
Take UTC formatted date and return it in
millisecond accurate epoch time.
"""
timeformat = '%Y-%m-%d %H:%M:%S+00'
return int(time.mktime(time.strptime(utc, timeformat)) * 1000)
def batch(asset, mock=False):
"""
To load a large dataset we want to send the data up in
batches. We've batched based on common attributes so
that we can send a bunch of datapoints together.
"""
batches = {}
asset_catalog = {}
index = index_datasets()
for point in index['datapoints'].values():
# Get attributes
sensor = index['sensors'][point['sensor_id']]
node = index['nodes'][sensor['node_id']]
datatype = index['datatypes'][sensor['data_type_id']]
volcano = index['volcanos'][node['volcano_id']]
# We need to chunk by volcano/node/sensor for batch
# ingestion since attributes are per set
volcano_id = volcano['id']
node_id = node['id']
sensor_id = sensor['id']
if volcano_id not in batches:
batches[volcano_id] = {}
name = index['volcanos'][volcano_id]['name']
description = index['volcanos'][volcano_id]['description']
status = index['volcanos'][volcano_id]['status']
location = index['volcanos'][volcano_id]['location']
volcano = assetmodel.Volcano(name, description, location, status, guid=volcano_id)
if not mock:
asset.save(volcano)
asset_catalog[volcano_id] = volcano
if node_id not in batches[volcano_id]:
batches[volcano_id][node_id] = {}
name = index['nodes'][node_id]['name']
description = index['nodes'][node_id]['description']
location = index['nodes'][node_id]['location']
status = index['nodes'][node_id]['status']
volcano_uri = asset_catalog[volcano_id].uri
node = assetmodel.Node(name, description, location, status,
volcano_uri, guid=node_id)
if not mock:
asset.save(node)
asset_catalog[node_id] = node
if sensor_id not in batches[volcano_id][node_id]:
batches[volcano_id][node_id][sensor_id] = []
description = index['sensors'][sensor_id]['description']
status = index['sensors'][sensor_id]['status']
data_frequency = index['sensors'][sensor_id]['data_frequency']
node_uri = asset_catalog[node_id].uri
data_type_id = index['sensors'][sensor_id]['data_type_id']
if data_type_id not in asset_catalog:
data_type = index['datatypes'][data_type_id]['type']
unit = index['datatypes'][data_type_id]['si_unit']
tag = index['datatypes'][data_type_id]['type_id']
dt = assetmodel.DataType(data_type, unit, tag,
guid=data_type_id)
if not mock:
asset.save(dt)
asset_catalog[data_type_id] = dt
data_type = asset_catalog[data_type_id].uri
sensor = assetmodel.Sensor(description, status, data_type,
data_frequency, node_uri, guid=sensor_id)
if not mock:
asset.save(sensor)
asset_catalog[sensor_id] = sensor
# Get Timestamp
stamp = utc_to_epoch(point['timestamp'])
# Get value / quality
value = point['value']
quality = predix.data.timeseries.TimeSeries.UNCERTAIN
if value == 'NaN':
quality = predix.data.timeseries.TimeSeries.BAD
value = 0
tag = datatype['type_id']
batches[volcano_id][node_id][sensor_id].append({
'tag': tag,
'attributes': {
'volcano': asset_catalog[volcano_id].uri,
'node': asset_catalog[node_id].uri,
'sensor': asset_catalog[sensor_id].uri
},
'timestamp': stamp,
'value': value,
'quality': quality
})
return batches
def main(mock=False):
# Load configuration from manifest
app = predix.app.Manifest(manifest_path)
timeseries = app.get_timeseries()
asset = app.get_asset()
total = 0
# Iterate over the hierarchy in batches of datapoints
# based on volcano > node > sensor relationship.
batches = batch(asset, mock=mock)
for volcano in batches.keys():
logging.info("Processing volcano " + volcano)
for node in batches[volcano].keys():
logging.info("Processing node " + node)
for sensor in batches[volcano][node].keys():
logging.info("Processing sensor " + sensor)
count = 0
for item in batches[volcano][node][sensor]:
if mock:
logging.info(item)
continue
logging.debug(str(item))
timeseries.queue(item['tag'], item['value'],
timestamp=item['timestamp'],
quality=item['quality'],
attributes=item['attributes'])
count += 1
if count == 100:
total += count
logging.info("Sent %s total datapoints" % (total))
if not mock:
timeseries.send()
count = 0
if not mock:
timeseries.send()
if __name__ == '__main__':
debug = False
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
main(mock=debug)
| 32.189573
| 94
| 0.557862
|
da0162b59b519a86b8a60614c122df6b16e8cf95
| 2,218
|
py
|
Python
|
Software/Python/grove_80cm_infrared_proximity_sensor.py
|
benmcclelland/GrovePi
|
1e48137b6007ffd4ce430e821d2aa744349e362a
|
[
"MIT"
] | 482
|
2015-01-09T03:06:14.000Z
|
2022-03-24T10:05:07.000Z
|
Software/Python/grove_80cm_infrared_proximity_sensor.py
|
benmcclelland/GrovePi
|
1e48137b6007ffd4ce430e821d2aa744349e362a
|
[
"MIT"
] | 257
|
2015-01-13T14:08:17.000Z
|
2022-01-20T08:43:50.000Z
|
Software/Python/grove_80cm_infrared_proximity_sensor.py
|
benmcclelland/GrovePi
|
1e48137b6007ffd4ce430e821d2aa744349e362a
|
[
"MIT"
] | 510
|
2015-01-27T17:15:44.000Z
|
2022-03-29T01:27:13.000Z
|
#!/usr/bin/env python
#
# GrovePi Example for using the Grove 80cm Infrared Proximity Sensor(http://www.seeedstudio.com/wiki/Grove_-_80cm_Infrared_Proximity_Sensor)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2017 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Grove 80cm Infrared Proximity Sensor to analog port A0
# SIG,NC,VCC,GND
sensor = 0
grovepi.pinMode(sensor,"INPUT")
time.sleep(1)
# Reference voltage of ADC is 5v
adc_ref = 5
# Vcc of the grove interface is normally 5v
grove_vcc = 5
while True:
try:
# Read sensor value
sensor_value = grovepi.analogRead(sensor)
# Calculate voltage
        voltage = round(float(sensor_value) * adc_ref / 1024, 2)
print("sensor_value =", sensor_value, " voltage =", voltage)
except IOError:
print ("Error")
| 34.123077
| 140
| 0.760144
|
df4e5fba69a605d01cd4b6bc3eb7799fa06daf63
| 6,892
|
py
|
Python
|
flask_admin/form/fields.py
|
shoeffner/flask-admin
|
51f8fc9e456f04e093e32230393e629be882f083
|
[
"BSD-3-Clause"
] | null | null | null |
flask_admin/form/fields.py
|
shoeffner/flask-admin
|
51f8fc9e456f04e093e32230393e629be882f083
|
[
"BSD-3-Clause"
] | null | null | null |
flask_admin/form/fields.py
|
shoeffner/flask-admin
|
51f8fc9e456f04e093e32230393e629be882f083
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import datetime
import json
from wtforms import fields
from flask_admin.babel import gettext
from flask_admin._compat import text_type, as_unicode
from . import widgets as admin_widgets
"""
An understanding of WTForms's Custom Widgets is helpful for understanding this code:
http://wtforms.simplecodes.com/docs/0.6.2/widgets.html#custom-widgets
"""
__all__ = ['DateTimeField', 'TimeField', 'Select2Field', 'Select2TagsField',
'JSONField']
class DateTimeField(fields.DateTimeField):
"""
Allows modifying the datetime format of a DateTimeField using form_args.
"""
widget = admin_widgets.DateTimePickerWidget()
def __init__(self, label=None, validators=None, format=None, **kwargs):
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param format:
Format for text to date conversion. Defaults to '%Y-%m-%d %H:%M:%S'
:param kwargs:
Any additional parameters
"""
format = format or '%Y-%m-%d %H:%M:%S'
super(DateTimeField, self).__init__(label, validators, format, **kwargs)
class TimeField(fields.Field):
"""
A text field which stores a `datetime.time` object.
Accepts time string in multiple formats: 20:10, 20:10:00, 10:00 am, 9:30pm, etc.
"""
widget = admin_widgets.TimePickerWidget()
def __init__(self, label=None, validators=None, formats=None,
default_format=None, widget_format=None, **kwargs):
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param formats:
            Supported time formats, as an iterable.
:param default_format:
Default time format. Defaults to '%H:%M:%S'
:param kwargs:
Any additional parameters
"""
super(TimeField, self).__init__(label, validators, **kwargs)
self.formats = formats or ('%H:%M:%S', '%H:%M',
'%I:%M:%S%p', '%I:%M%p',
'%I:%M:%S %p', '%I:%M %p')
self.default_format = default_format or '%H:%M:%S'
def _value(self):
if self.raw_data:
return u' '.join(self.raw_data)
elif self.data is not None:
return self.data.strftime(self.default_format)
else:
return u''
def process_formdata(self, valuelist):
if valuelist:
date_str = u' '.join(valuelist)
if date_str.strip():
for format in self.formats:
try:
timetuple = time.strptime(date_str, format)
self.data = datetime.time(timetuple.tm_hour,
timetuple.tm_min,
timetuple.tm_sec)
return
except ValueError:
pass
raise ValueError(gettext('Invalid time format'))
else:
self.data = None
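# Example of the parsing loop above (illustrative): the input '9:30pm' fails
# '%H:%M:%S' and '%H:%M', then matches '%I:%M%p', yielding datetime.time(21, 30);
# a string that matches none of the formats raises 'Invalid time format'.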
class Select2Field(fields.SelectField):
"""
`Select2 <https://github.com/ivaynberg/select2>`_ styled select widget.
You must include select2.js, form-x.x.x.js and select2 stylesheet for it to
work.
"""
widget = admin_widgets.Select2Widget()
def __init__(self, label=None, validators=None, coerce=text_type,
choices=None, allow_blank=False, blank_text=None, **kwargs):
super(Select2Field, self).__init__(
label, validators, coerce, choices, **kwargs
)
self.allow_blank = allow_blank
self.blank_text = blank_text or ' '
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for choice in self.choices:
if isinstance(choice, tuple):
yield (choice[0], choice[1], self.coerce(choice[0]) == self.data)
else:
yield (choice.value, choice.name, self.coerce(choice.value) == self.data)
def process_data(self, value):
if value is None:
self.data = None
else:
try:
self.data = self.coerce(value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
try:
self.data = self.coerce(valuelist[0])
except ValueError:
raise ValueError(self.gettext(u'Invalid Choice: could not coerce'))
def pre_validate(self, form):
if self.allow_blank and self.data is None:
return
super(Select2Field, self).pre_validate(form)
class Select2TagsField(fields.StringField):
"""`Select2 <http://ivaynberg.github.com/select2/#tags>`_ styled text field.
You must include select2.js, form-x.x.x.js and select2 stylesheet for it to work.
"""
widget = admin_widgets.Select2TagsWidget()
def __init__(self, label=None, validators=None, save_as_list=False, coerce=text_type, **kwargs):
"""Initialization
:param save_as_list:
If `True` then populate ``obj`` using list else string
"""
self.save_as_list = save_as_list
self.coerce = coerce
super(Select2TagsField, self).__init__(label, validators, **kwargs)
def process_formdata(self, valuelist):
if valuelist:
if self.save_as_list:
self.data = [self.coerce(v.strip()) for v in valuelist[0].split(',') if v.strip()]
else:
self.data = self.coerce(valuelist[0])
def _value(self):
if isinstance(self.data, (list, tuple)):
return u','.join(as_unicode(v) for v in self.data)
elif self.data:
return as_unicode(self.data)
else:
return u''
class JSONField(fields.TextAreaField):
def _value(self):
if self.raw_data:
return self.raw_data[0]
elif self.data:
# prevent utf8 characters from being converted to ascii
return as_unicode(json.dumps(self.data, ensure_ascii=False))
else:
return '{}'
def process_formdata(self, valuelist):
if valuelist:
value = valuelist[0]
# allow saving blank field as None
if not value:
self.data = None
return
try:
                self.data = json.loads(value)
except ValueError:
raise ValueError(self.gettext('Invalid JSON'))
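# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of flask-admin; the
# form and field names are hypothetical). The fields above plug into a plain
# WTForms form just like the built-in field types:
#
#     from wtforms.form import Form
#
#     class DeviceForm(Form):
#         installed_at = DateTimeField('Installed at')       # '%Y-%m-%d %H:%M:%S'
#         wake_time = TimeField('Wake time')                  # '20:10', '9:30pm', ...
#         tags = Select2TagsField('Tags', save_as_list=True)  # 'a,b,c' -> ['a', 'b', 'c']
#         extra = JSONField('Extra attributes')               # text area holding JSON
# ---------------------------------------------------------------------------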
| 32.509434
| 100
| 0.560215
|
004b061809b0801970e07ef4364ee398e008faed
| 6,951
|
py
|
Python
|
tests/unit/async_/io/test_direct.py
|
matilda-me/neo4j-python-driver
|
4fb25a266841bf2a861f00d5dcf257bd5ae5c686
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/async_/io/test_direct.py
|
matilda-me/neo4j-python-driver
|
4fb25a266841bf2a861f00d5dcf257bd5ae5c686
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/async_/io/test_direct.py
|
matilda-me/neo4j-python-driver
|
4fb25a266841bf2a861f00d5dcf257bd5ae5c686
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from neo4j import (
Config,
PoolConfig,
WorkspaceConfig,
)
from neo4j._async.io import AsyncBolt
from neo4j._async.io._pool import AsyncIOPool
from neo4j.exceptions import (
ClientError,
ServiceUnavailable,
)
from ...._async_compat import mark_async_test
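# The classes below are lightweight test doubles: AsyncFakeSocket stands in
# for a network socket (it only remembers its address), AsyncQuickConnection
# mimics the small surface of a Bolt connection that the pool touches, and
# AsyncFakeBoltPool wires them into an AsyncIOPool so the pooling logic can be
# exercised without a running Neo4j server.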
class AsyncFakeSocket:
def __init__(self, address):
self.address = address
def getpeername(self):
return self.address
async def sendall(self, data):
return
def close(self):
return
class AsyncQuickConnection:
def __init__(self, socket):
self.socket = socket
self.address = socket.getpeername()
@property
def is_reset(self):
return True
def stale(self):
return False
async def reset(self):
pass
def close(self):
self.socket.close()
def closed(self):
return False
def defunct(self):
return False
def timedout(self):
return False
class AsyncFakeBoltPool(AsyncIOPool):
def __init__(self, address, *, auth=None, **config):
self.pool_config, self.workspace_config = Config.consume_chain(config, PoolConfig, WorkspaceConfig)
if config:
raise ValueError("Unexpected config keys: %s" % ", ".join(config.keys()))
async def opener(addr, timeout):
return AsyncQuickConnection(AsyncFakeSocket(addr))
super().__init__(opener, self.pool_config, self.workspace_config)
self.address = address
async def acquire(
self, access_mode=None, timeout=None, database=None, bookmarks=None,
lifeness_check_timeout=None
):
return await self._acquire(self.address, timeout,
lifeness_check_timeout)
@mark_async_test
async def test_bolt_connection_open():
with pytest.raises(ServiceUnavailable):
await AsyncBolt.open(("localhost", 9999), auth=("test", "test"))
@mark_async_test
async def test_bolt_connection_open_timeout():
with pytest.raises(ServiceUnavailable):
await AsyncBolt.open(("localhost", 9999), auth=("test", "test"),
timeout=1)
@mark_async_test
async def test_bolt_connection_ping():
protocol_version = await AsyncBolt.ping(("localhost", 9999))
assert protocol_version is None
@mark_async_test
async def test_bolt_connection_ping_timeout():
protocol_version = await AsyncBolt.ping(("localhost", 9999), timeout=1)
assert protocol_version is None
@pytest.fixture
async def pool():
async with AsyncFakeBoltPool(("127.0.0.1", 7687)) as pool:
yield pool
def assert_pool_size(address, expected_active, expected_inactive, pool):
try:
connections = pool.connections[address]
except KeyError:
assert 0 == expected_active
assert 0 == expected_inactive
else:
assert expected_active == len([cx for cx in connections if cx.in_use])
assert (expected_inactive
== len([cx for cx in connections if not cx.in_use]))
@mark_async_test
async def test_pool_can_acquire(pool):
address = ("127.0.0.1", 7687)
connection = await pool._acquire(address, 3, None)
assert connection.address == address
assert_pool_size(address, 1, 0, pool)
@mark_async_test
async def test_pool_can_acquire_twice(pool):
address = ("127.0.0.1", 7687)
connection_1 = await pool._acquire(address, 3, None)
connection_2 = await pool._acquire(address, 3, None)
assert connection_1.address == address
assert connection_2.address == address
assert connection_1 is not connection_2
assert_pool_size(address, 2, 0, pool)
@mark_async_test
async def test_pool_can_acquire_two_addresses(pool):
address_1 = ("127.0.0.1", 7687)
address_2 = ("127.0.0.1", 7474)
connection_1 = await pool._acquire(address_1, 3, None)
connection_2 = await pool._acquire(address_2, 3, None)
assert connection_1.address == address_1
assert connection_2.address == address_2
assert_pool_size(address_1, 1, 0, pool)
assert_pool_size(address_2, 1, 0, pool)
@mark_async_test
async def test_pool_can_acquire_and_release(pool):
address = ("127.0.0.1", 7687)
connection = await pool._acquire(address, 3, None)
assert_pool_size(address, 1, 0, pool)
await pool.release(connection)
assert_pool_size(address, 0, 1, pool)
@mark_async_test
async def test_pool_releasing_twice(pool):
address = ("127.0.0.1", 7687)
connection = await pool._acquire(address, 3, None)
await pool.release(connection)
assert_pool_size(address, 0, 1, pool)
await pool.release(connection)
assert_pool_size(address, 0, 1, pool)
@mark_async_test
async def test_pool_in_use_count(pool):
address = ("127.0.0.1", 7687)
assert pool.in_use_connection_count(address) == 0
connection = await pool._acquire(address, 3, None)
assert pool.in_use_connection_count(address) == 1
await pool.release(connection)
assert pool.in_use_connection_count(address) == 0
@mark_async_test
async def test_pool_max_conn_pool_size(pool):
async with AsyncFakeBoltPool((), max_connection_pool_size=1) as pool:
address = ("127.0.0.1", 7687)
await pool._acquire(address, 0, None)
assert pool.in_use_connection_count(address) == 1
with pytest.raises(ClientError):
await pool._acquire(address, 0, None)
assert pool.in_use_connection_count(address) == 1
@pytest.mark.parametrize("is_reset", (True, False))
@mark_async_test
async def test_pool_reset_when_released(is_reset, pool, mocker):
address = ("127.0.0.1", 7687)
quick_connection_name = AsyncQuickConnection.__name__
is_reset_mock = mocker.patch(
f"{__name__}.{quick_connection_name}.is_reset",
new_callable=mocker.PropertyMock
)
reset_mock = mocker.patch(
f"{__name__}.{quick_connection_name}.reset",
new_callable=mocker.AsyncMock
)
is_reset_mock.return_value = is_reset
connection = await pool._acquire(address, 3, None)
assert isinstance(connection, AsyncQuickConnection)
assert is_reset_mock.call_count == 0
assert reset_mock.call_count == 0
await pool.release(connection)
assert is_reset_mock.call_count == 1
assert reset_mock.call_count == int(not is_reset)
| 29.705128
| 107
| 0.697597
|
8fdaa27144aed1eb5a5ad0c73e30dad3191c812e
| 11,331
|
py
|
Python
|
pandas/tests/frame/methods/test_align.py
|
GabrielUlisses/pandas
|
6430d5324ae2b602b314a7851e9c1f4c5313cceb
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T17:32:26.000Z
|
2020-10-29T17:32:26.000Z
|
pandas/tests/frame/methods/test_align.py
|
GabrielUlisses/pandas
|
6430d5324ae2b602b314a7851e9c1f4c5313cceb
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/frame/methods/test_align.py
|
GabrielUlisses/pandas
|
6430d5324ae2b602b314a7851e9c1f4c5313cceb
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, Index, Series, date_range
import pandas._testing as tm
class TestDataFrameAlign:
def test_frame_align_aware(self):
idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern")
idx2 = date_range("2001", periods=5, freq="2H", tz="US/Eastern")
df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)
df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)
new1, new2 = df1.align(df2)
assert df1.index.tz == new1.index.tz
assert df2.index.tz == new2.index.tz
# different timezones convert to UTC
# frame with frame
df1_central = df1.tz_convert("US/Central")
new1, new2 = df1.align(df1_central)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
# frame with Series
new1, new2 = df1.align(df1_central[0], axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
df1[0].align(df1_central, axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
def test_align_float(self, float_frame):
af, bf = float_frame.align(float_frame)
assert af._mgr is not float_frame._mgr
af, bf = float_frame.align(float_frame, copy=False)
assert af._mgr is float_frame._mgr
# axis = 0
other = float_frame.iloc[:-5, :3]
af, bf = float_frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="right", axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = float_frame.iloc[:-5, :3].copy()
af, bf = float_frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, float_frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="inner", axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=None
)
tm.assert_index_equal(bf.index, Index([]))
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
float_frame.align(af.iloc[0, :3], join="inner", axis=2)
# align dataframe to series with broadcast or not
idx = float_frame.index
s = Series(range(len(idx)), index=idx)
left, right = float_frame.align(s, axis=0)
tm.assert_index_equal(left.index, float_frame.index)
tm.assert_index_equal(right.index, float_frame.index)
assert isinstance(right, Series)
left, right = float_frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, float_frame.index)
expected = {c: s for c in float_frame.columns}
expected = DataFrame(
expected, index=float_frame.index, columns=float_frame.columns
)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
result = df[df["a"] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
result = df.where(df["a"] == 2, 0)
expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def test_align_int(self, int_frame):
# test other non-float types
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = int_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
def test_align_mixed_type(self, float_string_frame):
af, bf = float_string_frame.align(
float_string_frame, join="inner", axis=1, method="pad"
)
tm.assert_index_equal(bf.columns, float_string_frame.columns)
def test_align_mixed_float(self, mixed_float_frame):
# mixed floats/ints
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_mixed_int(self, mixed_int_frame):
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_int_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
@pytest.mark.parametrize(
"l_ordered,r_ordered,expected",
[
[True, True, pd.CategoricalIndex],
[True, False, pd.Index],
[False, True, pd.Index],
[False, False, pd.CategoricalIndex],
],
)
def test_align_categorical(self, l_ordered, r_ordered, expected):
# GH-28397
df_1 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(
pd.CategoricalDtype(list("cab"), ordered=l_ordered)
),
}
).set_index("B")
df_2 = DataFrame(
{
"A": np.arange(5, dtype="int64"),
"B": Series(list("babca")).astype(
pd.CategoricalDtype(list("cab"), ordered=r_ordered)
),
}
).set_index("B")
aligned_1, aligned_2 = df_1.align(df_2)
assert isinstance(aligned_1.index, expected)
assert isinstance(aligned_2.index, expected)
tm.assert_index_equal(aligned_1.index, aligned_2.index)
def test_align_multiindex(self):
# GH#10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product(
[range(2), range(3), range(2)], names=("a", "b", "c")
)
idx = pd.Index(range(2), name="b")
df1 = pd.DataFrame(np.arange(12, dtype="int64"), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype="int64"), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join="left")
res2l, res2r = df2.align(df1, join="right")
expl = df1
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join="right")
res2l, res2r = df2.align(df1, join="left")
exp_idx = pd.MultiIndex.from_product(
[range(2), range(2), range(2)], names=("a", "b", "c")
)
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
s = Series([1, 2, 4], index=list("ABD"), name="x")
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
exp2 = Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(
b, axis=axis, join=how, method=method, limit=limit, fill_axis=fill_axis
)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
tm.assert_frame_equal(aa, ea)
tm.assert_frame_equal(ab, eb)
@pytest.mark.parametrize("meth", ["pad", "bfill"])
@pytest.mark.parametrize("ax", [0, 1, None])
@pytest.mark.parametrize("fax", [0, 1])
@pytest.mark.parametrize("how", ["inner", "outer", "left", "right"])
def test_align_fill_method(self, how, meth, ax, fax, float_frame):
df = float_frame
self._check_align_fill(df, how, meth, ax, fax)
def _check_align_fill(self, frame, kind, meth, ax, fax):
left = frame.iloc[0:4, :10]
right = frame.iloc[2:, 6:]
empty = frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax, how=kind, method=meth)
self._check_align(
left, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax, how=kind, method=meth)
self._check_align(
empty, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax, how=kind, method=meth)
self._check_align(
left, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth)
self._check_align(
empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
)
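# ---------------------------------------------------------------------------
# Quick illustration of what DataFrame.align returns (added as a reading aid,
# not a pandas test; the values are made up):
#
#     left = DataFrame({"x": [1, 2]}, index=["a", "b"])
#     right = DataFrame({"x": [10, 20]}, index=["b", "c"])
#     l_aligned, r_aligned = left.align(right, join="outer", axis=0)
#
# Both results are reindexed to the union ["a", "b", "c"]; labels missing from
# one side are filled with NaN unless fill_value or method says otherwise.
# ---------------------------------------------------------------------------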
| 37.029412
| 86
| 0.592887
|
5e55238d90e511631c455a3e1d709269c89e239a
| 4,747
|
py
|
Python
|
train.py
|
Daisy-Zhang/Video-Classification-Pytorch
|
000b2b791ba56adcd63a71e7f2d2fe3686c07f15
|
[
"MIT"
] | 5
|
2021-06-29T18:02:08.000Z
|
2022-03-01T11:59:50.000Z
|
train.py
|
Daisy-Zhang/Video-Classification-Pytorch
|
000b2b791ba56adcd63a71e7f2d2fe3686c07f15
|
[
"MIT"
] | null | null | null |
train.py
|
Daisy-Zhang/Video-Classification-Pytorch
|
000b2b791ba56adcd63a71e7f2d2fe3686c07f15
|
[
"MIT"
] | null | null | null |
import os
import sys
import argparse
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from torch.autograd import Variable
from utils import getModel, WarmUpLR
from dataset import getDataLoader
import conf
def train(model, epoch, train_loader, loss_function, optimizer, warmup_scheduler, use_gpu):
model.train()
for batch_index, (sequences, labels) in enumerate(train_loader):
if epoch <= conf.WARM_EPOCH:
warmup_scheduler.step()
#print(sequences.size())
#sequences = sequences.reshape(sequences.size()[0], sequences.size()[1], sequences.size()[2] * sequences.size()[3] * sequences.size()[4])
sequences = Variable(sequences)
labels = Variable(labels)
if use_gpu:
labels = labels.cuda()
sequences = sequences.cuda()
optimizer.zero_grad()
#print(sequences.size())
outputs = model(sequences)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'.format(
loss.item(),
optimizer.param_groups[0]['lr'],
epoch=epoch,
trained_samples=batch_index * conf.TRAINING_BATCH_SIZE + len(sequences),
total_samples=len(train_loader.dataset)
))
return
def eval(model, epoch, val_loader, loss_function, use_gpu):
model.eval()
loss = 0.0
correct = 0.0
for (sequences, labels) in val_loader:
#sequences = sequences.reshape(sequences.size()[0], sequences.size()[1], sequences.size()[2] * sequences.size()[3] * sequences.size()[4])
sequences = Variable(sequences)
labels = Variable(labels)
if use_gpu:
sequences = sequences.cuda()
labels = labels.cuda()
        outputs = model(sequences)
        batch_loss = loss_function(outputs, labels)
        loss += batch_loss.item()
_, preds = outputs.max(1)
correct += preds.eq(labels).sum()
print('Test set: Average loss: {:.4f}, Accuracy: {:.4f}'.format(
loss / len(val_loader.dataset),
correct.float() / len(val_loader.dataset)
))
return correct.float() / len(val_loader.dataset)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-model', type = str, required = True, help = 'model type')
parser.add_argument('-seq_dir', type = str, required = True, help = 'features dir')
parser.add_argument('-seq_length', type = int, required = True, help = 'sequences length')
parser.add_argument('-cnn_type', type = str, required = True, help = 'features extractor cnn type')
parser.add_argument('-gpu', action="store_true", help = 'use gpu or not')
args = parser.parse_args()
print(args.model)
print(args.gpu)
model = getModel(model_type = args.model, use_gpu = args.gpu)
train_loader = getDataLoader(args.seq_dir, args.seq_dir + '/train_metadata.txt', args.seq_length, args.cnn_type)
print('get train loader done')
val_loader = getDataLoader(args.seq_dir, args.seq_dir + '/test_metadata.txt', args.seq_length, args.cnn_type)
print('get val loader done')
checkpoints_path = os.path.join(conf.CHECKPOINTS_PATH, args.model, datetime.now().isoformat())
if not os.path.exists(checkpoints_path):
os.makedirs(checkpoints_path)
checkpoints_path = os.path.join(checkpoints_path, '{model}-{epoch}-{type}.pth')
loss_function = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=conf.LEARNING_RATE, momentum=conf.MOMENTUM, weight_decay=conf.WEIGHT_DECAY)
train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=conf.MILESTONES, gamma=conf.GAMMA)
iter_per_epoch = len(train_loader)
warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * conf.WARM_EPOCH)
best_acc = 0.0
for epoch in range(1, conf.EPOCH):
if epoch > conf.WARM_EPOCH:
train_scheduler.step(epoch)
train(model, epoch, train_loader, loss_function, optimizer, warmup_scheduler, args.gpu)
acc = eval(model, epoch, val_loader, loss_function, args.gpu)
if best_acc < acc:
torch.save(model.state_dict(), checkpoints_path.format(model=args.model, epoch=epoch, type='best'))
best_acc = acc
continue
#if not epoch % conf.SAVE_EPOCH:
# torch.save(model.state_dict(), checkpoints_path.format(model=args.model, epoch=epoch, type='regular'))
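# Example invocation (the paths and the model/cnn names are placeholders; the
# flags mirror the argparse definitions above):
#
#     python train.py -model lstm -seq_dir ./data/sequences -seq_length 40 \
#         -cnn_type resnet50 -gpu
#
# The best-accuracy checkpoint is written under
# conf.CHECKPOINTS_PATH/<model>/<timestamp>/.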
| 37.377953
| 145
| 0.66758
|
993d7a39cc18d57d67bd02988910f72b0974940e
| 1,205
|
py
|
Python
|
Testing Code/img_sharp.py
|
amanwalia92/VisionChess
|
c57219b3b7ce1fd98b27573aa0a8658ceabd0593
|
[
"MIT"
] | null | null | null |
Testing Code/img_sharp.py
|
amanwalia92/VisionChess
|
c57219b3b7ce1fd98b27573aa0a8658ceabd0593
|
[
"MIT"
] | null | null | null |
Testing Code/img_sharp.py
|
amanwalia92/VisionChess
|
c57219b3b7ce1fd98b27573aa0a8658ceabd0593
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 3 16:34:55 2016
@author: aman
"""
# "Sharpen" an image by multiplying every pixel by 2, and then subtracting
# the average value of the neighborhood from it.
#See slide number 22 from IrfanEssa-CP-02-5-Filtering.pdf
#
# Jay Summet 2015
#
#Python 2.7, OpenCV 2.4.x
#
import cv2
import numpy as np
filename = '/home/aman/Pictures/18.png'
#Linux window/threading setup code.
#Load source / input image as grayscale, also works on color images...
imgIn = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
cv2.imshow("Original", imgIn)
#Create the identity filter, but with the 1 shifted to the right!
kernel = np.zeros( (9,9), np.float32)
kernel[4,4] = 2.0 #Identity, times two!
#Create a box filter:
boxFilter = np.ones( (9,9), np.float32) / 81.0
#Subtract the two:
kernel = kernel - boxFilter
#Note that we are subject to overflow and underflow here...but I believe that
# filter2D clips top and bottom ranges on the output, plus you'd need a
# very bright or very dark pixel surrounded by the opposite type.
custom = cv2.filter2D(imgIn, -1, kernel)
cv2.imshow("Sharpen", custom)
cv2.waitKey(0)
cv2.destroyAllWindows()
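# The same sharpening can be written as "2 * image minus its box blur"
# (comparison sketch only; border handling and intermediate rounding may
# differ slightly from the single filter2D call above):
#
#     blurred = cv2.blur(imgIn, (9, 9))                         # 9x9 box average
#     sharpened = cv2.addWeighted(imgIn, 2.0, blurred, -1.0, 0)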
| 21.517857
| 77
| 0.716183
|