blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22e3d0ce7d47a5f3494e82146632cd45c24f84d9 | 6b699b7763a0ff8c32b85014d96f6faf02514a2e | /models/official/utils/logs/hooks_helper.py | f4f14492389a8885bf6deb8eeb607c1422e0548f | [
"Apache-2.0"
] | permissive | leizeling/Base_tensorflow-object_detection_2Dcord | df7c195685fed21fd456f1dd79881a198cf8b6e0 | d07418eb68543adc2331211ccabbc27137c8676e | refs/heads/master | 2020-03-19T11:51:57.961688 | 2018-06-07T14:47:16 | 2018-06-07T14:47:16 | 136,481,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,802 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks helper to return a list of TensorFlow hooks for training by name.
More hooks can be added to this set. To add a new hook, 1) add the new hook to
the registry in HOOKS, 2) add a corresponding function that parses out necessary
parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.logs import hooks
from official.utils.logs import logger
from official.utils.logs import metric_hook
_TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate',
'cross_entropy',
'train_accuracy'])
def get_train_hooks(name_list, **kwargs):
  """Instantiate the training hooks named in `name_list`.

  Args:
    name_list: strings naming the desired hooks; valid names are the keys of
      HOOKS (LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, ...).
    **kwargs: keyword arguments forwarded to every hook factory.

  Returns:
    A list of instantiated hooks, ready to be used in a classifier.train call.

  Raises:
    ValueError: if a name does not match any registered hook.
  """
  if not name_list:
    return []

  train_hooks = []
  for name in name_list:
    factory = HOOKS.get(name.strip().lower())
    if factory is None:
      raise ValueError('Unrecognized training hook requested: {}'.format(name))
    train_hooks.append(factory(**kwargs))
  return train_hooks
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):  # pylint: disable=unused-argument
  """Build a LoggingTensorHook that prints selected tensors to stdout.

  Args:
    every_n_iter: `int`, print the values of `tensors` once every N local
      steps taken on the current worker.
    tensors_to_log: list of tensor names or dictionary mapping labels to
      tensor names. Defaults to _TENSORS_TO_LOG when not provided.
    **kwargs: extra arguments accepted (and ignored) so every hook factory
      shares one calling convention.

  Returns:
    A LoggingTensorHook covering the requested tensors.
  """
  tensors = _TENSORS_TO_LOG if tensors_to_log is None else tensors_to_log
  return tf.train.LoggingTensorHook(tensors=tensors, every_n_iter=every_n_iter)
def get_profiler_hook(save_steps=1000, **kwargs):  # pylint: disable=unused-argument
  """Function to get ProfilerHook.

  Args:
    save_steps: `int`, print profile traces every N steps.
    **kwargs: a dictionary of arguments; unused here, accepted so every hook
      factory shares the same calling convention.

  Returns:
    Returns a ProfilerHook that writes out timelines that can be loaded into
    profiling tools like chrome://tracing.
  """
  return tf.train.ProfilerHook(save_steps=save_steps)
def get_examples_per_second_hook(every_n_steps=100,
                                 batch_size=128,
                                 warm_steps=5,
                                 **kwargs):  # pylint: disable=unused-argument
  """Function to get ExamplesPerSecondHook.

  Args:
    every_n_steps: `int`, print current and average examples per second every
      N steps.
    batch_size: `int`, total batch size used to calculate examples/second from
      global time.
    warm_steps: skip this number of steps before logging and running average.
    **kwargs: a dictionary of arguments; unused here, accepted so every hook
      factory shares the same calling convention.

  Returns:
    Returns an ExamplesPerSecondHook that logs training throughput.
    (The previous docstring described a ProfilerHook; that was a copy-paste
    error — this factory returns hooks.ExamplesPerSecondHook.)
  """
  return hooks.ExamplesPerSecondHook(every_n_steps=every_n_steps,
                                     batch_size=batch_size,
                                     warm_steps=warm_steps)
def get_logging_metric_hook(benchmark_log_dir=None,
                            tensors_to_log=None,
                            every_n_secs=600,
                            **kwargs):  # pylint: disable=unused-argument
  """Function to get LoggingMetricHook.

  Args:
    benchmark_log_dir: `string`, directory path to save the metric log.
    tensors_to_log: List of tensor names or dictionary mapping labels to tensor
      names. If not set, log _TENSORS_TO_LOG by default.
    every_n_secs: `int`, the frequency for logging the metric. Default to every
      10 mins.
    **kwargs: a dictionary of arguments; unused here, accepted so every hook
      factory shares the same calling convention.

  Returns:
    Returns a LoggingMetricHook that writes the configured tensors to the
    benchmark logger.  (The previous docstring described a ProfilerHook; that
    was a copy-paste error.)
  """
  logger.config_benchmark_logger(benchmark_log_dir)
  if tensors_to_log is None:
    tensors_to_log = _TENSORS_TO_LOG
  return metric_hook.LoggingMetricHook(
      tensors=tensors_to_log,
      metric_logger=logger.get_benchmark_logger(),
      every_n_secs=every_n_secs)
# Registry mapping a canonical (lowercase) hook name to the factory function
# that builds it; get_train_hooks looks names up here.
HOOKS = {
    'loggingtensorhook': get_logging_tensor_hook,
    'profilerhook': get_profiler_hook,
    'examplespersecondhook': get_examples_per_second_hook,
    'loggingmetrichook': get_logging_metric_hook,
}
| [
"1072113944@qq.comm"
] | 1072113944@qq.comm |
895788a38bd1a6df0015b588a15a32c39f5d77a8 | b185a636557b0e2642f75a539752c5ecae402249 | /manage.py | ffd6ea0963435993ba9e8a989ed7c7dbe44b43b3 | [] | no_license | crowdbotics-apps/test-27645 | 4d90ad575eb59f05e078ea59ae6d79158581442d | 9a787e4430aca1bcb8e19882e08e0ee6d74704eb | refs/heads/master | 2023-05-08T13:03:18.363810 | 2021-06-01T09:01:06 | 2021-06-01T09:01:06 | 372,761,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility under this project's settings."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_27645.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
82f1aa278d2d713ff7e37345ceae2c185accee21 | c074ce302e0a2a09ebe8b0a94e342380afbaa911 | /beakjoon_PS/no17144.py | 74a99479091ac9aefa711ffac7d15e85ca9d1f48 | [] | no_license | elrion018/CS_study | eeea7a48e9e9b116ddf561ebf10633670d305722 | 3d5478620c4d23343ae0518d27920b3211f686fd | refs/heads/master | 2021-06-10T13:35:20.258335 | 2021-04-25T10:12:17 | 2021-04-25T10:12:17 | 169,424,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,424 | py | import sys
def four_way_spread(y,x, room, temp1, temp2):
    """Spread the dust at (y, x) to its four orthogonal neighbors.

    One fifth (integer division) of the cell's dust moves to each in-bounds
    neighbor that is not the air cleaner (-1).  The grids are staged:
    temp1 accumulates dust *gained* per destination cell, temp2 accumulates
    dust *lost* per source cell; sum_dust later folds both into room.

    NOTE(review): bounds come from the module-level globals r and c rather
    than parameters — confirm callers always define them before use.
    """
    dy = [0,0,1,-1]
    dx = [1,-1,0,0]
    if room[y][x] > 0:
        for k in range(4):
            ay = y + dy[k]
            ax = x + dx[k]
            if ay >= 0 and ay <r and ax>=0 and ax <c and room[ay][ax] != -1:
                part = room[y][x] // 5
                temp1[ay][ax] += part
                temp2[y][x] += part
    return room, temp1, temp2
def sum_dust(r,c,room, temp1,temp2):
    """Fold the staged dust deltas (gained minus lost) back into room, in place."""
    for row in range(r):
        for col in range(c):
            room[row][col] += temp1[row][col] - temp2[row][col]
    return room
def spread_dust(r,c,room):
    """Run one diffusion step: every dusty cell spreads to its neighbors."""
    gained = [[0]*c for _ in range(r)]
    lost = [[0]*c for _ in range(r)]
    for y in range(r):
        for x in range(c):
            room, gained, lost = four_way_spread(y, x, room, gained, lost)
    return sum_dust(r, c, room, gained, lost)
def wind_dust(r,c, room):
    """Apply one pass of the air cleaner's circulation to the room grid.

    The cleaner occupies two vertically adjacent cells marked -1.  The scan
    below only breaks the inner loop, so cleaner_y is reassigned for every
    row containing a -1 and ends up at the *bottom* cleaner row.  area1 then
    holds the rows down to and including the top cleaner cell (circulated
    counter-clockwise by wind_area1) and area2 the remaining rows including
    the bottom cleaner cell (circulated clockwise by wind_area2).
    """
    cleaner_y = None
    for y in range(r):
        for x in range(c):
            if room[y][x] == -1:
                cleaner_y = y
                break
    area1 = room[0:cleaner_y]
    area2 = room[cleaner_y:]
    # area 1 (counter-clockwise)
    area1 = wind_area1(area1,c)
    area2 = wind_area2(area2,c)
    return area1 + area2
# area 2 (clockwise) — handled by wind_area2 below
def wind_area1(area1,c):
    """Rotate the border cells of the upper area one step counter-clockwise.

    Only the outer ring of the sub-grid moves; interior cells are copied
    unchanged.  Each border cell's value is written into the next position
    along a counter-clockwise circuit (top row moves left, left column moves
    down, bottom row moves right, right column moves up), with the four
    corners routed explicitly first so they take exactly one step.
    """
    temp = [[0]*c for _ in range(len(area1))]
    for y in range(len(area1)):
        for x in range(c):
            if y-1 < 0 and x-1 < 0: # top-left corner
                temp[y+1][x] = area1[y][x]
            elif y-1 <0 and x +1 == c: # top-right corner
                temp[y][x-1] = area1[y][x]
            elif y+1 == len(area1) and x -1 <0: # bottom-left corner
                temp[y][x+1] = area1[y][x]
            elif y+1 == len(area1) and x + 1 == c: # bottom-right corner
                temp[y-1][x] = area1[y][x]
            elif y-1 <0: # top edge: shift left
                temp[y][x-1] = area1[y][x]
            elif y+1 == len(area1): # bottom edge: shift right
                temp[y][x+1] = area1[y][x]
            elif x-1 <0: # left edge: shift down
                temp[y+1][x] = area1[y][x]
            elif x+1 == c: # right edge: shift up
                temp[y-1][x] = area1[y][x]
            else: # interior: unchanged
                temp[y][x] = area1[y][x]
    area1 = overwrite_area(temp, area1, c)
    return area1
def wind_area2(area2,c):
    """Rotate the border cells of the lower area one step clockwise.

    Mirror image of wind_area1: the top row moves right, the right column
    moves down, the bottom row moves left, and the left column moves up,
    with corners routed explicitly.  Interior cells are copied unchanged.
    """
    temp = [[0]*c for _ in range(len(area2))]
    for y in range(len(area2)):
        for x in range(c):
            if y-1 < 0 and x-1 < 0: # top-left corner
                temp[y][x+1] = area2[y][x]
            elif y-1 <0 and x +1 == c: # top-right corner
                temp[y+1][x] = area2[y][x]
            elif y+1 == len(area2) and x -1 <0: # bottom-left corner
                temp[y-1][x] = area2[y][x]
            elif y+1 == len(area2) and x + 1 == c: # bottom-right corner
                temp[y][x-1] = area2[y][x]
            elif y-1 <0: # top edge: shift right
                temp[y][x+1] = area2[y][x]
            elif y+1 == len(area2): # bottom edge: shift left
                temp[y][x-1] = area2[y][x]
            elif x-1 <0: # left edge: shift up
                temp[y-1][x] = area2[y][x]
            elif x+1 == c: # right edge: shift down
                temp[y+1][x] = area2[y][x]
            else: # interior: unchanged
                temp[y][x] = area2[y][x]
    area2 = overwrite_area(temp, area2, c)
    return area2
def overwrite_area(temp, area,c):
    """Copy the shifted grid back onto area, keeping cleaner cells (-1) fixed.

    A cell that would receive the cleaner marker gets fresh air (0) instead,
    modeling the cleaner blowing out clean air.  Mutates and returns area.
    """
    for y in range(len(area)):
        for x in range(c):
            if area[y][x] == -1:
                continue  # the cleaner itself never moves
            shifted = temp[y][x]
            area[y][x] = 0 if shifted == -1 else shifted
    return area
def get_answer(r,c, room):
    """Return the total dust left in the room, excluding cleaner cells (-1)."""
    total = 0
    for y in range(r):
        total += sum(room[y][x] for x in range(c) if room[y][x] != -1)
    return total
def solution(r,c,t, room):
    """Simulate t seconds of spread + circulation; return the remaining dust."""
    for _ in range(t):
        room = wind_dust(r, c, spread_dust(r, c, room))
    return get_answer(r, c, room)
# Read grid size (r x c), simulation time t, and the initial layout, then run.
r,c,t = map(int, sys.stdin.readline().split())
room = [list(map(int, sys.stdin.readline().split())) for _ in range(r)]
print(solution(r,c,t, room))
"elrion018@gmail.com"
] | elrion018@gmail.com |
1a0e35d230b3b14fdb921c31b726a5c7fe3ab471 | 4cf5b34118daa3c26a47c7213eaaa656b3ab57f0 | /src/treeminer.py | 2e40ad5b834c7f67b85145bdbe3f55bcbf42e320 | [] | no_license | vrthra/Cmimid | a177b5dbd3ef69852b6746ee09a140f35cbc0580 | 25465201830b35a0338407b0a4e13ef512881731 | refs/heads/master | 2020-06-17T08:24:46.465783 | 2019-11-11T15:17:25 | 2019-11-11T15:17:25 | 195,860,435 | 0 | 1 | null | 2019-09-10T09:22:53 | 2019-07-08T17:49:25 | Python | UTF-8 | Python | false | false | 6,808 | py | import sys
import json
import itertools as it
from operator import itemgetter
from fuzzingbook.GrammarFuzzer import tree_to_string
def reconstruct_method_tree(method_map):
first_id = None
tree_map = {}
for key in method_map:
m_id, m_name, m_children = method_map[key]
children = []
if m_id in tree_map:
# just update the name and children
assert not tree_map[m_id]
tree_map[m_id]['id'] = m_id
tree_map[m_id]['name'] = m_name
tree_map[m_id]['indexes'] = []
tree_map[m_id]['children'] = children
else:
assert first_id is None
tree_map[m_id] = {'id': m_id, 'name': m_name, 'children': children, 'indexes': []}
first_id = m_id
for c in m_children:
assert c not in tree_map
val = {}
tree_map[c] = val
children.append(val)
return first_id, tree_map
def last_comparisons(comparisons):
HEURISTIC = True
last_cmp_only = {}
last_idx = {}
# get the last indexes compared in methods.
# first, for each method, find the index that
# was accessed in that method invocation last.
for idx, char, mid in comparisons:
if mid in last_idx:
if idx > last_idx[mid]:
last_idx[mid] = idx
else:
last_idx[mid] = idx
# next, for each index, find the method that
# accessed it last.
for idx, char, mid in comparisons:
if HEURISTIC:
if idx in last_cmp_only:
if last_cmp_only[idx] > mid:
# do not clobber children unless it was the last character
# for that child.
if last_idx[mid] > idx:
# if it was the last index, may be the child used it
# as a boundary check.
continue
last_cmp_only[idx] = mid
return last_cmp_only
def attach_comparisons(method_tree, comparisons):
for idx in comparisons:
mid = comparisons[idx]
method_tree[mid]['indexes'].append(idx)
def to_node(idxes, my_str):
assert len(idxes) == idxes[-1] - idxes[0] + 1
assert min(idxes) == idxes[0]
assert max(idxes) == idxes[-1]
return my_str[idxes[0]:idxes[-1] + 1], [], idxes[0], idxes[-1]
def indexes_to_children(indexes, my_str):
lst = [
list(map(itemgetter(1), g))
for k, g in it.groupby(enumerate(indexes), lambda x: x[0] - x[1])
]
return [to_node(n, my_str) for n in lst]
def does_item_overlap(s, e, s_, e_):
return (s_ >= s and s_ <= e) or (e_ >= s and e_ <= e) or (s_ <= s and e_ >= e)
def is_second_item_included(s, e, s_, e_):
return (s_ >= s and e_ <= e)
def has_overlap(ranges, s_, e_):
return {(s, e) for (s, e) in ranges if does_item_overlap(s, e, s_, e_)}
def is_included(ranges, s_, e_):
return {(s, e) for (s, e) in ranges if is_second_item_included(s, e, s_, e_)}
def remove_overlap_from(original_node, orange):
node, children, start, end = original_node
new_children = []
if not children:
return None
start = -1
end = -1
for child in children:
if does_item_overlap(*child[2:4], *orange):
new_child = remove_overlap_from(child, orange)
if new_child: # and new_child[1]:
if start == -1: start = new_child[2]
new_children.append(new_child)
end = new_child[3]
else:
new_children.append(child)
if start == -1: start = child[2]
end = child[3]
if not new_children:
return None
assert start != -1
assert end != -1
return (node, new_children, start, end)
def no_overlap(arr):
my_ranges = {}
for a in arr:
_, _, s, e = a
included = is_included(my_ranges, s, e)
if included:
continue # we will fill up the blanks later.
else:
overlaps = has_overlap(my_ranges, s, e)
if overlaps:
# unlike include which can happen only once in a set of
# non-overlapping ranges, overlaps can happen on multiple parts.
# The rule is, the later child gets the say. So, we recursively
# remove any ranges that overlap with the current one from the
# overlapped range.
# assert len(overlaps) == 1
#oitem = list(overlaps)[0]
for oitem in overlaps:
v = remove_overlap_from(my_ranges[oitem], (s,e))
del my_ranges[oitem]
if v:
my_ranges[v[2:4]] = v
my_ranges[(s, e)] = a
else:
my_ranges[(s, e)] = a
res = my_ranges.values()
# assert no overlap, and order by starting index
s = sorted(res, key=lambda x: x[2])
return s
def to_tree(node, my_str):
method_name = ("<%s>" % node['name']) if node['name'] is not None else '<START>'
indexes = node['indexes']
node_children = []
for c in node.get('children', []):
t = to_tree(c, my_str)
if t is None: continue
node_children.append(t)
idx_children = indexes_to_children(indexes, my_str)
children = no_overlap(node_children + idx_children)
if not children:
return None
start_idx = children[0][2]
end_idx = children[-1][3]
si = start_idx
my_children = []
# FILL IN chars that we did not compare. This is likely due to an i + n
# instruction.
for c in children:
if c[2] != si:
sbs = my_str[si: c[2]]
my_children.append((sbs, [], si, c[2] - 1))
my_children.append(c)
si = c[3] + 1
m = (method_name, my_children, start_idx, end_idx)
return m
import os.path, copy, random
random.seed(0)
def miner(call_traces):
my_trees = []
for call_trace in call_traces:
method_map = call_trace['method_map']
first, method_tree = reconstruct_method_tree(method_map)
comparisons = call_trace['comparisons']
attach_comparisons(method_tree, last_comparisons(comparisons))
my_str = call_trace['inputstr']
#print("INPUT:", my_str, file=sys.stderr)
tree = to_tree(method_tree[first], my_str)
#print("RECONSTRUCTED INPUT:", tree_to_string(tree), file=sys.stderr)
my_tree = {'tree': tree, 'original': call_trace['original'], 'arg': call_trace['arg']}
assert tree_to_string(tree) == my_str
my_trees.append(my_tree)
return my_trees
def main(tracefile):
with open(tracefile) as f:
my_trace = json.load(f)
mined_trees = miner(my_trace)
json.dump(mined_trees, sys.stdout)
main(sys.argv[1])
| [
"rahul@gopinath.org"
] | rahul@gopinath.org |
810b7d95938ec105f9de7bc48d3eb11b3338bb3f | 471ea669e21abdb4e4915610b4b5eb43ea3cffe9 | /剑指Offer/27.字符串的排列.py | ca20aaf9bf80c69e0f52a5f8cce2ced8237e0549 | [] | no_license | JiahuaLink/nowcoder-leetcode | 26aed099e215cfc1d8e8afffc62fafa26b26b06f | 0155fc33511cbe892f58550d561d3aa3efcd56b9 | refs/heads/master | 2023-07-09T03:05:31.227720 | 2021-08-03T06:50:36 | 2021-08-03T06:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | # 使用迭代工具类的排列方法
import itertools
class Solution:
    def Permutation(self, ss):
        """Return all distinct permutations of ss, sorted lexicographically.

        A falsy input (empty string) yields an empty list, not [''].
        """
        if not ss:
            return []
        # set() removes duplicates produced by repeated characters; sorted()
        # accepts any iterable, so the redundant list() wrapper was dropped.
        return sorted(set(map(''.join, itertools.permutations(ss))))
"noreply@github.com"
] | JiahuaLink.noreply@github.com |
e2905bb4e5faef01c90b6b4b41b6d12df04ef5c3 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v11/enums/types/feed_item_set_status.py | 67e70b95b7c03afefde49c4e499c07f3d86fe750 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 1,152 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# proto-plus module registration: binds the messages named in `manifest` to
# the v11 enums package for (de)serialization.  This file follows the
# generated google-ads client layout; edit with care.
__protobuf__ = proto.module(
    package="google.ads.googleads.v11.enums",
    marshal="google.ads.googleads.v11",
    manifest={"FeedItemSetStatusEnum",},
)
class FeedItemSetStatusEnum(proto.Message):
    r"""Container for enum describing possible statuses of a feed
    item set.
    """

    class FeedItemSetStatus(proto.Enum):
        r"""Possible statuses of a feed item set.

        Values follow the standard proto enum convention used throughout
        this package: 0/1 are the unspecified/unknown sentinels.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        REMOVED = 3
# Export exactly the names declared in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
3c01565ab29a50b5e2ab3b2e0ebc8671fa00fa1d | 84fbca1dd86aa1d7d65afd65bc65a71420513fb8 | /queriesapp/migrations/0002_auto_20200313_1226.py | 4fc15376311aefa9eef66940a093094bb9bb41e9 | [] | no_license | corri-golden/queries | 40a7415868149014dcf9d20dedd17801b4552f0b | fc7a4f26f185e3a6018cd257a12abe74019db531 | refs/heads/master | 2021-09-26T16:47:14.470875 | 2021-03-20T04:23:57 | 2021-03-20T04:23:57 | 245,893,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # Generated by Django 3.0.4 on 2020-03-13 12:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the Status model and a required Query.status foreign key.

    NOTE(review): existing Query rows are backfilled with status pk=1; this
    assumes a Status row with id 1 exists when the migration runs — confirm
    a fixture or data migration provides it.
    """

    dependencies = [
        ('queriesapp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status_name', models.CharField(max_length=50)),
            ],
            options={
                'verbose_name': 'status',
                'verbose_name_plural': 'statuses',
            },
        ),
        migrations.AddField(
            model_name='query',
            name='status',
            # default=1 only backfills existing rows; preserve_default=False
            # strips the default from the final field definition.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='queriesapp.Status'),
            preserve_default=False,
        ),
    ]
| [
"corri.golden@gmail.com"
] | corri.golden@gmail.com |
52c1148649b7e02419ea12246d13cbd9cc58ec08 | b85592e0d0768e250b1e968ba6bacbeae0d91623 | /manage.py | 1116323941b02c0807089dec868b05112ce7b773 | [] | no_license | KimaruThagna/graphql-Example | 206824d60f6c523a27bc0875e7af1004ce4720f4 | 874e862e7e7d54d2767cd7615e7374c904814a42 | refs/heads/master | 2021-07-25T23:22:16.647747 | 2017-11-06T08:26:43 | 2017-11-06T08:26:43 | 106,149,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: select the project settings, then hand the
    # command line to Django's management machinery.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "GraphQl.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django imports fine on its own, so re-raise the original error.
        raise
    execute_from_command_line(sys.argv)
| [
"thagana44@gmail.com"
] | thagana44@gmail.com |
a65d310a45773ea11f0818aa6af806ea9d490511 | 3de1e940512394a6aebe499a9ce07a33a427ea7b | /tools/stats/monitor.py | 972d0dbea038bda843c1db9f7728bd4e8bbd1fa5 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | shinh/pytorch | 3719d1a6129db16e932452bec9b6646edf153226 | c6b7c33885eeff9dc125f87c7134772d59d0ba21 | refs/heads/master | 2022-10-21T22:03:14.775982 | 2022-10-10T05:47:11 | 2022-10-10T05:47:11 | 220,897,452 | 1 | 0 | NOASSERTION | 2019-11-11T03:56:40 | 2019-11-11T03:56:39 | null | UTF-8 | Python | false | false | 2,871 | py | #!/usr/bin/env python3
import datetime
import json
import signal
import time
from typing import Any, Dict, List
import psutil # type: ignore[import]
import pynvml # type: ignore[import]
def get_processes_running_python_tests() -> List[Any]:
    """Return live psutil processes whose name contains "python" and that
    have a non-empty command line."""
    matched = []
    for proc in psutil.process_iter():
        try:
            if proc.cmdline() and "python" in proc.name():
                matched.append(proc)
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Process died mid-scan or we lack permission — skip it.
            continue
    return matched
def get_per_process_cpu_info() -> List[Dict[str, Any]]:
    """Collect CPU and memory stats for every running python test process.

    Returns:
        One dict per process with pid, command line, cpu percent, and rss/uss
        memory; pss_memory is included where the platform reports it (Linux).
    """
    processes = get_processes_running_python_tests()
    per_process_info = []
    for p in processes:
        # memory_full_info() is expensive; call it once and reuse the result.
        mem_full = p.memory_full_info()
        info = {
            "pid": p.pid,
            "cmd": " ".join(p.cmdline()),
            "cpu_percent": p.cpu_percent(),
            "rss_memory": p.memory_info().rss,
            "uss_memory": mem_full.uss,
        }
        # `pss` is a *field* of the memory info namedtuple, only present on
        # Linux.  The original `"pss" in p.memory_full_info()` tested
        # membership among the tuple's numeric values, so it was never true
        # and pss_memory was never recorded.
        if hasattr(mem_full, "pss"):
            info["pss_memory"] = mem_full.pss
        per_process_info.append(info)
    return per_process_info
def get_per_process_gpu_info(handle: Any) -> List[Dict[str, Any]]:
    """Return pid and GPU memory usage for every compute process on `handle`."""
    return [
        {"pid": proc.pid, "gpu_memory": proc.usedGpuMemory}
        for proc in pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
    ]
if __name__ == "__main__":
    # Grab a handle to GPU 0 when NVML is usable; otherwise run CPU-only.
    handle = None
    try:
        pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
    except pynvml.NVMLError:
        # no pynvml available, probably because not cuda
        pass

    kill_now = False

    def exit_gracefully(*args: Any) -> None:
        # SIGTERM handler: flip the flag so the loop below exits cleanly
        # after finishing its current iteration.
        global kill_now
        kill_now = True

    signal.signal(signal.SIGTERM, exit_gracefully)

    # Emit one JSON document per second until terminated; consumers read the
    # stream line by line.  Errors are reported in-band instead of crashing.
    while not kill_now:
        try:
            stats = {
                "time": datetime.datetime.utcnow().isoformat("T") + "Z",
                "total_cpu_percent": psutil.cpu_percent(),
                "per_process_cpu_info": get_per_process_cpu_info(),
            }
            if handle is not None:
                stats["per_process_gpu_info"] = get_per_process_gpu_info(handle)
                # https://docs.nvidia.com/deploy/nvml-api/structnvmlUtilization__t.html
                gpu_utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
                stats["total_gpu_utilization"] = gpu_utilization.gpu
                stats["total_gpu_mem_utilization"] = gpu_utilization.memory
        except Exception as e:
            stats = {
                "time": datetime.datetime.utcnow().isoformat("T") + "Z",
                "error": str(e),
            }
        finally:
            print(json.dumps(stats))
            time.sleep(1)
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
fc15825c91764aeef1e3c0d91ecb2bc9384037cb | d5fc28473e41a11e3ee793362e80f2db83b1d386 | /tests/unit/cli.py | 1f4810cb82091b028bc3a6c0ceb35d0071fb39b7 | [
"Apache-2.0"
] | permissive | starbops/haas | 71335ea29dbf06579381c7745176ee9f7c86d423 | 751d4fc27732ac7d660886b7c47948300c606460 | refs/heads/master | 2020-05-29T11:37:40.484664 | 2016-03-17T03:04:11 | 2016-03-17T03:04:11 | 53,773,276 | 0 | 0 | null | 2016-03-13T07:41:34 | 2016-03-13T07:41:34 | null | UTF-8 | Python | false | false | 1,877 | py | import pytest
import tempfile
import os
import signal
from subprocess import check_call, Popen
from time import sleep
# Minimal haas.cfg written into a fresh temp dir for every test below: a
# sqlite DB in the CWD plus null auth/network-allocator extensions so no
# external services are required.
config = """
[headnode]
base_imgs = base-headnode, img1, img2, img3, img4
[database]
uri = sqlite:///haas.db
[extensions]
haas.ext.auth.null =
haas.ext.network_allocators.null =
"""
@pytest.fixture(autouse=True)
def make_config(request):
    """Run each test inside a fresh temp dir containing a haas.cfg, and
    restore/clean up afterwards."""
    workdir = tempfile.mkdtemp()
    prev_cwd = os.getcwd()
    os.chdir(workdir)
    with open('haas.cfg', 'w') as f:
        f.write(config)

    def cleanup():
        os.remove('haas.cfg')
        os.remove('haas.db')
        os.chdir(prev_cwd)
        os.rmdir(workdir)

    request.addfinalizer(cleanup)
def test_init_db():
    # Smoke test: database initialization must exit with status 0.
    check_call(['haas', 'init_db'])
def runs_for_seconds(cmd, seconds=1):
    """Return True if *cmd* is still running after *seconds* seconds.

    ``cmd`` is an argv list; ``seconds`` defaults to 1.  The command is
    started, left alone for ``seconds`` seconds, then sent SIGTERM and
    reaped.  If the exit status shows it died from our SIGTERM it was still
    alive the whole time and we return True; any other status means it
    stopped on its own first, so we return False.

    This is a pragmatic check that a server process does not die immediately
    on startup — proving it would never die is the halting problem.
    """
    child = Popen(cmd)
    sleep(seconds)
    child.terminate()
    return child.wait() == -signal.SIGTERM
def test_serve():
    check_call(['haas', 'init_db'])
    # The API server should stay up for at least a second after starting.
    assert runs_for_seconds(['haas', 'serve', '5000'], seconds=1)
def test_serve_networks():
    check_call(['haas', 'init_db'])
    # The network daemon should stay up for at least a second after starting.
    assert runs_for_seconds(['haas', 'serve_networks'], seconds=1)
| [
"ian@zenhack.net"
] | ian@zenhack.net |
dd1c933da5c0589e147cfac927c95849c3d02401 | 07c75f8717683b9c84864c446a460681150fb6a9 | /back_cursor/S-scrapy/zhilian/zhilian/pipelines.py | ce305e87f0e027bad9b1bb740e708506227f2072 | [] | no_license | laomu/py_1709 | 987d9307d9025001bd4386381899eb3778f9ccd6 | 80630e6ac3ed348a2a6445e90754bb6198cfe65a | refs/heads/master | 2021-05-11T09:56:45.382526 | 2018-01-19T07:08:00 | 2018-01-19T07:08:00 | 118,088,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Import the sqlalchemy engine/session helpers
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Register pymysql as a drop-in replacement for MySQLdb, which SQLAlchemy's
# mysql:// dialect imports under the hood.
import pymysql
pymysql.install_as_MySQLdb()
class ZhilianPipeline(object):
    """Zhilian (zhaopin.com) spider pipeline: stores scraped jobs in MySQL."""

    def __init__(self):
        # Open a connection engine to the database and start a session.
        engine = create_engine("mysql://root:@localhost/py1709_spider?charset=utf8")
        Session = sessionmaker(bind=engine)
        self.session = Session()

    def process_item(self, item, spider):
        # Bound parameters instead of %-interpolation: scraped values
        # containing quotes no longer break the statement, and SQL injection
        # through page content is prevented.  SQLAlchemy coerces the string
        # to text() and binds the :name parameters.
        zl_sql = (
            "insert into jobs(job_name, company, salary) "
            "values(:job_name, :company, :salary)"
        )
        self.session.execute(
            zl_sql,
            {
                "job_name": item["job_name"],
                "company": item["company"],
                "salary": item["salary"],
            },
        )
        return item

    def close_spider(self, spider):
        # Commit pending rows and close the database session.
        self.session.commit()
        self.session.close()
| [
"1007821300@qq.com"
] | 1007821300@qq.com |
7f5466358e1afe8affcb50531035a634f09d47f2 | 3e3bf98840d133e56f0d0eb16ba85678ddd6ca45 | /.history/iss_20200102114952.py | b10444b89fa1b6c3096e6c74cc748aa78096e84a | [] | no_license | Imraj423/backend-iss-location-assessment | a05d3cc229a5fc4857483ae466348c1f8c23c234 | b0565c089a445ccffcb8d0aab3c0be3bb0c1d5b8 | refs/heads/master | 2020-12-03T17:04:58.512124 | 2020-06-24T16:02:02 | 2020-06-24T16:02:02 | 231,400,854 | 0 | 0 | null | 2020-06-24T16:02:04 | 2020-01-02T14:43:44 | null | UTF-8 | Python | false | false | 920 | py | import requests
import turtle
import time

# Build the map window and an ISS sprite to move around on it.
screen = turtle.Screen()
image = "iss.gif"
screen.addshape(image)
raf = turtle.Turtle()
raf.shape(image)
raf.setheading(90)
raf.penup()
screen.bgpic("map.gif")
screen.screensize(4000, 3000)
screen.setup(width=800, height=600, startx=0, starty=0)
# NOTE: the original called screen.exitonclick() here, which enters the Tk
# mainloop and blocks forever, so the tracking loop below never ran.

while True:
    s = requests.get('http://api.open-notify.org/iss-now.json')
    s.raise_for_status()  # was `s.raise_for_status` — missing call parens
    # A requests Response has no .read(); .json() parses the payload directly
    # (the original also used the `json` module without importing it).
    result = s.json()
    print(s.text)
    # Let's extract the required information
    location = result["iss_position"]
    lat = location["latitude"]
    lon = location["longitude"]
    # Output information on screen
    print("\nLatitude: " + str(lat))
    print("Longitude: " + str(lon))
    # Plot the ISS on the map: the API returns coordinates as strings, so
    # convert before handing them to turtle (x = longitude, y = latitude).
    raf.goto(float(lon), float(lat))
    # refresh position every 5 seconds
    time.sleep(5)

# Unreachable while the loop above runs; kept from the original draft.
r = requests.get('http://api.open-notify.org/astros.json')
r.raise_for_status()
print(r.text)
"dahqniss@gmail.com"
] | dahqniss@gmail.com |
a802808f5187909756b07ccfd2d5e6956da34179 | 866b7169c069c153bacfa7961dce8909aa391faa | /blog/admin.py | e7b63a18af39c397f3075c4067021ab8e245f26b | [] | no_license | nikolasribeiro/pagina-jovenes-40 | d2502b5d1569000eb7dc059f62b66978ea8642ab | 5618cbc1ac03f6dd47eba6360dbdbb2ead70f268 | refs/heads/main | 2023-03-08T18:45:08.323432 | 2021-02-27T22:36:47 | 2021-02-27T22:36:47 | 336,833,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | from django.contrib import admin
from .models import Blog, Categoria
# Import Export para guardar datos
from import_export import resources
from import_export.admin import ImportExportModelAdmin
# Clases del import Export
class BlogResource(resources.ModelResource):
    """django-import-export resource describing Blog (de)serialization."""

    class Meta:
        model = Blog
class BlogAdmin(ImportExportModelAdmin, admin.ModelAdmin):
    """Admin for Blog with import/export support."""

    # Columns shown on the admin changelist page.
    list_display = (
        'titulo_blog',
        'subtitulo_blog',
        'imagen_blog',
        'slug',
        'descripcion_breve',
        'contenido_blog',
        'autor',
        'fecha_publicacion',
    )
    resource_class = BlogResource
class CategoriaResource(resources.ModelResource):
    """django-import-export resource describing Categoria (de)serialization."""

    class Meta:
        # Was `model = Blog` — a copy-paste from BlogResource that made the
        # category admin import/export Blog rows instead of Categoria rows.
        model = Categoria
class CategoriaAdmin(ImportExportModelAdmin, admin.ModelAdmin):
    """Admin for Categoria with import/export support."""

    # Columns shown on the admin changelist page.
    list_display = (
        'nombre_categoria',
    )
    resource_class = CategoriaResource
# Register the blog models with their import/export-enabled admin classes.
admin.site.register(Blog, BlogAdmin)
admin.site.register(Categoria, CategoriaAdmin)
"nikolasribeiro2@outlook.com"
] | nikolasribeiro2@outlook.com |
cf43f252196aa5e91466d9092828816745be5ca3 | 3851d5eafcc5fd240a06a7d95a925518412cafa0 | /Django_Code/gs25/gs25/asgi.py | cb53a37a73a74e22db638ccc220b4e981d1b6bca | [] | no_license | Ikshansaleem/DjangoandRest | c0fafaecde13570ffd1d5f08019e04e1212cc2f3 | 0ccc620ca609b4ab99a9efa650b5893ba65de3c5 | refs/heads/master | 2023-01-31T04:37:57.746016 | 2020-12-10T06:27:24 | 2020-12-10T06:27:24 | 320,180,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
ASGI config for gs25 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs25.settings')
application = get_asgi_application()
| [
"ikshan3108@gmail.com"
] | ikshan3108@gmail.com |
c67b2260e2dab5a6ed7a5447cb5d23fbae2047c7 | 07131e91dcf2529e9c7058f8a8f239d419c8f7e0 | /1447.simplified-fractions.py | 240c2196f222b390151a2ea343df329835207f3d | [] | no_license | Code-Wen/LeetCode_Notes | 5194c5c5306cb9f4a0fac85e06fefe6c02d65d44 | 791fc1b43beef89d668788de6d12f5c643431b8f | refs/heads/master | 2021-07-04T14:41:00.830723 | 2020-09-27T16:31:22 | 2020-09-27T16:31:22 | 178,456,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | #
# @lc app=leetcode id=1447 lang=python3
#
# [1447] Simplified Fractions
#
# https://leetcode.com/problems/simplified-fractions/description/
#
# algorithms
# Medium (59.85%)
# Likes: 50
# Dislikes: 6
# Total Accepted: 7.5K
# Total Submissions: 12.5K
# Testcase Example: '2\r'
#
# Given an integer n, return a list of all simplified fractions between 0 and 1
# (exclusive) such that the denominator is less-than-or-equal-to n. The
# fractions can be in any order.
#
#
# Example 1:
#
#
# Input: n = 2
# Output: ["1/2"]
# Explanation: "1/2" is the only unique fraction with a denominator
# less-than-or-equal-to 2.
#
# Example 2:
#
#
# Input: n = 3
# Output: ["1/2","1/3","2/3"]
#
#
# Example 3:
#
#
# Input: n = 4
# Output: ["1/2","1/3","1/4","2/3","3/4"]
# Explanation: "2/4" is not a simplified fraction because it can be simplified
# to "1/2".
#
# Example 4:
#
#
# Input: n = 1
# Output: []
#
#
#
# Constraints:
#
#
# 1 <= n <= 100
#
#
# @lc code=start
class Solution:
def simplifiedFractions(self, n: int) -> List[str]:
res = []
def gcd(n1,n2):
if n1 < n2:
n1, n2 = n2, n1
while n2 > 0:
n2, n1 = n1%n2, n2
return n1
for denom in range(2, n+1):
for numerator in range(1, denom):
if gcd(denom, numerator) == 1:
res.append(str(numerator)+"/"+str(denom))
return res
# @lc code=end
| [
"chenxu.wen.math@gmail.com"
] | chenxu.wen.math@gmail.com |
6924ccab05426697554fea2c956596a548469849 | 4d2475135f5fc9cea73572b16f59bfdc7232e407 | /prob130_surrounded_regions.py | cbb05c421a1740056d03c472983e5335f0f064ce | [] | no_license | Hu-Wenchao/leetcode | 5fa0ae474aadaba372756d234bc5ec397c8dba50 | 31b2b4dc1e5c3b1c53b333fe30b98ed04b0bdacc | refs/heads/master | 2021-06-24T04:57:45.340001 | 2017-06-17T02:33:09 | 2017-06-17T02:33:09 | 45,328,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | """
Given a 2D board containing 'X' and 'O', capture
all regions surrounded by 'X'.
A region is captured by flipping all 'O's into 'X's
in that surrounded region.
For example,
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
"""
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
if not board:
return
m, n = len(board), len(board[0])
stack = [(i, 0) for i in range(m)] + [(i, n-1) for i in range(m)]
stack += [(0, j) for j in range(1, n-1)] + \
[(m-1, j) for j in range(1, n-1)]
while stack:
i, j = stack.pop()
if 0 <= i < m and 0 <= j < n and board[i][j] == 'O':
board[i][j] = 'S'
stack.append((i-1, j))
stack.append((i+1, j))
stack.append((i, j+1))
stack.append((i, j-1))
board[:] = [['XO'[node=='S'] for node in row] for row in board]
| [
"huwcbill@gmail.com"
] | huwcbill@gmail.com |
08219127157ebde6d147ef569a41948422a2de14 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Fitbit/Sleep/__init__.py | 69ab238a19c2f3b9dae489cc14501cebfa600295 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | from DeleteSleepLog import *
from LogSleep import *
from GetSleep import *
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
73e6a2ec930cc79de0a569f1fe7e1d0881098d19 | 4a52362d2a46c747af74e3c321b1bd9d73bd0116 | /virtual/bin/static | fcf01443e92188fc7d4641feefe749e4664b5659 | [
"MIT"
] | permissive | TonyKioko/PichaZa | 1420242b48c204637a166778084aaa4cb3776938 | 8e2e8f3d002a624fe64ce089e4581265080975d6 | refs/heads/master | 2020-03-30T01:31:53.034302 | 2018-10-03T07:35:30 | 2018-10-03T07:35:30 | 150,580,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/home/tony/Desktop/PichaZa/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from static import command
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(command())
| [
"tonykioko384@gmail.com"
] | tonykioko384@gmail.com | |
a754658047ec2d9bd7b8997aa6df168e5080f297 | 0fc2b99fd8414dbce5f1f6057b9b800c968d5d05 | /tests/pysge/test_pysge.py | a54c3b00256e1b2b635b0b89ce3728218a840e3b | [
"MIT"
] | permissive | widdowquinn/lpbio | 9df898cb9580f62da1f66d5736cbf7a984633561 | 8b95642396d05a56c1c54389e3de6d88d7cbffb5 | refs/heads/master | 2020-03-29T02:08:56.675473 | 2019-11-07T14:27:44 | 2019-11-07T14:27:44 | 149,422,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,938 | py | # -*- coding: utf-8 -*-
"""Tests of SGE job submission"""
import shutil
import time
import unittest
import pytest
from lpbio import pysge
class TestPysge(unittest.TestCase):
"""Class collecting tests for pysge"""
@staticmethod
def test_create_job():
"""Create Job for SGE-like scheduler"""
pysge.Job(name="test_job", command="echo {}".format(time.asctime()))
@staticmethod
def test_create_job_dependencies():
"""Create Job with dependencies for SGE-like scheduler"""
job = pysge.Job(
name="test_job_dependencies", command="echo {}".format(time.asctime())
)
depjobs = [
pysge.Job(
name="dependency {}".format(i), command="echo {}".format(time.asctime())
)
for i in range(3)
]
[job.add_dependency(depjob) for depjob in depjobs]
@staticmethod
def test_create_jobgroup():
"""Create parameter-sweep JobGroup for SGE-like scheduler"""
args = {"arg1": ["a", "b", "c"]}
pysge.JobGroup(name="test_jobgroup", command="echo", arguments=args)
@staticmethod
def test_create_jobgroup_dependencies():
"""Create parameter-sweep JobGroup with dependencies for SGE-like scheduler"""
args = {"arg1": ["a", "b", "c"]}
jobgroup = pysge.JobGroup(
name="test_jobgroup_dependencies", command="echo", arguments=args
)
depjobs = [
pysge.Job(
name="dependency {}".format(i), command="echo {}".format(time.asctime())
)
for i in range(3)
]
for depjob in depjobs:
jobgroup.add_dependency(depjob)
@pytest.mark.skipif(
shutil.which(pysge.QSUB_DEFAULT) is None,
reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
)
def test_create_run_job(self):
"""Create and run Job with SGE-like scheduler"""
job = pysge.Job(
name="test_run_job",
command="echo {} \\(test_create_run_job\\)".format(time.asctime()),
)
pysge.build_and_submit_jobs(job)
@pytest.mark.skipif(
shutil.which(pysge.QSUB_DEFAULT) is None,
reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
)
def test_create_run_job_badname(self):
"""Create and run a Job using SGE-like scheduler
This job has undesirable characters in the name
"""
job = pysge.Job(
name="test run job #|!;,.?",
command="echo This was a bad name! \\(test_create_run_job_badname\\)",
)
pysge.build_and_submit_jobs(job)
@pytest.mark.skipif(
shutil.which(pysge.QSUB_DEFAULT) is None,
reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
)
def test_create_run_jobgroup(self):
"""Create and run JobGroup with SGE-like scheduler"""
args = {"arg1": ["a", "b", "c"]}
jobgroup = pysge.JobGroup(
name="test_run_jobgroup",
command="echo $arg1 \\(test_create_run_jobgroup\\)",
arguments=args,
)
pysge.build_and_submit_jobs(jobgroup)
@pytest.mark.skipif(
shutil.which(pysge.QSUB_DEFAULT) is None,
reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
)
def test_create_run_job_dependencies(self):
"""Create and run Job with dependencies for SGE-like scheduler"""
job = pysge.Job(
name="test_run_job_dependencies",
command="echo {} \\(test_create_run_job_dependencies\\)".format(
time.asctime()
),
)
depjobs = [
pysge.Job(
name="testjob_dependency_{}".format(i),
command="echo {}".format(time.asctime()),
)
for i in range(3)
]
for depjob in depjobs:
job.add_dependency(depjob)
pysge.build_and_submit_jobs([job] + depjobs)
@pytest.mark.skipif(
shutil.which(pysge.QSUB_DEFAULT) is None,
reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
)
def test_create_run_jobgroup_dependencies(self):
"""Create parameter-sweep JobGroup with dependencies for SGE-like scheduler"""
args = {"arg1": ["a", "b", "c"]}
jobgroup = pysge.JobGroup(
name="test_run_jobgroup_dependencies",
command="echo $arg1 \\(test_create_run_jobgroup_dependencies\\)",
arguments=args,
)
depjobs = [
pysge.Job(
name="testjobgroup_dependency_{}".format(i),
command="echo {}".format(time.asctime()),
)
for i in range(3)
]
for depjob in depjobs:
jobgroup.add_dependency(depjob)
pysge.build_and_submit_jobs([jobgroup] + depjobs)
| [
"leighton.pritchard@hutton.ac.uk"
] | leighton.pritchard@hutton.ac.uk |
d2297f166ccb241eecdd47f1c9b46e9d9faad85c | 11a246743073e9d2cb550f9144f59b95afebf195 | /kattis/chopin.py | 67513e777cb40682ed9aa5468bad04aa7e93f38a | [] | no_license | ankitpriyarup/online-judge | b5b779c26439369cedc05c045af5511cbc3c980f | 8a00ec141142c129bfa13a68dbf704091eae9588 | refs/heads/master | 2020-09-05T02:46:56.377213 | 2019-10-27T20:12:25 | 2019-10-27T20:12:25 | 219,959,932 | 0 | 1 | null | 2019-11-06T09:30:58 | 2019-11-06T09:30:57 | null | UTF-8 | Python | false | false | 538 | py | import sys
def main():
tc = 1
flip = {'major': {}, 'minor': {}}
flip['A#'] = 'Bb'
flip['Bb'] = 'A#'
flip['C#'] = 'Db'
flip['Db'] = 'C#'
flip['D#'] = 'Eb'
flip['Eb'] = 'D#'
flip['F#'] = 'Gb'
flip['Gb'] = 'F#'
flip['G#'] = 'Ab'
flip['Ab'] = 'G#'
for line in sys.stdin:
note, tone = line.strip().split()
if note in flip:
print('Case {}: {} {}'.format(tc, flip[note], tone))
else:
print('Case {}: UNIQUE'.format(tc))
tc += 1
main()
| [
"arnavsastry@gmail.com"
] | arnavsastry@gmail.com |
e0049f98e7e82d3f7e4ed64035a39d25d6443025 | d1c67f2031d657902acef4411877d75b992eab91 | /swagger_client/models/list_escalations_response.py | f2b5efd0d2aa78275881ff42e8022c09b496ba2d | [] | no_license | Certn/opsgenie-python | c6e6a7f42394499e5224d679cc9a449042fcf9c3 | bd5f402f97d591e4082b38c938cbabca4cf29787 | refs/heads/master | 2023-01-01T10:45:13.132455 | 2020-10-27T17:40:01 | 2020-10-27T17:40:01 | 307,769,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,621 | py | # coding: utf-8
"""
Opsgenie REST API
Opsgenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ListEscalationsResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'took': 'float',
'data': 'list[Escalation]'
}
attribute_map = {
'request_id': 'requestId',
'took': 'took',
'data': 'data'
}
def __init__(self, request_id=None, took=0.0, data=None): # noqa: E501
"""ListEscalationsResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._took = None
self._data = None
self.discriminator = None
self.request_id = request_id
self.took = took
if data is not None:
self.data = data
@property
def request_id(self):
"""Gets the request_id of this ListEscalationsResponse. # noqa: E501
:return: The request_id of this ListEscalationsResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ListEscalationsResponse.
:param request_id: The request_id of this ListEscalationsResponse. # noqa: E501
:type: str
"""
if request_id is None:
raise ValueError("Invalid value for `request_id`, must not be `None`") # noqa: E501
self._request_id = request_id
@property
def took(self):
"""Gets the took of this ListEscalationsResponse. # noqa: E501
:return: The took of this ListEscalationsResponse. # noqa: E501
:rtype: float
"""
return self._took
@took.setter
def took(self, took):
"""Sets the took of this ListEscalationsResponse.
:param took: The took of this ListEscalationsResponse. # noqa: E501
:type: float
"""
if took is None:
raise ValueError("Invalid value for `took`, must not be `None`") # noqa: E501
self._took = took
@property
def data(self):
"""Gets the data of this ListEscalationsResponse. # noqa: E501
:return: The data of this ListEscalationsResponse. # noqa: E501
:rtype: list[Escalation]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this ListEscalationsResponse.
:param data: The data of this ListEscalationsResponse. # noqa: E501
:type: list[Escalation]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ListEscalationsResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListEscalationsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"john@oram.ca"
] | john@oram.ca |
8fde5030c14e3eb60ee0aae8b332acfc19a8dc8b | 758bf41e46a3093f4923af603f1f7f8063408b9c | /website/testFromRemoteRepo/_bsch3398/museum/python/user.py | 719bf82b3ee747cef3765681b6702717b4d1b1a9 | [] | no_license | mpetyx/mpetyx.com | 4033d97b21c9227a6ba505980fd0c1b57254e8fb | d50c379b4fe09e0135656573f7049225fc90ae36 | refs/heads/master | 2021-01-10T19:50:15.488371 | 2014-01-22T09:04:14 | 2014-01-22T09:04:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | #!/usr/local/bin/python
from util import *
import connect
import sessions
#django for templates only
from django.conf import settings
from django.template import Template, Context
methods = ("login", "logout", "add", "delete", "view", "update")
def login(fields, cookie):
if fields.has_key('user') and fields.has_key('password'):
user = fields['user'].value #.value
password = fields['password'].value #.value
db = connect.connect()
temp = db.read('users', {'userid': user})
#print temp # testing
# user does exist and password matches
if temp and temp[0]['PASSWORD'] == password:
# create session cookie
sid = sessions.create(user)
newcookie = 'id=' + str(sid)
# redirect to catalogue menu page
t = loader('loggedin')
c = Context({}) #TODO
print http_response(t.render(c), newcookie)
# no match
else:
t = loader('login')
c = Context({'errors': 'Incorrect username or password. Also, I slept with your sister.'})
print http_response(t.render(c))
# go back to login page with error message
else:
t = loader('login')
c = Context({})
print http_response(t.render(c))
def logout(fields, cookie):
pass
def add(fields, cookie):
pass
def delete(fields, cookie):
pass
def view(fields, cookie):
pass
def update(fields, cookie):
pass
def run(fields, cookie):
if fields.has_key('method'):
method = fields['method'].value
if method in methods:
if method == "login":
login(fields, cookie)
elif method == "logout":
logout(fields, cookie)
elif method == "add":
add(fields, cookie)
elif method == "delete":
delete(fields, cookie)
elif method == "view":
view(fields, cookie)
elif method == "update":
update(fields, cookie)
if __name__ == "__main__":
pass
| [
"mpetyx@gmail.com"
] | mpetyx@gmail.com |
49ef083dd7476bf8ff926498dd04773df7b9d6f1 | f7c4084ddb4b26ac6005e569c907e94ce63f9993 | /项目/api_sh/data_01/migrations/0001_initial.py | 991a987ed1f657a88f79ba7cc71819dc786de534 | [] | no_license | LDZ-RGZN/- | 01caeb008bab16e1f7dd1c02137def2e030e1636 | 696be7f7a33f009eac92dff504365eb386060df1 | refs/heads/master | 2020-04-13T14:56:43.565470 | 2019-01-13T11:19:28 | 2019-01-13T11:19:28 | 162,986,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-01-08 11:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='content',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='章节名称')),
('bookname', models.CharField(max_length=150, verbose_name='书名')),
('author', models.CharField(max_length=100, verbose_name='作者')),
('WordNumber', models.CharField(max_length=100, verbose_name='本章字数')),
('FaBuData', models.CharField(max_length=200, verbose_name='发布时间')),
('content', models.TextField(verbose_name='内容')),
],
options={
'verbose_name': '详情',
'verbose_name_plural': '详情',
},
),
migrations.CreateModel(
name='liebiao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bookname', models.CharField(max_length=200, verbose_name='书名')),
('img_url', models.TextField(verbose_name='图片链接')),
('author', models.CharField(max_length=100, verbose_name='作者')),
('State', models.CharField(max_length=100, verbose_name='状态')),
('WordNumber', models.CharField(max_length=100, verbose_name='字数')),
('introduce', models.TextField(verbose_name='简介')),
],
options={
'verbose_name': '列表',
'verbose_name_plural': '列表',
},
),
migrations.CreateModel(
name='zhangjie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='章节名称')),
('zj_link', models.TextField(verbose_name='链接')),
('mf', models.CharField(max_length=100, verbose_name='状态')),
],
options={
'verbose_name': '章节',
'verbose_name_plural': '章节',
},
),
migrations.CreateModel(
name='zonglei',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(max_length=100, verbose_name='类别')),
('CGurl', models.TextField(verbose_name='类别链接')),
],
options={
'verbose_name': '总类别',
'verbose_name_plural': '总类别',
},
),
migrations.AddField(
model_name='liebiao',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='data_01.zonglei', verbose_name='类别'),
),
]
| [
"2654213432@qq.com"
] | 2654213432@qq.com |
a448f9c388862144f132bcfef5eeb24bb2bad601 | 638af6b8c580eeae23fc1034882c4b514195137a | /Packages/vcs_legacy/Test/test_ps_hatching.py | 4716cae2fa5f0c4e53cb057cd6ace51ee97ef4e6 | [] | no_license | doutriaux1/uvcdat | 83684a86b514b8cac4d8900a503fc13d557fc4d2 | 37e9635f988696c346b4c3cdb49144d1e21dab5d | refs/heads/master | 2021-01-17T07:57:22.897539 | 2015-02-02T22:52:12 | 2015-02-02T22:52:12 | 14,878,320 | 1 | 0 | null | 2015-02-19T20:54:25 | 2013-12-02T23:44:46 | C | UTF-8 | Python | false | false | 792 | py | # Adapted for numpy/ma/cdms2 by convertcdms.py
import cdms2 as cdms,vcs_legacy,sys,time,support,os
bg=support.bg
x=vcs_legacy.init()
x.portrait()
#x.setdefaultfont(2)
f=cdms.open(os.path.join(cdms.__path__[0],'..','..','..','..','sample_data','clt.nc'))
s=f('clt')
iso = x.createisofill('my')
levs = range(0,95,5)
#print len(levs)
colors = vcs_legacy.getcolors(levs)
hatch = []
iso.levels=levs
iso.fillareacolors=colors
iso.fillareastyle='pattern'
iso.fillareastyle='hatch'
iso.fillareaindices=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]
#print iso.fillareaindices
#iso.fillareaindices=[17,]*21
#print colors
#iso.list()
l = x.createline('my')
l.x=[.001,.999,.999,.001,.001]
l.y=[.001,.001,.999,.999,.001]
x.plot(l,bg=bg)
support.check_plot(x)
x.plot(s,iso,bg=bg)
support.check_plot(x)
| [
"doutriaux1@llnl.gov"
] | doutriaux1@llnl.gov |
c3fc957bd8157028fc72a63f5e48786b003b968b | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /77/77.combinations.664188670.Wrong-Answer.leetcode.python3.py | f9d4b9c263f19faa1a2c5bbe5831661b9d091441 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | class Solution(object):
def combine(self, n, k):
res = []
self.get_combine(res, [], n, k, 1)
return res
def get_combine(self, res, prefix, n, k, start):
if k == 0:
res.append(list(prefix))
for idx in range(start, n + 1):
self.get_combine(res, prefix + [idx], n, k - 1, start + 1)
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
35651e65aea695ef813cc1faf53c12e1c4efeff5 | cbe264842df4eae3569b28ed4aae9489014ed23c | /codeit/algorithm/greedy_min_fee.py | cc107882b08662f5008275ceceac29457af32609 | [
"MIT"
] | permissive | zeroam/TIL | 31e176c2f4c3e1ef72b1155353690cc2f7160f96 | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | refs/heads/master | 2021-07-23T01:43:34.135033 | 2021-07-10T06:47:17 | 2021-07-10T06:47:17 | 167,952,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | def min_fee(pages_to_print):
sorted_pages_to_print = sorted(pages_to_print)
total_fee = 0
while sorted_pages_to_print:
size = len(sorted_pages_to_print)
minute = sorted_pages_to_print.pop(0)
total_fee += size * minute
return total_fee
if __name__ == '__main__':
from util import test_value
test_value(min_fee([6, 11, 4, 1]), 39)
test_value(min_fee([3, 2, 1]), 10)
test_value(min_fee([3, 1, 4, 3, 2]), 32)
test_value(min_fee([8, 4, 2, 3, 9, 23, 6, 8]), 188) | [
"imdff0803@gmail.com"
] | imdff0803@gmail.com |
df3c79870df90f3021b5a630eb28b3efd6fa07c0 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/numenta_nupic/nupic-master/tests/unit/nupic/algorithms/sp_overlap_test.py | 1ca3af5d62178bc471696f4ea91f01904d2a56cf | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 7,048 | py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This is a legacy test from trunk and may replicate spatial pooler tests.
The allocation of cells to new patterns is explored. After all the cells
have been allocated, cells must be reused. This test makes sure that the
allocation of new cells is such that we achieve maximum generality and
predictive power.
Note: Since the sp pooler has 2048 cells with a sparsity of 40 cells active
per iteration, 100% allocation is reached at the 51st unique pattern.
"""
import unittest2 as unittest
import random as rnd
import time
import numpy
from nupic.bindings.math import GetNTAReal
from nupic.encoders import scalar
from nupic.bindings.algorithms import SpatialPooler
realDType = GetNTAReal()
SEED = 42
class TestSPFrequency(unittest.TestCase):
def testCategory(self):
"""Test that the most frequent possible option is chosen for a scalar
encoded field """
self.frequency(n=100, w=21, seed=SEED, numColors=90, encoder = 'scalar')
def testScalar(self):
"""Test that the most frequent possible option is chosen for a category
encoded field """
self.frequency(n=30, w=21, seed=SEED, numColors=90, encoder = 'category')
@unittest.skip("Not working...")
def testScalarLong(self):
"""Test that the most frequent possible option is chosen for a scalar
encoded field. Run through many different numbers of patterns and random
seeds"""
for n in [52, 70, 80, 90, 100, 110]:
self.frequency(n=100, w=21, seed=SEED, numColors=n, encoder='scalar')
@unittest.skip("Not working...")
def testCategoryLong(self):
"""Test that the most frequent possible option is chosen for a category
encoded field. Run through many different numbers of patterns and random
seeds"""
for n in [52, 70, 80, 90, 100, 110]:
self.frequency(n=100, w=21, seed=SEED, numColors=n)
def frequency(self,
n=15,
w=7,
columnDimensions = 2048,
numActiveColumnsPerInhArea = 40,
stimulusThreshold = 0,
spSeed = 1,
spVerbosity = 0,
numColors = 2,
seed=42,
minVal=0,
maxVal=10,
encoder = 'category',
forced=True):
""" Helper function that tests whether the SP predicts the most
frequent record """
print "\nRunning SP overlap test..."
print encoder, 'encoder,', 'Random seed:', seed, 'and', numColors, 'colors'
#Setting up SP and creating training patterns
# Instantiate Spatial Pooler
spImpl = SpatialPooler(
columnDimensions=(columnDimensions, 1),
inputDimensions=(1, n),
potentialRadius=n/2,
numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
spVerbosity=spVerbosity,
stimulusThreshold=stimulusThreshold,
potentialPct=0.5,
seed=spSeed,
globalInhibition=True,
)
rnd.seed(seed)
numpy.random.seed(seed)
colors = []
coincs = []
reUsedCoincs = []
spOutput = []
patterns = set([])
# Setting up the encodings
if encoder=='scalar':
enc = scalar.ScalarEncoder(name='car', w=w, n=n, minval=minVal,
maxval=maxVal, periodic=False, forced=True) # forced: it's strongly recommended to use w>=21, in the example we force skip the check for readibility
for y in xrange(numColors):
temp = enc.encode(rnd.random()*maxVal)
colors.append(numpy.array(temp, dtype=realDType))
else:
for y in xrange(numColors):
sdr = numpy.zeros(n, dtype=realDType)
# Randomly setting w out of n bits to 1
sdr[rnd.sample(xrange(n), w)] = 1
colors.append(sdr)
# Training the sp
print 'Starting to train the sp on', numColors, 'patterns'
startTime = time.time()
for i in xrange(numColors):
# TODO: See https://github.com/numenta/nupic/issues/2072
spInput = colors[i]
onCells = numpy.zeros(columnDimensions)
spImpl.compute(spInput, True, onCells)
spOutput.append(onCells.tolist())
activeCoincIndices = set(onCells.nonzero()[0])
# Checking if any of the active cells have been previously active
reUsed = activeCoincIndices.intersection(patterns)
if len(reUsed) == 0:
# The set of all coincidences that have won at least once
coincs.append((i, activeCoincIndices, colors[i]))
else:
reUsedCoincs.append((i, activeCoincIndices, colors[i]))
# Adding the active cells to the set of coincs that have been active at
# least once
patterns.update(activeCoincIndices)
if (i + 1) % 100 == 0:
print 'Record number:', i + 1
print "Elapsed time: %.2f seconds" % (time.time() - startTime)
print len(reUsedCoincs), "re-used coinc(s),"
# Check if results match expectations
summ = []
for z in coincs:
summ.append(sum([len(z[1].intersection(y[1])) for y in reUsedCoincs]))
zeros = len([x for x in summ if x==0])
factor = max(summ)*len(summ)/sum(summ)
if len(reUsed) < 10:
self.assertLess(factor, 41,
"\nComputed factor: %d\nExpected Less than %d" % (
factor, 41))
self.assertLess(zeros, 0.99*len(summ),
"\nComputed zeros: %d\nExpected Less than %d" % (
zeros, 0.99*len(summ)))
else:
self.assertLess(factor, 8,
"\nComputed factor: %d\nExpected Less than %d" % (
factor, 8))
self.assertLess(zeros, 12,
"\nComputed zeros: %d\nExpected Less than %d" % (
zeros, 12))
def hammingDistance(s1, s2):
assert len(s1) == len(s2)
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))
if __name__ == '__main__':
unittest.main()
| [
"659338505@qq.com"
] | 659338505@qq.com |
d56407a6b0db2c1f4b194d83c021e874ad9414d2 | 4e7db10524c938c8c6e687521def2889e20ec646 | /P16/1-7.py | 62f18ba79452ebca3058388918cdc5cc264b5f6c | [] | no_license | mpigrobot/python | e5cf60ca438e0d5e63a1e87a266a9e255bc07271 | bf9262657a7401f37de38318db768e630fab97a9 | refs/heads/master | 2020-03-15T15:37:03.228080 | 2018-03-31T07:33:07 | 2018-03-31T07:33:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | #!/usr/bin/env python
# coding:utf-8
import sys
sys.path.append("../")
import jieba
print ", ".join(jieba.cut("大连美容美发学校中君意是你值得信赖的选择"))
jieba.load_userdict( 'C:\Users\Administrator\Desktop\dir1.txt')
print ", ".join(jieba.cut("大连美容美发学校中君意是你值得信赖的选择"))
# jieba.suggest_freq('君意', True)
jieba.add_word("君意", freq = 20000, tag = None)
print ", ".join(jieba.cut("大连美容美发学校中君意是你值得信赖的选择")) | [
"noreply@github.com"
] | mpigrobot.noreply@github.com |
eda674ee22d94b233e6ae2676f25b8efc5cdcd5b | ca5e3595a9949abba08c642842166f82d768c153 | /modulation.py | f70357fbf07fdbf03eccb00b5de4dd5781fa470c | [] | no_license | MaksimKulya/PromaX | f36980300ec564a0c8d523f3e07ebf9ed2a9b262 | e723cf4390f9cdcc4bbda627dff90a2185322a54 | refs/heads/master | 2023-06-25T13:13:09.968726 | 2021-07-13T15:09:37 | 2021-07-13T15:09:37 | 385,638,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | import matplotlib.pyplot as plt
import math
import cmath
import pylab
from matplotlib import mlab
from PIL import Image
import numpy as np
import scipy.constants
import numba as nb
pi=math.pi
@nb.njit
def modulation(Nx,Ny,am,ph,nu,n,c,h):
AM = np.zeros(shape=(nu.shape[0], Nx, Ny))
PH = np.zeros(shape=(nu.shape[0], Nx, Ny))
G_object = np.zeros(shape=(nu.shape[0], Nx, Ny), dtype=nb.types.complex64)
for k in range(nu.shape[0]):
AM[k, :, :] = am
PH[k, :, :] = ph * (2*pi*nu[k]*(n-1)*h/c)
G_object[k, :, :] = AM[k, :, :] * np.exp(1j * PH[k, :, :])
return G_object
| [
"maxk2350@yandex.ru"
] | maxk2350@yandex.ru |
182d7ea8a9a297586f8be5758698b81f665b8e65 | 5c7da7dabdc076ad7113ccd20561a8bbf5f9a70e | /portfolios/migrations/0007_auto_20200215_1347.py | 9bae61e5b98ca60a0561156c6c192a93b6cb9cd2 | [] | no_license | aqcloudacio/cloudaciofeez | 2499fb5fc5334fa871daab2abea6c34bfa8c7667 | 8399560ece9aa10a6d6801f42c027dca26a65936 | refs/heads/master | 2023-02-27T22:36:20.501159 | 2021-02-11T00:03:46 | 2021-02-11T00:03:46 | 337,887,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # Generated by Django 2.2.7 on 2020-02-15 02:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('portfolios', '0006_auto_20200215_1344'),
]
operations = [
migrations.AlterField(
model_name='portfolio',
name='platform',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='portfolios', to='platforms.Platform'),
),
migrations.AlterField(
model_name='portfolio',
name='platform_fee_group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='portfolios', to='platforms.PlatformFees'),
),
]
| [
"alejandro.quintero@clouxter.com"
] | alejandro.quintero@clouxter.com |
8ebe16558fbb73183b625591be5a7e29dd52634b | 393ccacef32461f5d7f4b21419a7c695df9c62a7 | /lpo/sfmail/postcodes/39.cgi | 1e6fe51c8664d405a7ca266965e9036946713c72 | [] | no_license | emoshu-yuta-okuma/nakagawa-dent-hp | ebc6c66efc624a256f0d7e30c2e26b9aae162cd7 | e83e8c7060881b7267f90ca3f2c599d614a219a1 | refs/heads/master | 2023-01-14T12:39:19.874341 | 2020-11-12T06:33:00 | 2020-11-12T06:33:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95,999 | cgi | 20202,399,3990000,ナガノケン,マツモトシ,イカニケイサイガナイバアイ,長野県,松本市,以下に掲載がない場合,0,0,0,0,0,0
20202,39974,3997402,ナガノケン,マツモトシ,アイダ,長野県,松本市,会田,0,0,0,0,0,0
20202,390,3900812,ナガノケン,マツモトシ,アガタ,長野県,松本市,県,0,0,1,0,0,0
20202,39974,3997415,ナガノケン,マツモトシ,アカヌタ,長野県,松本市,赤怒田,0,0,0,0,0,0
20202,390,3900802,ナガノケン,マツモトシ,アサヒ,長野県,松本市,旭,0,0,1,0,0,0
20202,39003,3900303,ナガノケン,マツモトシ,アサマオンセン,長野県,松本市,浅間温泉,0,0,0,0,0,0
20202,39017,3901702,ナガノケン,マツモトシ,アズサガワアズサ,長野県,松本市,梓川梓,0,0,0,0,0,0
20202,39017,3901703,ナガノケン,マツモトシ,アズサガワウエノ,長野県,松本市,梓川上野,0,0,0,0,0,0
20202,39017,3901701,ナガノケン,マツモトシ,アズサガワヤマト,長野県,松本市,梓川倭,0,0,0,0,0,0
20202,39015,3901504,ナガノケン,マツモトシ,アヅミ(イネコキ),長野県,松本市,安曇(稲核),1,0,0,0,0,0
20202,39015,3901501,ナガノケン,マツモトシ,アヅミ(オオノタ),長野県,松本市,安曇(大野田),1,0,0,0,0,0
20202,39015,3901516,ナガノケン,マツモトシ,アヅミ(カミコウチ),長野県,松本市,安曇(上高地),1,0,0,0,0,0
20202,39015,3901514,ナガノケン,マツモトシ,アヅミ(サワンド),長野県,松本市,安曇(沢渡),1,0,0,0,0,0
20202,39015,3901502,ナガノケン,マツモトシ,アヅミ(シマシマ),長野県,松本市,安曇(島々),1,0,0,0,0,0
20202,39015,3901515,ナガノケン,マツモトシ,アヅミ(シラホネ),長野県,松本市,安曇(白骨),1,0,0,0,0,0
20202,39015,3901513,ナガノケン,マツモトシ,アヅミ(スズラン),長野県,松本市,安曇(鈴蘭),1,0,0,0,0,0
20202,39015,3901511,ナガノケン,マツモトシ,アヅミ(センゴクダイラ),長野県,松本市,安曇(千石平),1,0,0,0,0,0
20202,39015,3901512,ナガノケン,マツモトシ,アヅミ(ナラノキ),長野県,松本市,安曇(楢ノ木),1,0,0,0,0,0
20202,39015,3901503,ナガノケン,マツモトシ,アヅミ(ハシバ),長野県,松本市,安曇(橋場),1,0,0,0,0,0
20202,39015,3901506,ナガノケン,マツモトシ,アヅミ(バンドコロ),長野県,松本市,安曇(番所),1,0,0,0,0,0
20202,39015,3901507,ナガノケン,マツモトシ,アヅミ(ミヤノハラ),長野県,松本市,安曇(宮の原),1,0,0,0,0,0
20202,39015,3901520,ナガノケン,マツモトシ,アヅミ(ソノタ),長野県,松本市,安曇(その他),1,0,0,0,0,0
20202,39974,3997403,ナガノケン,マツモトシ,アナザワ,長野県,松本市,穴沢,0,0,0,0,0,0
20202,390,3900861,ナガノケン,マツモトシ,アリガサキ,長野県,松本市,蟻ケ崎,0,0,0,0,0,0
20202,390,3900867,ナガノケン,マツモトシ,アリガサキダイ,長野県,松本市,蟻ケ崎台,0,0,0,0,0,0
20202,390,3900831,ナガノケン,マツモトシ,イガワジョウ,長野県,松本市,井川城,0,0,1,0,0,0
20202,390,3900845,ナガノケン,マツモトシ,イシシバ(1、2チョウメ),長野県,松本市,石芝(1、2丁目),1,0,1,0,0,0
20202,399,3990007,ナガノケン,マツモトシ,イシシバ(3、4チョウメ),長野県,松本市,石芝(3、4丁目),1,0,1,0,0,0
20202,39974,3997405,ナガノケン,マツモトシ,イタバ,長野県,松本市,板場,0,0,0,0,0,0
20202,399,3990004,ナガノケン,マツモトシ,イチバ,長野県,松本市,市場,0,0,0,0,0,0
20202,390,3900827,ナガノケン,マツモトシ,イデガワ,長野県,松本市,出川,0,0,0,0,0,0
20202,390,3900826,ナガノケン,マツモトシ,イデガワマチ,長野県,松本市,出川町,0,0,0,0,0,0
20202,39011,3901131,ナガノケン,マツモトシ,イマイ,長野県,松本市,今井,0,0,0,0,0,0
20202,39002,3900222,ナガノケン,マツモトシ,イリヤマベ,長野県,松本市,入山辺,0,0,0,0,0,0
20202,390,3900813,ナガノケン,マツモトシ,ウズハシ,長野県,松本市,埋橋,0,0,1,0,0,0
20202,399,3990023,ナガノケン,マツモトシ,ウチダ,長野県,松本市,内田,0,0,0,0,0,0
20202,390,3900874,ナガノケン,マツモトシ,オオテ,長野県,松本市,大手,0,0,1,0,0,0
20202,39003,3900304,ナガノケン,マツモトシ,オオムラ,長野県,松本市,大村,0,0,0,0,0,0
20202,39003,3900314,ナガノケン,マツモトシ,オカダイブカ,長野県,松本市,岡田伊深,0,0,0,0,0,0
20202,39003,3900313,ナガノケン,マツモトシ,オカダシモオカダ,長野県,松本市,岡田下岡田,0,0,0,0,0,0
20202,39003,3900315,ナガノケン,マツモトシ,オカダマチ,長野県,松本市,岡田町,0,0,0,0,0,0
20202,39003,3900312,ナガノケン,マツモトシ,オカダマツオカ,長野県,松本市,岡田松岡,0,0,0,0,0,0
20202,390,3900876,ナガノケン,マツモトシ,カイチ,長野県,松本市,開智,0,0,1,0,0,0
20202,39974,3997413,ナガノケン,マツモトシ,カナヤママチ,長野県,松本市,金山町,0,0,0,0,0,0
20202,390,3900837,ナガノケン,マツモトシ,カマダ,長野県,松本市,鎌田,0,0,1,0,0,0
20202,39974,3997417,ナガノケン,マツモトシ,カリヤハラマチ,長野県,松本市,刈谷原町,0,0,0,0,0,0
20202,390,3900822,ナガノケン,マツモトシ,カンダ,長野県,松本市,神田,0,0,0,0,0,0
20202,39012,3901243,ナガノケン,マツモトシ,カンバヤシ,長野県,松本市,神林,0,0,0,0,0,0
20202,390,3900872,ナガノケン,マツモトシ,キタフカシ,長野県,松本市,北深志,0,0,1,0,0,0
20202,390,3900871,ナガノケン,マツモトシ,キリ,長野県,松本市,桐,0,0,1,0,0,0
20202,39011,3901132,ナガノケン,マツモトシ,クウコウヒガシ,長野県,松本市,空港東,0,0,0,0,0,0
20202,39974,3997401,ナガノケン,マツモトシ,ゴジョウ,長野県,松本市,五常,0,0,0,0,0,0
20202,399,3990024,ナガノケン,マツモトシ,コトブキコアカ,長野県,松本市,寿小赤,0,0,0,0,0,0
20202,399,3990012,ナガノケン,マツモトシ,コトブキシラセブチ,長野県,松本市,寿白瀬渕,0,0,0,0,0,0
20202,399,3990025,ナガノケン,マツモトシ,コトブキダイ,長野県,松本市,寿台,0,0,1,0,0,0
20202,399,3990021,ナガノケン,マツモトシ,コトブキトヨオカ,長野県,松本市,寿豊丘,0,0,0,0,0,0
20202,399,3990011,ナガノケン,マツモトシ,コトブキキタ,長野県,松本市,寿北,0,0,1,0,0,0
20202,399,3990026,ナガノケン,マツモトシ,コトブキナカ,長野県,松本市,寿中,0,0,0,0,0,0
20202,399,3990027,ナガノケン,マツモトシ,コトブキミナミ,長野県,松本市,寿南,0,0,1,0,0,0
20202,399,3990033,ナガノケン,マツモトシ,ササガ,長野県,松本市,笹賀,0,0,0,0,0,0
20202,390,3900847,ナガノケン,マツモトシ,ササベ,長野県,松本市,笹部,0,0,0,0,0,0
20202,39002,3900221,ナガノケン,マツモトシ,サトヤマベ,長野県,松本市,里山辺,0,0,0,0,0,0
20202,390,3900877,ナガノケン,マツモトシ,サワムラ,長野県,松本市,沢村,0,0,1,0,0,0
20202,39003,3900301,ナガノケン,マツモトシ,シナグラ,長野県,松本市,稲倉,0,0,0,0,0,0
20202,39982,3998251,ナガノケン,マツモトシ,シマウチ(9820、9821、9823-9830、9864バンチイジョウ),長野県,松本市,島内(9820、9821、9823〜9830、9864番地以上),1,0,0,0,0,0
20202,390,3900851,ナガノケン,マツモトシ,シマウチ(ソノタ),長野県,松本市,島内(その他),1,0,0,0,0,0
20202,390,3900852,ナガノケン,マツモトシ,シマダチ,長野県,松本市,島立,0,0,0,0,0,0
20202,390,3900805,ナガノケン,マツモトシ,シミズ,長野県,松本市,清水,0,0,1,0,0,0
20202,390,3900875,ナガノケン,マツモトシ,ジョウセイ,長野県,松本市,城西,0,0,1,0,0,0
20202,390,3900807,ナガノケン,マツモトシ,ジョウトウ,長野県,松本市,城東,0,0,1,0,0,0
20202,390,3900828,ナガノケン,マツモトシ,ショウナイ,長野県,松本市,庄内,0,0,1,0,0,0
20202,390,3900866,ナガノケン,マツモトシ,ジョウヤマ,長野県,松本市,城山,0,0,0,0,0,0
20202,390,3900863,ナガノケン,マツモトシ,シライタ,長野県,松本市,白板,0,0,1,0,0,0
20202,390,3900865,ナガノケン,マツモトシ,シンバシ,長野県,松本市,新橋,0,0,0,0,0,0
20202,39003,3900305,ナガノケン,マツモトシ,ソウザ,長野県,松本市,惣社,0,0,0,0,0,0
20202,390,3900842,ナガノケン,マツモトシ,ソヤノ,長野県,松本市,征矢野,0,0,1,0,0,0
20202,39974,3997418,ナガノケン,マツモトシ,ソリマチ,長野県,松本市,反町,0,0,0,0,0,0
20202,390,3900834,ナガノケン,マツモトシ,タカミヤナカ,長野県,松本市,高宮中,0,0,0,0,0,0
20202,390,3900835,ナガノケン,マツモトシ,タカミヤヒガシ,長野県,松本市,高宮東,0,0,0,0,0,0
20202,390,3900844,ナガノケン,マツモトシ,タカミヤニシ,長野県,松本市,高宮西,0,0,0,0,0,0
20202,390,3900843,ナガノケン,マツモトシ,タカミヤミナミ,長野県,松本市,高宮南,0,0,0,0,0,0
20202,390,3900836,ナガノケン,マツモトシ,タカミヤキタ,長野県,松本市,高宮北,0,0,0,0,0,0
20202,390,3900811,ナガノケン,マツモトシ,チュウオウ,長野県,松本市,中央,0,0,1,0,0,0
20202,390,3900821,ナガノケン,マツモトシ,ツカマ,長野県,松本市,筑摩,0,0,0,0,0,0
20202,39974,3997414,ナガノケン,マツモトシ,トノノイリ,長野県,松本市,殿野入,0,0,0,0,0,0
20202,39974,3997404,ナガノケン,マツモトシ,トリイデ,長野県,松本市,取出,0,0,0,0,0,0
20202,39974,3997411,ナガノケン,マツモトシ,ナカガワ,長野県,松本市,中川,0,0,0,0,0,0
20202,390,3900816,ナガノケン,マツモトシ,ナカジョウ,長野県,松本市,中条,0,0,0,0,0,0
20202,390,3900823,ナガノケン,マツモトシ,ナカヤマ,長野県,松本市,中山,0,0,0,0,0,0
20202,390,3900824,ナガノケン,マツモトシ,ナカヤマダイ,長野県,松本市,中山台,0,0,0,0,0,0
20202,39016,3901611,ナガノケン,マツモトシ,ナガワ,長野県,松本市,奈川,0,0,0,0,0,0
20202,390,3900841,ナガノケン,マツモトシ,ナギサ,長野県,松本市,渚,0,0,0,0,0,0
20202,39974,3997416,ナガノケン,マツモトシ,ナナアラシ,長野県,松本市,七嵐,0,0,0,0,0,0
20202,390,3900825,ナガノケン,マツモトシ,ナミヤナギ,長野県,松本市,並柳,0,0,0,0,0,0
20202,39012,3901241,ナガノケン,マツモトシ,ニイムラ,長野県,松本市,新村,0,0,0,0,0,0
20202,399,3990006,ナガノケン,マツモトシ,ノミゾニシ,長野県,松本市,野溝西,0,0,1,0,0,0
20202,399,3990034,ナガノケン,マツモトシ,ノミゾヒガシ,長野県,松本市,野溝東,0,0,1,0,0,0
20202,399,3990005,ナガノケン,マツモトシ,ノミゾモッコウ,長野県,松本市,野溝木工,0,0,1,0,0,0
20202,39014,3901401,ナガノケン,マツモトシ,ハタ,長野県,松本市,波田,0,0,0,0,0,0
20202,390,3900817,ナガノケン,マツモトシ,ハバウエ,長野県,松本市,巾上,0,0,0,0,0,0
20202,39003,3900316,ナガノケン,マツモトシ,ハラ,長野県,松本市,原,0,0,0,0,0,0
20202,399,3990014,ナガノケン,マツモトシ,ヒラタヒガシ,長野県,松本市,平田東,0,0,1,0,0,0
20202,399,3990015,ナガノケン,マツモトシ,ヒラタニシ,長野県,松本市,平田西,0,0,1,0,0,0
20202,390,3900815,ナガノケン,マツモトシ,フカシ,長野県,松本市,深志,0,0,1,0,0,0
20202,390,3900833,ナガノケン,マツモトシ,フタバ,長野県,松本市,双葉,0,0,0,0,0,0
20202,39974,3997412,ナガノケン,マツモトシ,ホフクジマチ,長野県,松本市,保福寺町,0,0,0,0,0,0
20202,39003,3900317,ナガノケン,マツモトシ,ホラ,長野県,松本市,洞,0,0,0,0,0,0
20202,390,3900814,ナガノケン,マツモトシ,ホンジョウ,長野県,松本市,本庄,0,0,1,0,0,0
20202,399,3990022,ナガノケン,マツモトシ,マツバラ,長野県,松本市,松原,0,0,0,0,0,0
20202,390,3900873,ナガノケン,マツモトシ,マルノウチ,長野県,松本市,丸の内,0,0,0,0,0,0
20202,39003,3900302,ナガノケン,マツモトシ,ミサヤマ,長野県,松本市,三才山,0,0,0,0,0,0
20202,390,3900801,ナガノケン,マツモトシ,ミスズ,長野県,松本市,美須々,0,0,0,0,0,0
20202,39003,3900311,ナガノケン,マツモトシ,ミズクマ,長野県,松本市,水汲,0,0,0,0,0,0
20202,39003,3900306,ナガノケン,マツモトシ,ミナミアサマ,長野県,松本市,南浅間,0,0,0,0,0,0
20202,390,3900846,ナガノケン,マツモトシ,ミナミハラ,長野県,松本市,南原,0,0,1,0,0,0
20202,390,3900832,ナガノケン,マツモトシ,ミナミマツモト,長野県,松本市,南松本,0,0,1,0,0,0
20202,399,3990001,ナガノケン,マツモトシ,ミヤタ,長野県,松本市,宮田,0,0,0,0,0,0
20202,390,3900862,ナガノケン,マツモトシ,ミヤブチ,長野県,松本市,宮渕,0,0,0,0,0,0
20202,390,3900864,ナガノケン,マツモトシ,ミヤブチホンムラ,長野県,松本市,宮渕本村,0,0,0,0,0,0
20202,399,3990035,ナガノケン,マツモトシ,ムライマチキタ,長野県,松本市,村井町北,0,0,1,0,0,0
20202,390,3900806,ナガノケン,マツモトシ,メトバ,長野県,松本市,女鳥羽,0,0,1,0,0,0
20202,390,3900803,ナガノケン,マツモトシ,モトマチ,長野県,松本市,元町,0,0,1,0,0,0
20202,390,3900804,ナガノケン,マツモトシ,ヨコタ,長野県,松本市,横田,0,0,0,0,0,0
20202,399,3990031,ナガノケン,マツモトシ,ヨシカワコヤ,長野県,松本市,芳川小屋,0,0,0,0,0,0
20202,399,3990013,ナガノケン,マツモトシ,ヨシカワヒラタ,長野県,松本市,芳川平田,0,0,0,0,0,0
20202,399,3990032,ナガノケン,マツモトシ,ヨシカワムライマチ,長野県,松本市,芳川村井町,0,0,0,0,0,0
20202,399,3990002,ナガノケン,マツモトシ,ヨシノ,長野県,松本市,芳野,0,0,0,0,0,0
20202,390,3900848,ナガノケン,マツモトシ,リョウシマ,長野県,松本市,両島,0,0,0,0,0,0
20202,39012,3901242,ナガノケン,マツモトシ,ワダ,長野県,松本市,和田,0,0,0,0,0,0
20204,394,3940000,ナガノケン,オカヤシ,イカニケイサイガナイバアイ,長野県,岡谷市,以下に掲載がない場合,0,0,0,0,0,0
20204,394,3940091,ナガノケン,オカヤシ,オカヤシノツギニバンチガクルバアイ,長野県,岡谷市,岡谷市の次に番地がくる場合,0,0,0,0,0,0
20204,394,3940002,ナガノケン,オカヤシ,アカハネ,長野県,岡谷市,赤羽,0,0,1,0,0,0
20204,394,3940053,ナガノケン,オカヤシ,アシノサワ,長野県,岡谷市,芦ノ沢,0,0,0,1,0,0
20204,394,3940001,ナガノケン,オカヤシ,イマイ,長野県,岡谷市,今井,0,0,0,0,0,0
20204,394,3940055,ナガノケン,オカヤシ,ウチヤマ,長野県,岡谷市,内山,0,0,0,0,0,0
20204,394,3940051,ナガノケン,オカヤシ,エンレイ,長野県,岡谷市,塩嶺,0,0,0,0,0,0
20204,394,3940043,ナガノケン,オカヤシ,オクラチョウ,長野県,岡谷市,御倉町,0,0,0,0,0,0
20204,394,3940011,ナガノケン,オカヤシ,オサチ,長野県,岡谷市,長地,0,0,1,0,0,0
20204,394,3940089,ナガノケン,オカヤシ,オサチイズハヤ,長野県,岡谷市,長地出早,0,0,1,0,0,0
20204,394,3940084,ナガノケン,オカヤシ,オサチカタマチョウ,長野県,岡谷市,長地片間町,0,0,1,0,0,0
20204,394,3940082,ナガノケン,オカヤシ,オサチゴショ,長野県,岡谷市,長地御所,0,0,1,0,0,0
20204,394,3940085,ナガノケン,オカヤシ,オサチコハギ,長野県,岡谷市,長地小萩,0,0,1,0,0,0
20204,394,3940081,ナガノケン,オカヤシ,オサチゴンゲンチョウ,長野県,岡谷市,長地権現町,0,0,1,0,0,0
20204,394,3940087,ナガノケン,オカヤシ,オサチシズメ,長野県,岡谷市,長地鎮,0,0,1,0,0,0
20204,394,3940083,ナガノケン,オカヤシ,オサチシバミヤ,長野県,岡谷市,長地柴宮,0,0,1,0,0,0
20204,394,3940088,ナガノケン,オカヤシ,オサチナシクボ,長野県,岡谷市,長地梨久保,0,0,1,0,0,0
20204,394,3940086,ナガノケン,オカヤシ,オサチミナモト,長野県,岡谷市,長地源,0,0,1,0,0,0
20204,394,3940003,ナガノケン,オカヤシ,カモチョウ,長野県,岡谷市,加茂町,0,0,1,0,0,0
20204,394,3940048,ナガノケン,オカヤシ,カワギシカミ,長野県,岡谷市,川岸上,0,0,1,0,0,0
20204,394,3940047,ナガノケン,オカヤシ,カワギシナカ,長野県,岡谷市,川岸中,0,0,1,0,0,0
20204,394,3940045,ナガノケン,オカヤシ,カワギシヒガシ,長野県,岡谷市,川岸東,0,0,1,0,0,0
20204,394,3940046,ナガノケン,オカヤシ,カワギシニシ,長野県,岡谷市,川岸西,0,0,1,0,0,0
20204,394,3940022,ナガノケン,オカヤシ,ギンザ,長野県,岡谷市,銀座,0,0,1,0,0,0
20204,394,3940021,ナガノケン,オカヤシ,ゴウダ,長野県,岡谷市,郷田,0,0,1,0,0,0
20204,394,3940034,ナガノケン,オカヤシ,コハン,長野県,岡谷市,湖畔,0,0,1,0,0,0
20204,394,3940029,ナガノケン,オカヤシ,サイワイチョウ,長野県,岡谷市,幸町,0,0,0,0,0,0
20204,394,3940004,ナガノケン,オカヤシ,シンメイチョウ,長野県,岡谷市,神明町,0,0,1,0,0,0
20204,394,3940025,ナガノケン,オカヤシ,ダイエイチョウ,長野県,岡谷市,大栄町,0,0,1,0,0,0
20204,394,3940031,ナガノケン,オカヤシ,タナカチョウ,長野県,岡谷市,田中町,0,0,1,0,0,0
20204,394,3940027,ナガノケン,オカヤシ,チュウオウチョウ,長野県,岡谷市,中央町,0,0,1,0,0,0
20204,394,3940026,ナガノケン,オカヤシ,ツカマチョウ,長野県,岡谷市,塚間町,0,0,1,0,0,0
20204,394,3940035,ナガノケン,オカヤシ,テンリュウチョウ,長野県,岡谷市,天竜町,0,0,1,0,0,0
20204,394,3940042,ナガノケン,オカヤシ,ナルタチョウ,長野県,岡谷市,成田町,0,0,1,0,0,0
20204,394,3940033,ナガノケン,オカヤシ,ナングウ,長野県,岡谷市,南宮,0,0,1,0,0,0
20204,394,3940052,ナガノケン,オカヤシ,ニシバヤシ,長野県,岡谷市,西林,0,0,0,0,0,0
20204,394,3940054,ナガノケン,オカヤシ,ハンノキ,長野県,岡谷市,半ノ木,0,0,0,0,0,0
20204,394,3940023,ナガノケン,オカヤシ,ヒガシギンザ,長野県,岡谷市,東銀座,0,0,1,0,0,0
20204,394,3940056,ナガノケン,オカヤシ,ヒザワ,長野県,岡谷市,樋沢,0,0,0,0,0,0
20204,394,3940024,ナガノケン,オカヤシ,ホリノウチ,長野県,岡谷市,堀ノ内,0,0,1,0,0,0
20204,394,3940028,ナガノケン,オカヤシ,ホンチョウ,長野県,岡谷市,本町,0,0,1,0,0,0
20204,394,3940044,ナガノケン,オカヤシ,ミナト,長野県,岡谷市,湊,0,0,1,0,0,0
20204,394,3940005,ナガノケン,オカヤシ,ヤマシタチョウ,長野県,岡谷市,山下町,0,0,1,0,0,0
20204,394,3940041,ナガノケン,オカヤシ,ヤマテチョウ,長野県,岡谷市,山手町,0,0,1,0,0,0
20204,394,3940053,ナガノケン,オカヤシ,ヨギトギサワ,長野県,岡谷市,ヨギトギ沢,0,0,0,1,0,0
20204,394,3940032,ナガノケン,オカヤシ,ワカミヤ,長野県,岡谷市,若宮,0,0,1,0,0,0
20205,395,3950000,ナガノケン,イイダシ,イカニケイサイガナイバアイ,長野県,飯田市,以下に掲載がない場合,0,0,0,0,0,0
20205,395,3950072,ナガノケン,イイダシ,アケボノチョウ,長野県,飯田市,曙町,0,0,0,0,0,0
20205,395,3950055,ナガノケン,イイダシ,アサヒマチ,長野県,飯田市,旭町,0,0,0,0,0,0
20205,395,3950085,ナガノケン,イイダシ,アズマチョウ,長野県,飯田市,吾妻町,0,0,0,0,0,0
20205,395,3950036,ナガノケン,イイダシ,アタゴチョウ,長野県,飯田市,愛宕町,0,0,0,0,0,0
20205,39501,3950152,ナガノケン,イイダシ,イクラチョウ,長野県,飯田市,育良町,0,0,1,0,0,0
20205,39924,3992434,ナガノケン,イイダシ,イズキ,長野県,飯田市,伊豆木,0,0,0,0,0,0
20205,395,3950071,ナガノケン,イイダシ,イマミヤチョウ,長野県,飯田市,今宮町,0,0,1,0,0,0
20205,395,3950023,ナガノケン,イイダシ,エドハマチョウ,長野県,飯田市,江戸浜町,0,0,0,0,0,0
20205,395,3950015,ナガノケン,イイダシ,エドマチ,長野県,飯田市,江戸町,0,0,1,0,0,0
20205,395,3950046,ナガノケン,イイダシ,オウギマチ,長野県,飯田市,扇町,0,0,0,0,0,0
20205,395,3950034,ナガノケン,イイダシ,オウテマチ,長野県,飯田市,追手町,0,0,1,0,0,0
20205,395,3950053,ナガノケン,イイダシ,オオクボチョウ,長野県,飯田市,大久保町,0,0,0,0,0,0
20205,39501,3950157,ナガノケン,イイダシ,オオセギ,長野県,飯田市,大瀬木,0,0,0,0,0,0
20205,395,3950056,ナガノケン,イイダシ,オオドオリ,長野県,飯田市,大通,0,0,1,0,0,0
20205,395,3950068,ナガノケン,イイダシ,オオヤスミ,長野県,飯田市,大休,0,0,0,0,0,0
20205,395,3950028,ナガノケン,イイダシ,オサヒメチョウ,長野県,飯田市,長姫町,0,0,0,0,0,0
20205,395,3950805,ナガノケン,イイダシ,カナエイッシキ,長野県,飯田市,鼎一色,0,0,0,0,0,0
20205,395,3950808,ナガノケン,イイダシ,カナエカミチャヤ,長野県,飯田市,鼎上茶屋,0,0,0,0,0,0
20205,395,3950802,ナガノケン,イイダシ,カナエシモチャヤ,長野県,飯田市,鼎下茶屋,0,0,0,0,0,0
20205,395,3950806,ナガノケン,イイダシ,カナエカミヤマ,長野県,飯田市,鼎上山,0,0,0,0,0,0
20205,395,3950803,ナガノケン,イイダシ,カナエシモヤマ,長野県,飯田市,鼎下山,0,0,0,0,0,0
20205,395,3950807,ナガノケン,イイダシ,カナエキリイシ,長野県,飯田市,鼎切石,0,0,0,0,0,0
20205,395,3950801,ナガノケン,イイダシ,カナエナカダイラ,長野県,飯田市,鼎中平,0,0,0,0,0,0
20205,395,3950804,ナガノケン,イイダシ,カナエナゴクマ,長野県,飯田市,鼎名古熊,0,0,0,0,0,0
20205,395,3950817,ナガノケン,イイダシ,カナエヒガシカナエ,長野県,飯田市,鼎東鼎,0,0,0,0,0,0
20205,395,3950026,ナガノケン,イイダシ,カナエニシカナエ,長野県,飯田市,鼎西鼎,0,0,0,0,0,0
20205,39925,3992564,ナガノケン,イイダシ,カミカワジ,長野県,飯田市,上川路,0,0,0,0,0,0
20205,395,3950002,ナガノケン,イイダシ,カミサトイイヌマ,長野県,飯田市,上郷飯沼,0,0,0,0,0,0
20205,395,3950003,ナガノケン,イイダシ,カミサトベップ,長野県,飯田市,上郷別府,0,0,0,0,0,0
20205,395,3950004,ナガノケン,イイダシ,カミサトクロダ,長野県,飯田市,上郷黒田,0,0,0,0,0,0
20205,39501,3950153,ナガノケン,イイダシ,カミトノオカ,長野県,飯田市,上殿岡,0,0,0,0,0,0
20205,39926,3992611,ナガノケン,イイダシ,カミヒサカタ,長野県,飯田市,上久堅,0,0,0,0,0,0
20205,39926,3992612,ナガノケン,イイダシ,カミヒサカタボ,長野県,飯田市,上久堅戊,0,0,0,0,0,0
20205,39914,3991403,ナガノケン,イイダシ,カミムラ,長野県,飯田市,上村,0,0,0,0,0,0
20205,39924,3992431,ナガノケン,イイダシ,カワジ,長野県,飯田市,川路,0,0,0,0,0,0
20205,39501,3950151,ナガノケン,イイダシ,キタガタ,長野県,飯田市,北方,0,0,0,0,0,0
20205,39925,3992565,ナガノケン,イイダシ,キリバヤシ,長野県,飯田市,桐林,0,0,0,0,0,0
20205,395,3950031,ナガノケン,イイダシ,ギンザ,長野県,飯田市,銀座,0,0,1,0,0,0
20205,39502,3950241,ナガノケン,イイダシ,クメ,長野県,飯田市,久米,0,0,0,0,0,0
20205,395,3950813,ナガノケン,イイダシ,ケガ,長野県,飯田市,毛賀,0,0,0,0,0,0
20205,395,3950013,ナガノケン,イイダシ,コデンマチョウ,長野県,飯田市,小伝馬町,0,0,1,0,0,0
20205,395,3950014,ナガノケン,イイダシ,サクラマチ,長野県,飯田市,桜町,0,0,1,0,0,0
20205,395,3950001,ナガノケン,イイダシ,ザコウジ,長野県,飯田市,座光寺,0,0,0,0,0,0
20205,39925,3992566,ナガノケン,イイダシ,シマ,長野県,飯田市,嶋,0,0,0,0,0,0
20205,39924,3992432,ナガノケン,イイダシ,シモゼ,長野県,飯田市,下瀬,0,0,0,0,0,0
20205,39501,3950154,ナガノケン,イイダシ,シモトノオカ,長野県,飯田市,下殿岡,0,0,0,0,0,0
20205,39926,3992606,ナガノケン,イイダシ,シモヒサカタイナバ,長野県,飯田市,下久堅稲葉,0,0,0,0,0,0
20205,39926,3992604,ナガノケン,イイダシ,シモヒサカタカキノサワ,長野県,飯田市,下久堅柿野沢,0,0,0,0,0,0
20205,39926,3992605,ナガノケン,イイダシ,シモヒサカタコバヤシ,長野県,飯田市,下久堅小林,0,0,0,0,0,0
20205,39926,3992602,ナガノケン,イイダシ,シモヒサカタシモトライワ,長野県,飯田市,下久堅下虎岩,0,0,0,0,0,0
20205,39926,3992603,ナガノケン,イイダシ,シモヒサカタチクダイラ,長野県,飯田市,下久堅知久平,0,0,0,0,0,0
20205,39926,3992607,ナガノケン,イイダシ,シモヒサカタミナバラ,長野県,飯田市,下久堅南原,0,0,0,0,0,0
20205,395,3950061,ナガノケン,イイダシ,ショウエイチョウ,長野県,飯田市,正永町,0,0,1,0,0,0
20205,395,3950084,ナガノケン,イイダシ,スズカチョウ,長野県,飯田市,鈴加町,0,0,1,0,0,0
20205,395,3950062,ナガノケン,イイダシ,スナハライチョウ,長野県,飯田市,砂払町,0,0,1,0,0,0
20205,395,3950018,ナガノケン,イイダシ,スワチョウ,長野県,飯田市,諏訪町,0,0,0,0,0,0
20205,395,3950012,ナガノケン,イイダシ,ダイオウジ,長野県,飯田市,大王路,0,0,1,0,0,0
20205,395,3950011,ナガノケン,イイダシ,ダイモンチョウ,長野県,飯田市,大門町,0,0,0,0,0,0
20205,395,3950051,ナガノケン,イイダシ,タカハチョウ,長野県,飯田市,高羽町,0,0,1,0,0,0
20205,395,3950048,ナガノケン,イイダシ,タキノサワ,長野県,飯田市,滝の沢,0,0,0,0,0,0
20205,39502,3950242,ナガノケン,イイダシ,タケサ,長野県,飯田市,竹佐,0,0,0,0,0,0
20205,39925,3992561,ナガノケン,イイダシ,ダシナ,長野県,飯田市,駄科,0,0,0,0,0,0
20205,39922,3992221,ナガノケン,イイダシ,タツエ,長野県,飯田市,龍江,0,0,0,0,0,0
20205,39924,3992433,ナガノケン,イイダシ,タテイシ,長野県,飯田市,立石,0,0,0,0,0,0
20205,395,3950032,ナガノケン,イイダシ,チカラマチ,長野県,飯田市,主税町,0,0,0,0,0,0
20205,395,3950045,ナガノケン,イイダシ,チクマチ,長野県,飯田市,知久町,0,0,1,0,0,0
20205,39922,3992223,ナガノケン,イイダシ,チハエ,長野県,飯田市,千栄,0,0,0,0,0,0
20205,395,3950041,ナガノケン,イイダシ,チュウオウドオリ,長野県,飯田市,中央通り,0,0,1,0,0,0
20205,39922,3992222,ナガノケン,イイダシ,チヨ,長野県,飯田市,千代,0,0,0,0,0,0
20205,395,3950016,ナガノケン,イイダシ,テンマチョウ,長野県,飯田市,伝馬町,0,0,1,0,0,0
20205,395,3950024,ナガノケン,イイダシ,トウエイチョウ,長野県,飯田市,東栄町,0,0,0,0,0,0
20205,395,3950017,ナガノケン,イイダシ,トウシンチョウ,長野県,飯田市,東新町,0,0,1,0,0,0
20205,395,3950086,ナガノケン,イイダシ,トウワチョウ,長野県,飯田市,東和町,0,0,1,0,0,0
20205,395,3950043,ナガノケン,イイダシ,トオリマチ,長野県,飯田市,通り町,0,0,1,0,0,0
20205,39925,3992563,ナガノケン,イイダシ,トキマタ,長野県,飯田市,時又,0,0,0,0,0,0
20205,395,3950033,ナガノケン,イイダシ,トキワマチ,長野県,飯田市,常盤町,0,0,0,0,0,0
20205,39926,3992601,ナガノケン,イイダシ,トライワ,長野県,飯田市,虎岩,0,0,0,0,0,0
20205,395,3950021,ナガノケン,イイダシ,ナカノチョウ,長野県,飯田市,仲ノ町,0,0,1,0,0,0
20205,39501,3950156,ナガノケン,イイダシ,ナカムラ,長野県,飯田市,中村,0,0,0,0,0,0
20205,39925,3992562,ナガノケン,イイダシ,ナガノハラ,長野県,飯田市,長野原,0,0,0,0,0,0
20205,395,3950083,ナガノケン,イイダシ,ニシキチョウ,長野県,飯田市,錦町,0,0,1,0,0,0
20205,395,3950029,ナガノケン,イイダシ,ニホンマツ,長野県,飯田市,二本松,0,0,1,0,0,0
20205,395,3950076,ナガノケン,イイダシ,ハクサンチョウ,長野県,飯田市,白山町,0,0,1,0,0,0
20205,395,3950075,ナガノケン,イイダシ,ハクサンドオリ,長野県,飯田市,白山通り,0,0,1,0,0,0
20205,39502,3950243,ナガノケン,イイダシ,ハコガワ,長野県,飯田市,箱川,0,0,0,0,0,0
20205,395,3950066,ナガノケン,イイダシ,ハバアカサカ,長野県,飯田市,羽場赤坂,0,0,0,0,0,0
20205,395,3950065,ナガノケン,イイダシ,ハバカミガワラ,長野県,飯田市,羽場上河原,0,0,0,0,0,0
20205,395,3950067,ナガノケン,イイダシ,ハバゴンゲン,長野県,飯田市,羽場権現,0,0,0,0,0,0
20205,395,3950073,ナガノケン,イイダシ,ハバザカチョウ,長野県,飯田市,羽場坂町,0,0,0,0,0,0
20205,395,3950063,ナガノケン,イイダシ,ハバチョウ,長野県,飯田市,羽場町,0,0,1,0,0,0
20205,395,3950064,ナガノケン,イイダシ,ハバナカハタ,長野県,飯田市,羽場仲畑,0,0,0,0,0,0
20205,395,3950027,ナガノケン,イイダシ,ババンチョウ,長野県,飯田市,馬場町,0,0,1,0,0,0
20205,395,3950022,ナガノケン,イイダシ,ハマイチョウ,長野県,飯田市,浜井町,0,0,0,0,0,0
20205,395,3950025,ナガノケン,イイダシ,ヒガシチュウオウドオリ,長野県,飯田市,東中央通り,0,0,0,0,0,0
20205,395,3950044,ナガノケン,イイダシ,ホンマチ,長野県,飯田市,本町,0,0,1,0,0,0
20205,395,3950811,ナガノケン,イイダシ,マツオアゲミゾ,長野県,飯田市,松尾上溝,0,0,0,0,0,0
20205,395,3950821,ナガノケン,イイダシ,マツオアライ,長野県,飯田市,松尾新井,0,0,0,0,0,0
20205,395,3950824,ナガノケン,イイダシ,マツオシミズ,長野県,飯田市,松尾清水,0,0,0,0,0,0
20205,395,3950812,ナガノケン,イイダシ,マツオシロダ,長野県,飯田市,松尾代田,0,0,0,0,0,0
20205,395,3950825,ナガノケン,イイダシ,マツオジョウ,長野県,飯田市,松尾城,0,0,0,0,0,0
20205,395,3950822,ナガノケン,イイダシ,マツオテラドコ,長野県,飯田市,松尾寺所,0,0,0,0,0,0
20205,395,3950815,ナガノケン,イイダシ,マツオトキワダイ,長野県,飯田市,松尾常盤台,0,0,0,0,0,0
20205,395,3950816,ナガノケン,イイダシ,マツオヒサイ,長野県,飯田市,松尾久井,0,0,0,0,0,0
20205,395,3950042,ナガノケン,イイダシ,マツオマチ,長野県,飯田市,松尾町,0,0,1,0,0,0
20205,395,3950826,ナガノケン,イイダシ,マツオミサジロ,長野県,飯田市,松尾水城,0,0,0,0,0,0
20205,395,3950823,ナガノケン,イイダシ,マツオミョウ,長野県,飯田市,松尾明,0,0,0,0,0,0
20205,395,3950074,ナガノケン,イイダシ,マツカワチョウ,長野県,飯田市,松川町,0,0,0,0,0,0
20205,395,3950077,ナガノケン,イイダシ,マルヤマチョウ,長野県,飯田市,丸山町,0,0,1,0,0,0
20205,395,3950035,ナガノケン,イイダシ,ミズノテチョウ,長野県,飯田市,水の手町,0,0,0,0,0,0
20205,39501,3950155,ナガノケン,イイダシ,ミッカイチバ,長野県,飯田市,三日市場,0,0,0,0,0,0
20205,39914,3991401,ナガノケン,イイダシ,ミナミシナノキザワ,長野県,飯田市,南信濃木沢,0,0,0,0,0,0
20205,39912,3991221,ナガノケン,イイダシ,ミナミシナノミナミワダ(マンゴ),長野県,飯田市,南信濃南和田(万古),1,0,0,0,0,0
20205,39913,3991313,ナガノケン,イイダシ,ミナミシナノミナミワダ(ソノタ),長野県,飯田市,南信濃南和田(その他),1,0,0,0,0,0
20205,39913,3991312,ナガノケン,イイダシ,ミナミシナノヤエゴウチ,長野県,飯田市,南信濃八重河内,0,0,0,0,0,0
20205,39913,3991311,ナガノケン,イイダシ,ミナミシナノワダ,長野県,飯田市,南信濃和田,0,0,0,0,0,0
20205,395,3950037,ナガノケン,イイダシ,ミナミトキワマチ,長野県,飯田市,南常盤町,0,0,0,0,0,0
20205,395,3950054,ナガノケン,イイダシ,ミノゼチョウ,長野県,飯田市,箕瀬町,0,0,1,0,0,0
20205,395,3950081,ナガノケン,イイダシ,ミヤノウエ,長野県,飯田市,宮ノ上,0,0,0,0,0,0
20205,395,3950082,ナガノケン,イイダシ,ミヤノマエ,長野県,飯田市,宮の前,0,0,0,0,0,0
20205,395,3950052,ナガノケン,イイダシ,モトマチ,長野県,飯田市,元町,0,0,0,0,0,0
20205,39502,3950244,ナガノケン,イイダシ,ヤマモト,長野県,飯田市,山本,0,0,0,0,0,0
20205,39502,3950245,ナガノケン,イイダシ,ヤマモトフタツヤマダンチ,長野県,飯田市,山本二ツ山団地,0,0,0,0,0,0
20205,395,3950814,ナガノケン,イイダシ,ヤワタマチ,長野県,飯田市,八幡町,0,0,0,0,0,0
20206,392,3920000,ナガノケン,スワシ,イカニケイサイガナイバアイ,長野県,諏訪市,以下に掲載がない場合,0,0,0,0,0,0
20206,392,3920011,ナガノケン,スワシ,アカハネ,長野県,諏訪市,赤羽根,0,0,0,0,0,0
20206,392,3920026,ナガノケン,スワシ,オオテ,長野県,諏訪市,大手,0,0,1,0,0,0
20206,392,3920005,ナガノケン,スワシ,オカムラ,長野県,諏訪市,岡村,0,0,1,0,0,0
20206,392,3920013,ナガノケン,スワシ,オキタマチ,長野県,諏訪市,沖田町,0,0,1,0,0,0
20206,392,3920001,ナガノケン,スワシ,オワ,長野県,諏訪市,大和,0,0,1,0,0,0
20206,392,3920021,ナガノケン,スワシ,カミガワ,長野県,諏訪市,上川,0,0,1,0,0,0
20206,392,3920003,ナガノケン,スワシ,カミスワ,長野県,諏訪市,上諏訪,0,0,0,0,0,0
20206,392,3920008,ナガノケン,スワシ,キリガミネ,長野県,諏訪市,霧ケ峰,0,0,0,0,0,0
20206,392,3920027,ナガノケン,スワシ,コガンドオリ,長野県,諏訪市,湖岸通り,0,0,1,0,0,0
20206,39201,3920131,ナガノケン,スワシ,コナミ,長野県,諏訪市,湖南,0,0,0,0,0,0
20206,392,3920024,ナガノケン,スワシ,コワタ,長野県,諏訪市,小和田,0,0,0,0,0,0
20206,392,3920023,ナガノケン,スワシ,コワタミナミ,長野県,諏訪市,小和田南,0,0,0,0,0,0
20206,392,3920012,ナガノケン,スワシ,シガ,長野県,諏訪市,四賀,0,0,0,0,0,0
20206,392,3920010,ナガノケン,スワシ,シブサキ,長野県,諏訪市,渋崎,0,0,0,0,0,0
20206,392,3920007,ナガノケン,スワシ,シミズ,長野県,諏訪市,清水,0,0,1,0,0,0
20206,392,3920017,ナガノケン,スワシ,ジョウナン,長野県,諏訪市,城南,0,0,1,0,0,0
20206,392,3920025,ナガノケン,スワシ,スエヒロ,長野県,諏訪市,末広,0,0,0,0,0,0
20206,392,3920009,ナガノケン,スワシ,スギナイケ,長野県,諏訪市,杉菜池,0,0,0,0,0,0
20206,392,3920004,ナガノケン,スワシ,スワ,長野県,諏訪市,諏訪,0,0,1,0,0,0
20206,392,3920022,ナガノケン,スワシ,タカシマ,長野県,諏訪市,高島,0,0,1,0,0,0
20206,392,3920016,ナガノケン,スワシ,トヨダ,長野県,諏訪市,豊田,0,0,0,0,0,0
20206,392,3920015,ナガノケン,スワシ,ナカス,長野県,諏訪市,中洲,0,0,0,0,0,0
20206,392,3920014,ナガノケン,スワシ,ミナミマチ,長野県,諏訪市,南町,0,0,0,0,0,0
20206,392,3920006,ナガノケン,スワシ,モトマチ,長野県,諏訪市,元町,0,0,0,0,0,0
20206,392,3920002,ナガノケン,スワシ,ユノワキ,長野県,諏訪市,湯の脇,0,0,1,0,0,0
20209,396,3960000,ナガノケン,イナシ,イカニケイサイガナイバアイ,長野県,伊那市,以下に掲載がない場合,0,0,0,0,0,0
20209,396,3960025,ナガノケン,イナシ,アライ,長野県,伊那市,荒井,0,0,0,0,0,0
20209,396,3960028,ナガノケン,イナシ,アライウチノカヤ,長野県,伊那市,荒井内の萱,0,0,0,0,0,0
20209,396,3960021,ナガノケン,イナシ,イナ,長野県,伊那市,伊那,0,0,0,0,0,0
20209,396,3960011,ナガノケン,イナシ,イナベ,長野県,伊那市,伊那部,0,0,0,0,0,0
20209,396,3960008,ナガノケン,イナシ,ウエノハラ,長野県,伊那市,上の原,0,0,0,0,0,0
20209,396,3960032,ナガノケン,イナシ,オザワ,長野県,伊那市,小沢,0,0,0,0,0,0
20209,396,3960012,ナガノケン,イナシ,カミシンデン,長野県,伊那市,上新田,0,0,0,0,0,0
20209,396,3960006,ナガノケン,イナシ,カミマキ,長野県,伊那市,上牧,0,0,0,0,0,0
20209,396,3960014,ナガノケン,イナシ,キツネジマ,長野県,伊那市,狐島,0,0,0,0,0,0
20209,396,3960033,ナガノケン,イナシ,コシロウクボ,長野県,伊那市,小四郎久保,0,0,0,0,0,0
20209,396,3960010,ナガノケン,イナシ,サカイ,長野県,伊那市,境,0,0,0,0,0,0
20209,396,3960024,ナガノケン,イナシ,サカシタ,長野県,伊那市,坂下,0,0,0,0,0,0
20209,396,3960013,ナガノケン,イナシ,シモシンデン,長野県,伊那市,下新田,0,0,0,0,0,0
20209,39601,3960114,ナガノケン,イナシ,センミ,長野県,伊那市,仙美,0,0,0,0,0,0
20209,39602,3960212,ナガノケン,イナシ,タカトオマチオサフジ(1-658バンチ),長野県,伊那市,高遠町長藤(1〜658番地),1,0,0,0,0,0
20209,39603,3960305,ナガノケン,イナシ,タカトオマチオサフジ(ソノタ),長野県,伊那市,高遠町長藤(その他),1,0,0,0,0,0
20209,39602,3960215,ナガノケン,イナシ,タカトオマチオバラ,長野県,伊那市,高遠町小原,0,0,0,0,0,0
20209,39602,3960214,ナガノケン,イナシ,タカトオマチカツマ,長野県,伊那市,高遠町勝間,0,0,0,0,0,0
20209,39602,3960217,ナガノケン,イナシ,タカトオマチカミヤマダ,長野県,伊那市,高遠町上山田,0,0,0,0,0,0
20209,39603,3960302,ナガノケン,イナシ,タカトオマチシビラ,長野県,伊那市,高遠町芝平,0,0,0,0,0,0
20209,39602,3960216,ナガノケン,イナシ,タカトオマチシモヤマダ,長野県,伊那市,高遠町下山田,0,0,0,0,0,0
20209,39602,3960211,ナガノケン,イナシ,タカトオマチニシタカトオ,長野県,伊那市,高遠町西高遠,0,0,0,0,0,0
20209,39603,3960303,ナガノケン,イナシ,タカトオマチバラグチ,長野県,伊那市,高遠町荊口,0,0,0,0,0,0
20209,39602,3960213,ナガノケン,イナシ,タカトオマチヒガシタカトオ,長野県,伊那市,高遠町東高遠,0,0,0,0,0,0
20209,39603,3960301,ナガノケン,イナシ,タカトオマチフジサワ,長野県,伊那市,高遠町藤沢,0,0,0,0,0,0
20209,39603,3960304,ナガノケン,イナシ,タカトオマチヤマムロ,長野県,伊那市,高遠町山室,0,0,0,0,0,0
20209,396,3960015,ナガノケン,イナシ,チュウオウ,長野県,伊那市,中央,0,0,0,0,0,0
20209,396,3960002,ナガノケン,イナシ,テラサワオカ,長野県,伊那市,手良沢岡,0,0,0,0,0,0
20209,396,3960004,ナガノケン,イナシ,テラナカツボ,長野県,伊那市,手良中坪,0,0,0,0,0,0
20209,396,3960003,ナガノケン,イナシ,テラノグチ,長野県,伊那市,手良野口,0,0,0,0,0,0
20209,39606,3960621,ナガノケン,イナシ,トミガタ,長野県,伊那市,富県,0,0,0,0,0,0
20209,396,3960031,ナガノケン,イナシ,ナカノハラ,長野県,伊那市,中の原,0,0,0,0,0,0
20209,39944,3994431,ナガノケン,イナシ,ニシハルチカ,長野県,伊那市,西春近,0,0,0,0,0,0
20209,396,3960026,ナガノケン,イナシ,ニシマチ,長野県,伊那市,西町,0,0,0,0,0,0
20209,39945,3994501,ナガノケン,イナシ,ニシミノワ,長野県,伊那市,西箕輪,0,0,0,0,0,0
20209,396,3960005,ナガノケン,イナシ,ノソコ,長野県,伊那市,野底,0,0,0,0,0,0
20209,39604,3960405,ナガノケン,イナシ,ハセイチノセ,長野県,伊那市,長谷市野瀬,0,0,0,0,0,0
20209,39604,3960407,ナガノケン,イナシ,ハセウラ,長野県,伊那市,長谷浦,0,0,0,0,0,0
20209,39604,3960403,ナガノケン,イナシ,ハセクロゴウチ,長野県,伊那市,長谷黒河内,0,0,0,0,0,0
20209,39604,3960406,ナガノケン,イナシ,ハセスギシマ,長野県,伊那市,長谷杉島,0,0,0,0,0,0
20209,39604,3960404,ナガノケン,イナシ,ハセナカオ,長野県,伊那市,長谷中尾,0,0,0,0,0,0
20209,39604,3960401,ナガノケン,イナシ,ハセヒジ,長野県,伊那市,長谷非持,0,0,0,0,0,0
20209,39604,3960402,ナガノケン,イナシ,ハセミゾクチ,長野県,伊那市,長谷溝口,0,0,0,0,0,0
20209,396,3960009,ナガノケン,イナシ,ヒカゲ,長野県,伊那市,日影,0,0,0,0,0,0
20209,39944,3994432,ナガノケン,イナシ,ヒガシハルチカ,長野県,伊那市,東春近,0,0,0,0,0,0
20209,396,3960030,ナガノケン,イナシ,ヒラサワ,長野県,伊那市,平沢,0,0,0,0,0,0
20209,396,3960001,ナガノケン,イナシ,フクジマ,長野県,伊那市,福島,0,0,0,0,0,0
20209,39601,3960112,ナガノケン,イナシ,マエハラ,長野県,伊那市,前原,0,0,0,0,0,0
20209,396,3960027,ナガノケン,イナシ,マスミガオカ,長野県,伊那市,ますみヶ丘,0,0,0,0,0,0
20209,39601,3960111,ナガノケン,イナシ,ミスズ,長野県,伊那市,美篶,0,0,0,0,0,0
20209,396,3960022,ナガノケン,イナシ,ミソノ,長野県,伊那市,御園,0,0,0,0,0,0
20209,39601,3960113,ナガノケン,イナシ,ミハラ,長野県,伊那市,美原,0,0,0,0,0,0
20209,396,3960023,ナガノケン,イナシ,ヤマデラ,長野県,伊那市,山寺,0,0,0,0,0,0
20209,396,3960029,ナガノケン,イナシ,ヨコヤマ,長野県,伊那市,横山,0,0,0,0,0,0
20209,396,3960007,ナガノケン,イナシ,ワカミヤ,長野県,伊那市,若宮,0,0,0,0,0,0
20210,39941,3994100,ナガノケン,コマガネシ,イカニケイサイガナイバアイ,長野県,駒ヶ根市,以下に掲載がない場合,0,0,0,0,0,0
20210,39941,3994113,ナガノケン,コマガネシ,アカズマチ,長野県,駒ヶ根市,赤須町,0,0,0,0,0,0
20210,39941,3994105,ナガノケン,コマガネシ,アカズヒガシ,長野県,駒ヶ根市,赤須東,0,0,0,0,0,0
20210,39941,3994117,ナガノケン,コマガネシ,アカホ,長野県,駒ヶ根市,赤穂,0,0,0,0,0,0
20210,39941,3994102,ナガノケン,コマガネシ,イイザカ,長野県,駒ヶ根市,飯坂,0,0,1,0,0,0
20210,39941,3994115,ナガノケン,コマガネシ,ウワブサカエマチ,長野県,駒ヶ根市,上穂栄町,0,0,0,0,0,0
20210,39941,3994114,ナガノケン,コマガネシ,ウワブミナミ,長野県,駒ヶ根市,上穂南,0,0,0,0,0,0
20210,39941,3994116,ナガノケン,コマガネシ,ウワブキタ,長野県,駒ヶ根市,上穂北,0,0,0,0,0,0
20210,39941,3994111,ナガノケン,コマガネシ,キタマチ,長野県,駒ヶ根市,北町,0,0,0,0,0,0
20210,39941,3994104,ナガノケン,コマガネシ,キョウヅカ,長野県,駒ヶ根市,経塚,0,0,0,0,0,0
20210,39941,3994108,ナガノケン,コマガネシ,シモイチバ,長野県,駒ヶ根市,下市場,0,0,0,0,0,0
20210,39941,3994232,ナガノケン,コマガネシ,シモダイラ(2070-2119、2309-2311、2419、2421-2426,長野県,駒ヶ根市,下平(2070〜2119、2309〜2311、2419、2421〜2426,1,0,0,0,0,0
20210,39941,3994232,ナガノケン,コマガネシ,、2433-2450),長野県,駒ヶ根市,、2433〜2450),0,0,0,0,0,0
20210,39941,3994101,ナガノケン,コマガネシ,シモダイラ(ソノタ),長野県,駒ヶ根市,下平(その他),1,0,0,0,0,0
20210,39941,3994112,ナガノケン,コマガネシ,チュウオウ,長野県,駒ヶ根市,中央,0,0,0,0,0,0
20210,39942,3994231,ナガノケン,コマガネシ,ナカザワ,長野県,駒ヶ根市,中沢,0,0,0,0,0,0
20210,39941,3994103,ナガノケン,コマガネシ,ナシノキ,長野県,駒ヶ根市,梨の木,0,0,0,0,0,0
20210,39943,3994321,ナガノケン,コマガネシ,ヒガシイナ,長野県,駒ヶ根市,東伊那,0,0,0,0,0,0
20210,39941,3994106,ナガノケン,コマガネシ,ヒガシマチ,長野県,駒ヶ根市,東町,0,0,0,0,0,0
20210,39941,3994107,ナガノケン,コマガネシ,ミナミダ,長野県,駒ヶ根市,南田,0,0,0,0,0,0
20212,398,3980000,ナガノケン,オオマチシ,イカニケイサイガナイバアイ,長野県,大町市,以下に掲載がない場合,0,0,0,0,0,0
20212,398,3980002,ナガノケン,オオマチシ,オオマチ,長野県,大町市,大町,0,0,0,0,0,0
20212,398,3980001,ナガノケン,オオマチシ,タイラ,長野県,大町市,平,0,0,0,0,0,0
20212,398,3980004,ナガノケン,オオマチシ,トキワ,長野県,大町市,常盤,0,0,0,0,0,0
20212,39991,3999101,ナガノケン,オオマチシ,ミアサ,長野県,大町市,美麻,0,0,0,0,0,0
20212,39973,3997301,ナガノケン,オオマチシ,ヤサカ,長野県,大町市,八坂,0,0,0,0,0,0
20212,39973,3997302,ナガノケン,オオマチシ,ヤサカショウブ,長野県,大町市,八坂菖蒲,0,0,0,0,0,0
20212,398,3980003,ナガノケン,オオマチシ,ヤシロ,長野県,大町市,社,0,0,0,0,0,0
20214,391,3910000,ナガノケン,チノシ,イカニケイサイガナイバアイ,長野県,茅野市,以下に掲載がない場合,0,0,0,0,0,0
20214,39102,3910214,ナガノケン,チノシ,イズミノ,長野県,茅野市,泉野,0,0,0,0,0,0
20214,391,3910012,ナガノケン,チノシ,カナザワ,長野県,茅野市,金沢,0,0,0,0,0,0
20214,39102,3910212,ナガノケン,チノシ,キタヤマ(シブゴテンユ・シブノユ),長野県,茅野市,北山(渋御殿湯・渋の湯),1,0,0,0,0,0
20214,39103,3910301,ナガノケン,チノシ,キタヤマ(ソノタ),長野県,茅野市,北山(その他),1,0,0,0,0,0
20214,39102,3910211,ナガノケン,チノシ,コヒガシ,長野県,茅野市,湖東,0,0,0,0,0,0
20214,391,3910004,ナガノケン,チノシ,ジョウヤマ,長野県,茅野市,城山,0,0,0,0,0,0
20214,391,3910011,ナガノケン,チノシ,タマガワ,長野県,茅野市,玉川,0,0,0,0,0,0
20214,391,3910001,ナガノケン,チノシ,チノ,長野県,茅野市,ちの,0,0,0,0,0,0
20214,391,3910002,ナガノケン,チノシ,ツカハラ,長野県,茅野市,塚原,0,0,1,0,0,0
20214,39102,3910213,ナガノケン,チノシ,トヨヒラ,長野県,茅野市,豊平,0,0,0,0,0,0
20214,39102,3910215,ナガノケン,チノシ,ナカオオシオ,長野県,茅野市,中大塩,0,0,0,0,0,0
20214,391,3910005,ナガノケン,チノシ,ナカマチ,長野県,茅野市,仲町,0,0,0,0,0,0
20214,391,3910003,ナガノケン,チノシ,ホンマチ,長野県,茅野市,本町,0,0,0,0,0,0
20214,391,3910013,ナガノケン,チノシ,ミヤガワ,長野県,茅野市,宮川,0,0,0,0,0,0
20214,39102,3910216,ナガノケン,チノシ,ヨネザワ,長野県,茅野市,米沢,0,0,0,0,0,0
20215,39907,3990700,ナガノケン,シオジリシ,イカニケイサイガナイバアイ,長野県,塩尻市,以下に掲載がない場合,0,0,0,0,0,0
20215,39907,3990713,ナガノケン,シオジリシ,オオゴヤ,長野県,塩尻市,大小屋,0,0,0,0,0,0
20215,39907,3990722,ナガノケン,シオジリシ,カキザワ,長野県,塩尻市,柿沢,0,0,0,0,0,0
20215,399,3990071,ナガノケン,シオジリシ,カタオカ(3635、3680、5794、6114バンチ),長野県,塩尻市,片丘(3635、3680、5794、6114番地),1,0,0,0,0,0
20215,39907,3990711,ナガノケン,シオジリシ,カタオカ(ソノタ),長野県,塩尻市,片丘(その他),1,0,0,0,0,0
20215,39907,3990721,ナガノケン,シオジリシ,カナイ,長野県,塩尻市,金井,0,0,0,0,0,0
20215,39907,3990724,ナガノケン,シオジリシ,カミニシジョウ,長野県,塩尻市,上西条,0,0,0,0,0,0
20215,39963,3996302,ナガノケン,シオジリシ,キソヒラサワ,長野県,塩尻市,木曽平沢,0,0,0,0,0,0
20215,39906,3990651,ナガノケン,シオジリシ,キタオノ,長野県,塩尻市,北小野,0,0,0,0,0,0
20215,39907,3990723,ナガノケン,シオジリシ,キュウシオジリ,長野県,塩尻市,旧塩尻,0,0,0,0,0,0
20215,39907,3990716,ナガノケン,シオジリシ,サジキ,長野県,塩尻市,桟敷,0,0,0,0,0,0
20215,39907,3990712,ナガノケン,シオジリシ,シオジリマチ,長野県,塩尻市,塩尻町,0,0,0,0,0,0
20215,39907,3990726,ナガノケン,シオジリシ,シモニシジョウ,長野県,塩尻市,下西条,0,0,0,0,0,0
20215,39964,3996462,ナガノケン,シオジリシ,セバ,長野県,塩尻市,洗馬,0,0,0,0,0,0
20215,39964,3996461,ナガノケン,シオジリシ,ソウガ,長野県,塩尻市,宗賀,0,0,0,0,0,0
20215,39907,3990744,ナガノケン,シオジリシ,ダイモン7ク,長野県,塩尻市,大門七区,0,0,0,1,0,0
20215,39907,3990742,ナガノケン,シオジリシ,ダイモンイズミチョウ,長野県,塩尻市,大門泉町,0,0,0,0,0,0
20215,39907,3990745,ナガノケン,シオジリシ,ダイモンキキョウチョウ,長野県,塩尻市,大門桔梗町,0,0,0,0,0,0
20215,39907,3990741,ナガノケン,シオジリシ,ダイモンサイワイチョウ,長野県,塩尻市,大門幸町,0,0,0,0,0,0
20215,39907,3990743,ナガノケン,シオジリシ,ダイモンタガワチョウ,長野県,塩尻市,大門田川町,0,0,0,0,0,0
20215,39907,3990746,ナガノケン,シオジリシ,ダイモンナミキチョウ,長野県,塩尻市,大門並木町,0,0,0,0,0,0
20215,39907,3990736,ナガノケン,シオジリシ,ダイモン1バンチョウ,長野県,塩尻市,大門一番町,0,0,0,0,0,0
20215,39907,3990735,ナガノケン,シオジリシ,ダイモン2バンチョウ,長野県,塩尻市,大門二番町,0,0,0,0,0,0
20215,39907,3990733,ナガノケン,シオジリシ,ダイモン3バンチョウ,長野県,塩尻市,大門三番町,0,0,0,0,0,0
20215,39907,3990734,ナガノケン,シオジリシ,ダイモン4バンチョウ,長野県,塩尻市,大門四番町,0,0,0,0,0,0
20215,39907,3990732,ナガノケン,シオジリシ,ダイモン5バンチョウ,長野県,塩尻市,大門五番町,0,0,0,0,0,0
20215,39907,3990731,ナガノケン,シオジリシ,ダイモン6バンチョウ,長野県,塩尻市,大門六番町,0,0,0,0,0,0
20215,39907,3990738,ナガノケン,シオジリシ,ダイモン7バンチョウ,長野県,塩尻市,大門七番町,0,0,0,0,0,0
20215,39907,3990737,ナガノケン,シオジリシ,ダイモン8バンチョウ,長野県,塩尻市,大門八番町,0,0,0,0,0,0
20215,39907,3990744,ナガノケン,シオジリシ,ダイモン(バンチ),長野県,塩尻市,大門(番地),0,0,0,1,0,0
20215,39907,3990725,ナガノケン,シオジリシ,ナカニシジョウ,長野県,塩尻市,中西条,0,0,0,0,0,0
20215,39907,3990715,ナガノケン,シオジリシ,ナガウネ,長野県,塩尻市,長畝,0,0,0,0,0,0
20215,39963,3996303,ナガノケン,シオジリシ,ナライ,長野県,塩尻市,奈良井,0,0,0,0,0,0
20215,39963,3996301,ナガノケン,シオジリシ,ニエカワ,長野県,塩尻市,贄川,0,0,0,0,0,0
20215,39907,3990705,ナガノケン,シオジリシ,ヒロオカカタイシ,長野県,塩尻市,広丘堅石,0,0,0,0,0,0
20215,39907,3990704,ナガノケン,シオジリシ,ヒロオカゴウバラ,長野県,塩尻市,広丘郷原,0,0,0,0,0,0
20215,39907,3990703,ナガノケン,シオジリシ,ヒロオカタカイデ,長野県,塩尻市,広丘高出,0,0,0,0,0,0
20215,39907,3990702,ナガノケン,シオジリシ,ヒロオカノムラ,長野県,塩尻市,広丘野村,0,0,0,0,0,0
20215,39907,3990706,ナガノケン,シオジリシ,ヒロオカハラシンデン,長野県,塩尻市,広丘原新田,0,0,0,0,0,0
20215,39907,3990701,ナガノケン,シオジリシ,ヒロオカヨシダ,長野県,塩尻市,広丘吉田,0,0,0,0,0,0
20215,39907,3990714,ナガノケン,シオジリシ,ホリノウチ,長野県,塩尻市,堀ノ内,0,0,0,0,0,0
20215,39907,3990727,ナガノケン,シオジリシ,ミドリコ,長野県,塩尻市,みどり湖,0,0,0,0,0,0
20215,39907,3990728,ナガノケン,シオジリシ,ミネハラ,長野県,塩尻市,峰原,0,0,0,0,0,0
20220,39982,3998200,ナガノケン,アヅミノシ,イカニケイサイガナイバアイ,長野県,安曇野市,以下に掲載がない場合,0,0,0,0,0,0
20220,39971,3997102,ナガノケン,アヅミノシ,アカシナナカガワテ,長野県,安曇野市,明科中川手,0,0,0,0,0,0
20220,39971,3997104,ナガノケン,アヅミノシ,アカシナナナキ,長野県,安曇野市,明科七貴,0,0,0,0,0,0
20220,39971,3997101,ナガノケン,アヅミノシ,アカシナヒガシカワテ,長野県,安曇野市,明科東川手,0,0,0,0,0,0
20220,39971,3997103,ナガノケン,アヅミノシ,アカシナヒカル,長野県,安曇野市,明科光,0,0,0,0,0,0
20220,39971,3997105,ナガノケン,アヅミノシ,アカシナミナミリクゴウ,長野県,安曇野市,明科南陸郷,0,0,0,0,0,0
20220,39982,3998205,ナガノケン,アヅミノシ,トヨシナ,長野県,安曇野市,豊科,0,0,0,0,0,0
20220,39982,3998204,ナガノケン,アヅミノシ,トヨシナタキベ,長野県,安曇野市,豊科高家,0,0,0,0,0,0
20220,39982,3998203,ナガノケン,アヅミノシ,トヨシナタザワ,長野県,安曇野市,豊科田沢,0,0,0,0,0,0
20220,39982,3998202,ナガノケン,アヅミノシ,トヨシナヒカル,長野県,安曇野市,豊科光,0,0,0,0,0,0
20220,39982,3998201,ナガノケン,アヅミノシ,トヨシナミナミホタカ,長野県,安曇野市,豊科南穂高,0,0,0,0,0,0
20220,39983,3998303,ナガノケン,アヅミノシ,ホタカ,長野県,安曇野市,穂高,0,0,0,0,0,0
20220,39983,3998301,ナガノケン,アヅミノシ,ホタカアリアケ,長野県,安曇野市,穂高有明,0,0,0,0,0,0
20220,39983,3998304,ナガノケン,アヅミノシ,ホタカカシワバラ,長野県,安曇野市,穂高柏原,0,0,0,0,0,0
20220,39983,3998302,ナガノケン,アヅミノシ,ホタカキタホタカ,長野県,安曇野市,穂高北穂高,0,0,0,0,0,0
20220,39983,3998305,ナガノケン,アヅミノシ,ホタカマキ,長野県,安曇野市,穂高牧,0,0,0,0,0,0
20220,39982,3998211,ナガノケン,アヅミノシ,ホリガネカラスガワ,長野県,安曇野市,堀金烏川,0,0,0,0,0,0
20220,39982,3998212,ナガノケン,アヅミノシ,ホリガネミタ,長野県,安曇野市,堀金三田,0,0,0,0,0,0
20220,39981,3998103,ナガノケン,アヅミノシ,ミサトオグラ,長野県,安曇野市,三郷小倉,0,1,0,0,0,0
20220,39981,3998101,ナガノケン,アヅミノシ,ミサトメイセイ,長野県,安曇野市,三郷明盛,0,1,0,0,0,0
20220,39981,3998102,ナガノケン,アヅミノシ,ミサトユタカ,長野県,安曇野市,三郷温,0,1,0,0,0,0
20324,39103,3910321,ナガノケン,キタサクグンタテシナマチ,アシダハッカノ(1519-1625、1628-1635<シラカバコ>),長野県,北佐久郡立科町,芦田八ケ野(1519〜1625、1628〜1635「白樺湖」),1,0,0,1,0,0
20324,39103,3910321,ナガノケン,キタサクグンタテシナマチ,イケノタイラ,長野県,北佐久郡立科町,池の平,0,0,0,1,0,0
20361,393,3930000,ナガノケン,スワグンシモスワマチ,イカニケイサイガナイバアイ,長野県,諏訪郡下諏訪町,以下に掲載がない場合,0,0,0,0,0,0
20361,393,3930066,ナガノケン,スワグンシモスワマチ,アケボノチョウ,長野県,諏訪郡下諏訪町,曙町,0,0,0,0,0,0
20361,393,3930068,ナガノケン,スワグンシモスワマチ,カスガチョウ,長野県,諏訪郡下諏訪町,春日町,0,0,0,0,0,0
20361,393,3930052,ナガノケン,スワグンシモスワマチ,カミクボ,長野県,諏訪郡下諏訪町,上久保,0,0,0,0,0,0
20361,393,3930053,ナガノケン,スワグンシモスワマチ,カミバツパ,長野県,諏訪郡下諏訪町,上馬場,0,0,0,0,0,0
20361,393,3930042,ナガノケン,スワグンシモスワマチ,キタシオウ,長野県,諏訪郡下諏訪町,北四王,0,0,0,0,0,0
20361,393,3930014,ナガノケン,スワグンシモスワマチ,キノシタ,長野県,諏訪郡下諏訪町,木の下,0,0,0,0,0,0
20361,393,3930026,ナガノケン,スワグンシモスワマチ,クボカイドウ,長野県,諏訪郡下諏訪町,久保海道,0,0,0,0,0,0
20361,393,3930024,ナガノケン,スワグンシモスワマチ,ゴカン,長野県,諏訪郡下諏訪町,五官,0,0,0,0,0,0
20361,393,3930044,ナガノケン,スワグンシモスワマチ,コハンチョウ,長野県,諏訪郡下諏訪町,湖畔町,0,0,0,0,0,0
20361,393,3930013,ナガノケン,スワグンシモスワマチ,コユノウエ,長野県,諏訪郡下諏訪町,小湯の上,0,0,0,0,0,0
20361,393,3930084,ナガノケン,スワグンシモスワマチ,サカエチョウ,長野県,諏訪郡下諏訪町,栄町,0,0,0,0,0,0
20361,393,3930065,ナガノケン,スワグンシモスワマチ,サキガケチョウ,長野県,諏訪郡下諏訪町,魁町,0,0,0,0,0,0
20361,393,3930072,ナガノケン,スワグンシモスワマチ,サクラマチ,長野県,諏訪郡下諏訪町,桜町,0,0,0,0,0,0
20361,393,3930086,ナガノケン,スワグンシモスワマチ,シミズマチ,長野県,諏訪郡下諏訪町,清水町,0,0,0,0,0,0
20361,393,3930007,ナガノケン,スワグンシモスワマチ,シモヤシキ,長野県,諏訪郡下諏訪町,下屋敷,0,0,0,0,0,0
20361,393,3930011,ナガノケン,スワグンシモスワマチ,シンマチウエ,長野県,諏訪郡下諏訪町,新町上,0,0,0,0,0,0
20361,393,3930018,ナガノケン,スワグンシモスワマチ,シンマチシタ,長野県,諏訪郡下諏訪町,新町下,0,0,0,0,0,0
20361,393,3930054,ナガノケン,スワグンシモスワマチ,スゲノマチ,長野県,諏訪郡下諏訪町,菅野町,0,0,0,0,0,0
20361,393,3930022,ナガノケン,スワグンシモスワマチ,セキヤ,長野県,諏訪郡下諏訪町,関屋,0,0,0,0,0,0
20361,393,3930051,ナガノケン,スワグンシモスワマチ,タイシャドオリ,長野県,諏訪郡下諏訪町,大社通,0,0,0,0,0,0
20361,393,3930092,ナガノケン,スワグンシモスワマチ,ダイモン,長野県,諏訪郡下諏訪町,大門,0,0,0,0,0,0
20361,393,3930033,ナガノケン,スワグンシモスワマチ,タカキ,長野県,諏訪郡下諏訪町,高木,0,0,0,0,0,0
20361,393,3930034,ナガノケン,スワグンシモスワマチ,タカハマ,長野県,諏訪郡下諏訪町,高浜,0,0,0,0,0,0
20361,393,3930021,ナガノケン,スワグンシモスワマチ,タケイ,長野県,諏訪郡下諏訪町,武居,0,0,0,0,0,0
20361,393,3930015,ナガノケン,スワグンシモスワマチ,タツマチ,長野県,諏訪郡下諏訪町,立町,0,0,0,0,0,0
20361,393,3930078,ナガノケン,スワグンシモスワマチ,タナカマチ,長野県,諏訪郡下諏訪町,田中町,0,0,0,0,0,0
20361,393,3930075,ナガノケン,スワグンシモスワマチ,チュウオウドオリ,長野県,諏訪郡下諏訪町,中央通,0,0,0,0,0,0
20361,393,3930073,ナガノケン,スワグンシモスワマチ,ツカダマチ,長野県,諏訪郡下諏訪町,塚田町,0,0,0,0,0,0
20361,393,3930023,ナガノケン,スワグンシモスワマチ,トミガオカ,長野県,諏訪郡下諏訪町,富ケ丘,0,0,0,0,0,0
20361,393,3930057,ナガノケン,スワグンシモスワマチ,トモノマチ,長野県,諏訪郡下諏訪町,友之町,0,0,0,0,0,0
20361,393,3930001,ナガノケン,スワグンシモスワマチ,トヨハシ,長野県,諏訪郡下諏訪町,樋橋,0,0,0,0,0,0
20361,393,3930064,ナガノケン,スワグンシモスワマチ,ナカシオチョウ,長野県,諏訪郡下諏訪町,中汐町,0,0,0,0,0,0
20361,393,3930071,ナガノケン,スワグンシモスワマチ,ナカマチ,長野県,諏訪郡下諏訪町,仲町,0,0,0,0,0,0
20361,393,3930047,ナガノケン,スワグンシモスワマチ,ニシアカスナ,長野県,諏訪郡下諏訪町,西赤砂,0,0,0,0,0,0
20361,393,3930041,ナガノケン,スワグンシモスワマチ,ニシシオウ,長野県,諏訪郡下諏訪町,西四王,0,0,0,0,0,0
20361,393,3930087,ナガノケン,スワグンシモスワマチ,ニシタカノマチ,長野県,諏訪郡下諏訪町,西鷹野町,0,0,0,0,0,0
20361,393,3930032,ナガノケン,スワグンシモスワマチ,ニシハマ,長野県,諏訪郡下諏訪町,西浜,0,0,0,0,0,0
20361,393,3930082,ナガノケン,スワグンシモスワマチ,ニシヤヨイチョウ,長野県,諏訪郡下諏訪町,西弥生町,0,0,0,0,0,0
20361,393,3930035,ナガノケン,スワグンシモスワマチ,ニシユタカ,長野県,諏訪郡下諏訪町,西豊,0,0,0,0,0,0
20361,393,3930003,ナガノケン,スワグンシモスワマチ,ハギクラ,長野県,諏訪郡下諏訪町,萩倉,0,0,0,0,0,0
20361,393,3930063,ナガノケン,スワグンシモスワマチ,ハナサキチョウ,長野県,諏訪郡下諏訪町,花咲町,0,0,0,0,0,0
20361,393,3930046,ナガノケン,スワグンシモスワマチ,ヒガシアカスナ,長野県,諏訪郡下諏訪町,東赤砂,0,0,0,0,0,0
20361,393,3930043,ナガノケン,スワグンシモスワマチ,ヒガシシオウ,長野県,諏訪郡下諏訪町,東四王,0,0,0,0,0,0
20361,393,3930085,ナガノケン,スワグンシモスワマチ,ヒガシタカノマチ,長野県,諏訪郡下諏訪町,東鷹野町,0,0,0,0,0,0
20361,393,3930002,ナガノケン,スワグンシモスワマチ,ヒガシマタ,長野県,諏訪郡下諏訪町,東俣,0,0,0,0,0,0
20361,393,3930006,ナガノケン,スワグンシモスワマチ,ヒガシマチウエ,長野県,諏訪郡下諏訪町,東町上,0,0,0,0,0,0
20361,393,3930005,ナガノケン,スワグンシモスワマチ,ヒガシマチナカ,長野県,諏訪郡下諏訪町,東町中,0,0,0,0,0,0
20361,393,3930004,ナガノケン,スワグンシモスワマチ,ヒガシマチシタ,長野県,諏訪郡下諏訪町,東町下,0,0,0,0,0,0
20361,393,3930093,ナガノケン,スワグンシモスワマチ,ヒガシヤマダ,長野県,諏訪郡下諏訪町,東山田,0,0,0,0,0,0
20361,393,3930083,ナガノケン,スワグンシモスワマチ,ヒガシヤヨイチョウ,長野県,諏訪郡下諏訪町,東弥生町,0,0,0,0,0,0
20361,393,3930031,ナガノケン,スワグンシモスワマチ,ヒガシユタカ,長野県,諏訪郡下諏訪町,東豊,0,0,0,0,0,0
20361,393,3930062,ナガノケン,スワグンシモスワマチ,ヒラサワチョウ,長野県,諏訪郡下諏訪町,平沢町,0,0,0,0,0,0
20361,393,3930056,ナガノケン,スワグンシモスワマチ,ヒロセチョウ,長野県,諏訪郡下諏訪町,広瀬町,0,0,0,0,0,0
20361,393,3930055,ナガノケン,スワグンシモスワマチ,フジミチョウ,長野県,諏訪郡下諏訪町,富士見町,0,0,0,0,0,0
20361,393,3930091,ナガノケン,スワグンシモスワマチ,ホシガオカ,長野県,諏訪郡下諏訪町,星が丘,0,0,0,0,0,0
20361,393,3930025,ナガノケン,スワグンシモスワマチ,ホンゴウ,長野県,諏訪郡下諏訪町,本郷,0,0,0,0,0,0
20361,393,3930008,ナガノケン,スワグンシモスワマチ,マチヤシキ,長野県,諏訪郡下諏訪町,町屋敷,0,0,0,0,0,0
20361,393,3930061,ナガノケン,スワグンシモスワマチ,ミタマチ,長野県,諏訪郡下諏訪町,御田町,0,0,0,0,0,0
20361,393,3930074,ナガノケン,スワグンシモスワマチ,ミドリマチ,長野県,諏訪郡下諏訪町,緑町,0,0,0,0,0,0
20361,393,3930045,ナガノケン,スワグンシモスワマチ,ミナミシオウ,長野県,諏訪郡下諏訪町,南四王,0,0,0,0,0,0
20361,393,3930067,ナガノケン,スワグンシモスワマチ,ヤギヒガシ,長野県,諏訪郡下諏訪町,矢木東,0,0,0,0,0,0
20361,393,3930076,ナガノケン,スワグンシモスワマチ,ヤギニシ,長野県,諏訪郡下諏訪町,矢木西,0,0,0,0,0,0
20361,393,3930077,ナガノケン,スワグンシモスワマチ,ヤギマチ,長野県,諏訪郡下諏訪町,矢木町,0,0,0,0,0,0
20361,393,3930081,ナガノケン,スワグンシモスワマチ,ヤシロヒガシマチ,長野県,諏訪郡下諏訪町,社東町,0,0,0,0,0,0
20361,393,3930017,ナガノケン,スワグンシモスワマチ,ユダナカマチ,長野県,諏訪郡下諏訪町,湯田仲町,0,0,0,0,0,0
20361,393,3930016,ナガノケン,スワグンシモスワマチ,ユダマチ,長野県,諏訪郡下諏訪町,湯田町,0,0,0,0,0,0
20361,393,3930012,ナガノケン,スワグンシモスワマチ,ヨコマチ,長野県,諏訪郡下諏訪町,横町,0,0,0,0,0,0
20361,393,3930019,ナガノケン,スワグンシモスワマチ,ヨコマチキノシタ,長野県,諏訪郡下諏訪町,横町木の下,0,0,0,0,0,0
20362,39902,3990200,ナガノケン,スワグンフジミマチ,イカニケイサイガナイバアイ,長野県,諏訪郡富士見町,以下に掲載がない場合,0,0,0,0,0,0
20362,39901,3990102,ナガノケン,スワグンフジミマチ,オチアイ(3060、3600-4600バンチ),長野県,諏訪郡富士見町,落合(3060、3600〜4600番地),1,0,0,0,0,0
20362,39902,3990214,ナガノケン,スワグンフジミマチ,オチアイ(ソノタ),長野県,諏訪郡富士見町,落合(その他),1,0,0,0,0,0
20362,39902,3990213,ナガノケン,スワグンフジミマチ,オッコト,長野県,諏訪郡富士見町,乙事,0,0,0,0,0,0
20362,39901,3990101,ナガノケン,スワグンフジミマチ,サカイ,長野県,諏訪郡富士見町,境,0,0,0,0,0,0
20362,39902,3990212,ナガノケン,スワグンフジミマチ,タツザワ,長野県,諏訪郡富士見町,立沢,0,0,0,0,0,0
20362,391,3910021,ナガノケン,スワグンフジミマチ,フジミ(ニュウカサコ),長野県,諏訪郡富士見町,富士見(入笠湖),1,0,0,0,0,0
20362,39902,3990211,ナガノケン,スワグンフジミマチ,フジミ(ソノタ),長野県,諏訪郡富士見町,富士見(その他),1,0,0,0,0,0
20363,39101,3910100,ナガノケン,スワグンハラムラ,イカニケイサイガナイバアイ,長野県,諏訪郡原村,以下に掲載がない場合,0,0,0,0,0,0
20363,39101,3910101,ナガノケン,スワグンハラムラ,オオクボ,長野県,諏訪郡原村,大久保,0,0,0,0,0,0
20363,39101,3910105,ナガノケン,スワグンハラムラ,カシワギ,長野県,諏訪郡原村,柏木,0,0,0,0,0,0
20363,39101,3910111,ナガノケン,スワグンハラムラ,カミサト,長野県,諏訪郡原村,上里,0,0,0,0,0,0
20363,39101,3910106,ナガノケン,スワグンハラムラ,ショウブザワ,長野県,諏訪郡原村,菖蒲沢,0,0,0,0,0,0
20363,39101,3910108,ナガノケン,スワグンハラムラ,ナカシンデン,長野県,諏訪郡原村,中新田,0,0,0,0,0,0
20363,39101,3910112,ナガノケン,スワグンハラムラ,ノウジョウ,長野県,諏訪郡原村,農場,0,0,0,0,0,0
20363,39101,3910104,ナガノケン,スワグンハラムラ,ハライザワ,長野県,諏訪郡原村,払沢,0,0,0,0,0,0
20363,39101,3910115,ナガノケン,スワグンハラムラ,ハラヤマ,長野県,諏訪郡原村,原山,0,0,0,0,0,0
20363,39101,3910116,ナガノケン,スワグンハラムラ,ハンノキ,長野県,諏訪郡原村,判の木,0,0,0,0,0,0
20363,39101,3910114,ナガノケン,スワグンハラムラ,ペンション,長野県,諏訪郡原村,ペンション,0,0,0,0,0,0
20363,39101,3910109,ナガノケン,スワグンハラムラ,ミナミハラ,長野県,諏訪郡原村,南原,0,0,0,0,0,0
20363,39101,3910107,ナガノケン,スワグンハラムラ,ムロウチ,長野県,諏訪郡原村,室内,0,0,0,0,0,0
20363,39101,3910113,ナガノケン,スワグンハラムラ,ヤツガネ,長野県,諏訪郡原村,やつがね,0,0,0,0,0,0
20363,39101,3910103,ナガノケン,スワグンハラムラ,ヤツデ,長野県,諏訪郡原村,八ツ手,0,0,0,0,0,0
20363,39101,3910102,ナガノケン,スワグンハラムラ,ヤナギサワ,長野県,諏訪郡原村,柳沢,0,0,0,0,0,0
20382,39904,3990400,ナガノケン,カミイナグンタツノマチ,イカニケイサイガナイバアイ,長野県,上伊那郡辰野町,以下に掲載がない場合,0,0,0,0,0,0
20382,39904,3990424,ナガノケン,カミイナグンタツノマチ,アカハネ,長野県,上伊那郡辰野町,赤羽,0,0,0,0,0,0
20382,39904,3990428,ナガノケン,カミイナグンタツノマチ,イナトミ,長野県,上伊那郡辰野町,伊那富,0,0,0,0,0,0
20382,39906,3990601,ナガノケン,カミイナグンタツノマチ,オノ,長野県,上伊那郡辰野町,小野,0,0,0,0,0,0
20382,39905,3990512,ナガノケン,カミイナグンタツノマチ,カミジマ,長野県,上伊那郡辰野町,上島,0,0,0,0,0,0
20382,39904,3990423,ナガノケン,カミイナグンタツノマチ,サワソコ,長野県,上伊那郡辰野町,沢底,0,0,0,0,0,0
20382,39904,3990421,ナガノケン,カミイナグンタツノマチ,タツノ,長野県,上伊那郡辰野町,辰野,0,0,0,0,0,0
20382,39904,3990427,ナガノケン,カミイナグンタツノマチ,チュウオウ,長野県,上伊那郡辰野町,中央,0,0,0,0,0,0
20382,39904,3990425,ナガノケン,カミイナグンタツノマチ,ヒグチ,長野県,上伊那郡辰野町,樋口,0,0,0,0,0,0
20382,39904,3990422,ナガノケン,カミイナグンタツノマチ,ヒライデ,長野県,上伊那郡辰野町,平出,0,0,0,0,0,0
20382,39904,3990426,ナガノケン,カミイナグンタツノマチ,ミヤキ,長野県,上伊那郡辰野町,宮木,0,0,0,0,0,0
20382,39905,3990511,ナガノケン,カミイナグンタツノマチ,ヨコカワ,長野県,上伊那郡辰野町,横川,0,0,0,0,0,0
20383,39946,3994600,ナガノケン,カミイナグンミノワマチ,イカニケイサイガナイバアイ,長野県,上伊那郡箕輪町,以下に掲載がない場合,0,0,0,0,0,0
20383,39946,3994605,ナガノケン,カミイナグンミノワマチ,ナカゾネ,長野県,上伊那郡箕輪町,中曽根,0,0,0,0,0,0
20383,39946,3994601,ナガノケン,カミイナグンミノワマチ,ナカミノワ,長野県,上伊那郡箕輪町,中箕輪,0,0,0,0,0,0
20383,39946,3994602,ナガノケン,カミイナグンミノワマチ,ヒガシミノワ,長野県,上伊那郡箕輪町,東箕輪,0,0,0,0,0,0
20383,39946,3994604,ナガノケン,カミイナグンミノワマチ,フクヨ,長野県,上伊那郡箕輪町,福与,0,0,0,0,0,0
20383,39946,3994603,ナガノケン,カミイナグンミノワマチ,ミッカマチ,長野県,上伊那郡箕輪町,三日町,0,0,0,0,0,0
20384,39937,3993700,ナガノケン,カミイナグンイイジママチ,イカニケイサイガナイバアイ,長野県,上伊那郡飯島町,以下に掲載がない場合,0,0,0,0,0,0
20384,39937,3993702,ナガノケン,カミイナグンイイジママチ,イイジマ,長野県,上伊那郡飯島町,飯島,0,0,0,0,0,0
20384,39937,3993701,ナガノケン,カミイナグンイイジママチ,タギリ,長野県,上伊那郡飯島町,田切,0,0,0,0,0,0
20384,39937,3993705,ナガノケン,カミイナグンイイジママチ,ナナクボ,長野県,上伊那郡飯島町,七久保,0,0,0,0,0,0
20384,39937,3993703,ナガノケン,カミイナグンイイジママチ,ヒッソリ,長野県,上伊那郡飯島町,日曽利,0,0,0,0,0,0
20384,39937,3993704,ナガノケン,カミイナグンイイジママチ,ホンゴウ,長野県,上伊那郡飯島町,本郷,0,0,0,0,0,0
20385,39945,3994511,ナガノケン,カミイナグンミナミミノワムラ,ミナミミノワムライチエン,長野県,上伊那郡南箕輪村,南箕輪村一円,0,0,0,0,0,0
20386,39938,3993800,ナガノケン,カミイナグンナカガワムラ,イカニケイサイガナイバアイ,長野県,上伊那郡中川村,以下に掲載がない場合,0,0,0,0,0,0
20386,39938,3993801,ナガノケン,カミイナグンナカガワムラ,オオクサ,長野県,上伊那郡中川村,大草,0,0,0,0,0,0
20386,39938,3993802,ナガノケン,カミイナグンナカガワムラ,カタギリ,長野県,上伊那郡中川村,片桐,0,0,0,0,0,0
20386,39938,3993803,ナガノケン,カミイナグンナカガワムラ,カツラシマ,長野県,上伊那郡中川村,葛島,0,0,0,0,0,0
20386,39938,3993804,ナガノケン,カミイナグンナカガワムラ,シトク,長野県,上伊那郡中川村,四徳,0,0,0,0,0,0
20388,39943,3994301,ナガノケン,カミイナグンミヤダムラ,ミヤダムライチエン,長野県,上伊那郡宮田村,宮田村一円,0,0,0,0,0,0
20402,39933,3993300,ナガノケン,シモイナグンマツカワマチ,イカニケイサイガナイバアイ,長野県,下伊那郡松川町,以下に掲載がない場合,0,0,0,0,0,0
20402,39933,3993302,ナガノケン,シモイナグンマツカワマチ,イクタ,長野県,下伊那郡松川町,生田,0,0,0,0,0,0
20402,39933,3993301,ナガノケン,シモイナグンマツカワマチ,カミカタギリ,長野県,下伊那郡松川町,上片桐,0,0,0,0,0,0
20403,39931,3993100,ナガノケン,シモイナグンタカモリマチ,イカニケイサイガナイバアイ,長野県,下伊那郡高森町,以下に掲載がない場合,0,0,0,0,0,0
20403,39931,3993107,ナガノケン,シモイナグンタカモリマチ,イズハラ,長野県,下伊那郡高森町,出原,0,0,0,0,0,0
20403,39931,3993105,ナガノケン,シモイナグンタカモリマチ,ウシマキ,長野県,下伊那郡高森町,牛牧,0,0,0,0,0,0
20403,39931,3993106,ナガノケン,シモイナグンタカモリマチ,オオジマサン,長野県,下伊那郡高森町,大島山,0,0,0,0,0,0
20403,39931,3993104,ナガノケン,シモイナグンタカモリマチ,カミイチダ,長野県,下伊那郡高森町,上市田,0,0,0,0,0,0
20403,39931,3993103,ナガノケン,シモイナグンタカモリマチ,シモイチダ,長野県,下伊那郡高森町,下市田,0,0,0,0,0,0
20403,39931,3993101,ナガノケン,シモイナグンタカモリマチ,ヤマブキ,長野県,下伊那郡高森町,山吹,0,0,0,0,0,0
20403,39931,3993102,ナガノケン,シモイナグンタカモリマチ,ヨシダ,長野県,下伊那郡高森町,吉田,0,0,0,0,0,0
20404,39915,3991500,ナガノケン,シモイナグンアナンチョウ,イカニケイサイガナイバアイ,長野県,下伊那郡阿南町,以下に掲載がない場合,0,0,0,0,0,0
20404,39915,3991501,ナガノケン,シモイナグンアナンチョウ,キタジョウ,長野県,下伊那郡阿南町,北條,0,0,0,0,0,0
20404,39915,3991505,ナガノケン,シモイナグンアナンチョウ,トミクサ,長野県,下伊那郡阿南町,富草,0,0,0,0,0,0
20404,39916,3991612,ナガノケン,シモイナグンアナンチョウ,ニイノ,長野県,下伊那郡阿南町,新野,0,0,0,0,0,0
20404,39915,3991504,ナガノケン,シモイナグンアナンチョウ,ニシジョウ,長野県,下伊那郡阿南町,西條,0,0,0,0,0,0
20404,39915,3991502,ナガノケン,シモイナグンアナンチョウ,ヒガシジョウ,長野県,下伊那郡阿南町,東條,0,0,0,0,0,0
20404,39915,3991503,ナガノケン,シモイナグンアナンチョウ,ミナミジョウ,長野県,下伊那郡阿南町,南條,0,0,0,0,0,0
20404,39916,3991611,ナガノケン,シモイナグンアナンチョウ,ワゴウ,長野県,下伊那郡阿南町,和合,0,0,0,0,0,0
20407,39503,3950300,ナガノケン,シモイナグンアチムラ,イカニケイサイガナイバアイ,長野県,下伊那郡阿智村,以下に掲載がない場合,0,0,0,0,0,0
20407,39503,3950301,ナガノケン,シモイナグンアチムラ,カスガ,長野県,下伊那郡阿智村,春日,0,0,0,0,0,0
20407,39503,3950302,ナガノケン,シモイナグンアチムラ,ゴカ,長野県,下伊那郡阿智村,伍和,0,0,0,0,0,0
20407,39503,3950305,ナガノケン,シモイナグンアチムラ,コマ,長野県,下伊那郡阿智村,駒,0,0,0,0,0,0
20407,39503,3950303,ナガノケン,シモイナグンアチムラ,コマバ,長野県,下伊那郡阿智村,駒場,0,0,0,0,0,0
20407,39504,3950401,ナガノケン,シモイナグンアチムラ,セイナイジ,長野県,下伊那郡阿智村,清内路,0,0,0,0,0,0
20407,39503,3950304,ナガノケン,シモイナグンアチムラ,チサト,長野県,下伊那郡阿智村,智里,0,0,0,0,0,0
20407,39505,3950501,ナガノケン,シモイナグンアチムラ,ナミアイ,長野県,下伊那郡阿智村,浪合,0,0,0,0,0,0
20409,39506,3950601,ナガノケン,シモイナグンヒラヤムラ,ヒラヤムライチエン,長野県,下伊那郡平谷村,平谷村一円,0,0,0,0,0,0
20410,39507,3950701,ナガノケン,シモイナグンネバムラ,ネバムライチエン,長野県,下伊那郡根羽村,根羽村一円,0,0,0,0,0,0
20411,39921,3992100,ナガノケン,シモイナグンシモジョウムラ,イカニケイサイガナイバアイ,長野県,下伊那郡下條村,以下に掲載がない場合,0,0,0,0,0,0
20411,39921,3992102,ナガノケン,シモイナグンシモジョウムラ,ヒサワ,長野県,下伊那郡下條村,陽皐,0,0,0,0,0,0
20411,39921,3992101,ナガノケン,シモイナグンシモジョウムラ,ムツザワ,長野県,下伊那郡下條村,睦沢,0,0,0,0,0,0
20412,39916,3991601,ナガノケン,シモイナグンウルギムラ,ウルギムライチエン,長野県,下伊那郡売木村,売木村一円,0,0,0,0,0,0
20413,39912,3991200,ナガノケン,シモイナグンテンリュウムラ,イカニケイサイガナイバアイ,長野県,下伊那郡天龍村,以下に掲載がない場合,0,0,0,0,0,0
20413,39912,3991202,ナガノケン,シモイナグンテンリュウムラ,カミハラ,長野県,下伊那郡天龍村,神原,0,0,0,0,0,0
20413,39912,3991203,ナガノケン,シモイナグンテンリュウムラ,ナガシマ,長野県,下伊那郡天龍村,長島,0,0,0,0,0,0
20413,39912,3991201,ナガノケン,シモイナグンテンリュウムラ,ヒラオカ,長野県,下伊那郡天龍村,平岡,0,0,0,0,0,0
20414,39918,3991801,ナガノケン,シモイナグンヤスオカムラ,ヤスオカムライチエン,長野県,下伊那郡泰阜村,泰阜村一円,0,0,0,0,0,0
20415,39511,3951100,ナガノケン,シモイナグンタカギムラ,イカニケイサイガナイバアイ,長野県,下伊那郡喬木村,以下に掲載がない場合,0,0,0,0,0,0
20415,39511,3951101,ナガノケン,シモイナグンタカギムラ,アジマ,長野県,下伊那郡喬木村,阿島,0,0,0,0,0,0
20415,39511,3951108,ナガノケン,シモイナグンタカギムラ,イクマ,長野県,下伊那郡喬木村,伊久間,0,0,0,0,0,0
20415,39511,3951104,ナガノケン,シモイナグンタカギムラ,ウジノリ,長野県,下伊那郡喬木村,氏乗,0,0,0,0,0,0
20415,39511,3951103,ナガノケン,シモイナグンタカギムラ,オオシマ,長野県,下伊那郡喬木村,大島,0,0,0,0,0,0
20415,39511,3951105,ナガノケン,シモイナグンタカギムラ,オオワチ,長野県,下伊那郡喬木村,大和知,0,0,0,0,0,0
20415,39511,3951107,ナガノケン,シモイナグンタカギムラ,オガワ,長野県,下伊那郡喬木村,小川,0,0,0,0,0,0
20415,39511,3951102,ナガノケン,シモイナグンタカギムラ,カカス,長野県,下伊那郡喬木村,加々須,0,0,0,0,0,0
20415,39511,3951106,ナガノケン,シモイナグンタカギムラ,トミダ,長野県,下伊那郡喬木村,富田,0,0,0,0,0,0
20416,39932,3993200,ナガノケン,シモイナグントヨオカムラ,イカニケイサイガナイバアイ,長野県,下伊那郡豊丘村,以下に掲載がない場合,0,0,0,0,0,0
20416,39933,3993311,ナガノケン,シモイナグントヨオカムラ,カワノ(7530-7590バンチ),長野県,下伊那郡豊丘村,河野(7530〜7590番地),1,0,0,0,0,0
20416,39932,3993201,ナガノケン,シモイナグントヨオカムラ,カワノ(ソノタ),長野県,下伊那郡豊丘村,河野(その他),1,0,0,0,0,0
20416,39932,3993202,ナガノケン,シモイナグントヨオカムラ,クマシロ,長野県,下伊那郡豊丘村,神稲,0,0,0,0,0,0
20417,39935,3993500,ナガノケン,シモイナグンオオシカムラ,イカニケイサイガナイバアイ,長野県,下伊那郡大鹿村,以下に掲載がない場合,0,0,0,0,0,0
20417,39935,3993502,ナガノケン,シモイナグンオオシカムラ,オオカワラ,長野県,下伊那郡大鹿村,大河原,0,0,0,0,0,0
20417,39935,3993501,ナガノケン,シモイナグンオオシカムラ,カシオ,長野県,下伊那郡大鹿村,鹿塩,0,0,0,0,0,0
20422,39956,3995600,ナガノケン,キソグンアゲマツマチ,イカニケイサイガナイバアイ,長野県,木曽郡上松町,以下に掲載がない場合,0,0,0,0,0,0
20422,39956,3995601,ナガノケン,キソグンアゲマツマチ,アゲマツ,長野県,木曽郡上松町,上松,0,0,0,0,0,0
20422,39956,3995609,ナガノケン,キソグンアゲマツマチ,アサヒマチ,長野県,木曽郡上松町,旭町,0,0,0,0,0,0
20422,39956,3995603,ナガノケン,キソグンアゲマツマチ,エキマエドオリ,長野県,木曽郡上松町,駅前通り,0,0,1,0,0,0
20422,39956,3995607,ナガノケン,キソグンアゲマツマチ,オガワ,長野県,木曽郡上松町,小川,0,0,0,0,0,0
20422,39956,3995608,ナガノケン,キソグンアゲマツマチ,オギハラ,長野県,木曽郡上松町,荻原,0,0,0,0,0,0
20422,39956,3995605,ナガノケン,キソグンアゲマツマチ,サカエマチ,長野県,木曽郡上松町,栄町,0,0,1,0,0,0
20422,39956,3995604,ナガノケン,キソグンアゲマツマチ,ショウジママチ,長野県,木曽郡上松町,正島町,0,0,1,0,0,0
20422,39956,3995602,ナガノケン,キソグンアゲマツマチ,ホンマチドオリ,長野県,木曽郡上松町,本町通り,0,0,1,0,0,0
20422,39956,3995606,ナガノケン,キソグンアゲマツマチ,ミドリマチ,長野県,木曽郡上松町,緑町,0,0,1,0,0,0
20423,39953,3995300,ナガノケン,キソグンナギソマチ,イカニケイサイガナイバアイ,長野県,木曽郡南木曽町,以下に掲載がない場合,0,0,0,0,0,0
20423,39953,3995302,ナガノケン,キソグンナギソマチ,アヅマ,長野県,木曽郡南木曽町,吾妻,0,0,0,0,0,0
20423,39953,3995303,ナガノケン,キソグンナギソマチ,タダチ,長野県,木曽郡南木曽町,田立,0,0,0,0,0,0
20423,39953,3995301,ナガノケン,キソグンナギソマチ,ヨミカキ,長野県,木曽郡南木曽町,読書,0,0,0,0,0,0
20425,39962,3996200,ナガノケン,キソグンキソムラ,イカニケイサイガナイバアイ,長野県,木曽郡木祖村,以下に掲載がない場合,0,0,0,0,0,0
20425,39962,3996203,ナガノケン,キソグンキソムラ,オギソ,長野県,木曽郡木祖村,小木曽,0,0,0,0,0,0
20425,39962,3996202,ナガノケン,キソグンキソムラ,スゲ,長野県,木曽郡木祖村,菅,0,0,0,0,0,0
20425,39962,3996201,ナガノケン,キソグンキソムラ,ヤブハラ,長野県,木曽郡木祖村,薮原,0,0,0,0,0,0
20429,39702,3970201,ナガノケン,キソグンオウタキムラ,オウタキムライチエン,長野県,木曽郡王滝村,王滝村一円,0,0,0,0,0,0
20430,39955,3995500,ナガノケン,キソグンオオクワムラ,イカニケイサイガナイバアイ,長野県,木曽郡大桑村,以下に掲載がない場合,0,0,0,0,0,0
20430,39955,3995502,ナガノケン,キソグンオオクワムラ,スハラ,長野県,木曽郡大桑村,須原,0,0,0,0,0,0
20430,39955,3995501,ナガノケン,キソグンオオクワムラ,トノ,長野県,木曽郡大桑村,殿,0,0,0,0,0,0
20430,39955,3995503,ナガノケン,キソグンオオクワムラ,ナガノ,長野県,木曽郡大桑村,長野,0,0,0,0,0,0
20430,39955,3995504,ナガノケン,キソグンオオクワムラ,ノジリ,長野県,木曽郡大桑村,野尻,0,0,0,0,0,0
20432,397,3970000,ナガノケン,キソグンキソマチ,イカニケイサイガナイバアイ,長野県,木曽郡木曽町,以下に掲載がない場合,0,0,0,0,0,0
20432,39703,3970301,ナガノケン,キソグンキソマチ,カイダコウゲンスエカワ,長野県,木曽郡木曽町,開田高原末川,0,0,0,0,0,0
20432,39703,3970302,ナガノケン,キソグンキソマチ,カイダコウゲンニシノ,長野県,木曽郡木曽町,開田高原西野,0,0,0,0,0,0
20432,397,3970002,ナガノケン,キソグンキソマチ,シンカイ,長野県,木曽郡木曽町,新開,0,0,0,0,0,0
20432,397,3970003,ナガノケン,キソグンキソマチ,シンカイフク,長野県,木曽郡木曽町,新開福,0,0,0,0,0,0
20432,39961,3996101,ナガノケン,キソグンキソマチ,ヒヨシ,長野県,木曽郡木曽町,日義,0,0,0,0,0,0
20432,397,3970001,ナガノケン,キソグンキソマチ,フクシマ,長野県,木曽郡木曽町,福島,0,0,0,0,0,0
20432,39701,3970101,ナガノケン,キソグンキソマチ,ミタケ,長野県,木曽郡木曽町,三岳,0,0,0,0,0,0
20446,39977,3997700,ナガノケン,ヒガシチクマグンオミムラ,イカニケイサイガナイバアイ,長野県,東筑摩郡麻績村,以下に掲載がない場合,0,0,0,0,0,0
20446,39977,3997701,ナガノケン,ヒガシチクマグンオミムラ,オ,長野県,東筑摩郡麻績村,麻,0,0,0,0,0,0
20446,39977,3997702,ナガノケン,ヒガシチクマグンオミムラ,ヒ,長野県,東筑摩郡麻績村,日,0,0,0,0,0,0
20448,39972,3997200,ナガノケン,ヒガシチクマグンイクサカムラ,イカニケイサイガナイバアイ,長野県,東筑摩郡生坂村,以下に掲載がない場合,0,0,0,0,0,0
20448,39972,3997201,ナガノケン,ヒガシチクマグンイクサカムラ,イクサカムラノツギニバンチガクルバアイ,長野県,東筑摩郡生坂村,生坂村の次に番地がくる場合,0,0,0,0,0,0
20448,39972,3997202,ナガノケン,ヒガシチクマグンイクサカムラ,キタリクゴウ,長野県,東筑摩郡生坂村,北陸郷,0,0,0,0,0,0
20448,39973,3997311,ナガノケン,ヒガシチクマグンイクサカムラ,ヒガシヒロツ,長野県,東筑摩郡生坂村,東広津,0,0,0,0,0,0
20450,39013,3901301,ナガノケン,ヒガシチクマグンヤマガタムラ,ヤマガタムライチエン,長野県,東筑摩郡山形村,山形村一円,0,0,0,0,0,0
20451,39011,3901100,ナガノケン,ヒガシチクマグンアサヒムラ,イカニケイサイガナイバアイ,長野県,東筑摩郡朝日村,以下に掲載がない場合,0,0,0,0,0,0
20451,39011,3901102,ナガノケン,ヒガシチクマグンアサヒムラ,オノザワ,長野県,東筑摩郡朝日村,小野沢,0,0,0,0,0,0
20451,39011,3901104,ナガノケン,ヒガシチクマグンアサヒムラ,コミ,長野県,東筑摩郡朝日村,古見,0,0,0,0,0,0
20451,39011,3901101,ナガノケン,ヒガシチクマグンアサヒムラ,ニシセバ,長野県,東筑摩郡朝日村,西洗馬,0,0,0,0,0,0
20451,39011,3901103,ナガノケン,ヒガシチクマグンアサヒムラ,ハリオ,長野県,東筑摩郡朝日村,針尾,0,0,0,0,0,0
20452,39975,3997500,ナガノケン,ヒガシチクマグンチクホクムラ,イカニケイサイガナイバアイ,長野県,東筑摩郡筑北村,以下に掲載がない場合,0,0,0,0,0,0
20452,39975,3997503,ナガノケン,ヒガシチクマグンチクホクムラ,オオサワシンデン,長野県,東筑摩郡筑北村,大沢新田,0,0,0,0,0,0
20452,39977,3997711,ナガノケン,ヒガシチクマグンチクホクムラ,サカイ,長野県,東筑摩郡筑北村,坂井,0,0,0,0,0,0
20452,39976,3997601,ナガノケン,ヒガシチクマグンチクホクムラ,サカキタ,長野県,東筑摩郡筑北村,坂北,0,0,0,0,0,0
20452,39975,3997501,ナガノケン,ヒガシチクマグンチクホクムラ,ニシジョウ,長野県,東筑摩郡筑北村,西条,0,0,0,0,0,0
20452,39975,3997502,ナガノケン,ヒガシチクマグンチクホクムラ,ヒガシジョウ,長野県,東筑摩郡筑北村,東条,0,0,0,0,0,0
20452,39975,3997504,ナガノケン,ヒガシチクマグンチクホクムラ,ミダレハシ,長野県,東筑摩郡筑北村,乱橋,0,0,0,0,0,0
20481,39986,3998600,ナガノケン,キタアヅミグンイケダマチ,イカニケイサイガナイバアイ,長野県,北安曇郡池田町,以下に掲載がない場合,0,0,0,0,0,0
20481,39986,3998602,ナガノケン,キタアヅミグンイケダマチ,アイソメ,長野県,北安曇郡池田町,会染,0,0,0,0,0,0
20481,39986,3998601,ナガノケン,キタアヅミグンイケダマチ,イケダ,長野県,北安曇郡池田町,池田,0,0,0,0,0,0
20481,39986,3998603,ナガノケン,キタアヅミグンイケダマチ,ナカウ,長野県,北安曇郡池田町,中鵜,0,0,0,0,0,0
20481,39986,3998604,ナガノケン,キタアヅミグンイケダマチ,ヒロツ,長野県,北安曇郡池田町,広津,0,0,0,0,0,0
20481,39986,3998605,ナガノケン,キタアヅミグンイケダマチ,リクゴウ,長野県,北安曇郡池田町,陸郷,0,0,0,0,0,0
20482,39985,3998501,ナガノケン,キタアヅミグンマツカワムラ,マツカワムライチエン,長野県,北安曇郡松川村,松川村一円,0,0,0,0,0,0
20485,39993,3999300,ナガノケン,キタアヅミグンハクバムラ,イカニケイサイガナイバアイ,長野県,北安曇郡白馬村,以下に掲載がない場合,0,0,0,0,0,0
20485,39992,3999211,ナガノケン,キタアヅミグンハクバムラ,カミシロ,長野県,北安曇郡白馬村,神城,0,0,0,0,0,0
20485,39993,3999301,ナガノケン,キタアヅミグンハクバムラ,ホクジョウ,長野県,北安曇郡白馬村,北城,0,0,0,0,0,0
20486,39994,3999400,ナガノケン,キタアヅミグンオタリムラ,イカニケイサイガナイバアイ,長野県,北安曇郡小谷村,以下に掲載がない場合,0,0,0,0,0,0
20486,39996,3999601,ナガノケン,キタアヅミグンオタリムラ,キタオタリ,長野県,北安曇郡小谷村,北小谷,0,0,0,0,0,0
20486,39994,3999422,ナガノケン,キタアヅミグンオタリムラ,チクニオツ,長野県,北安曇郡小谷村,千国乙,0,0,0,0,0,0
20486,39994,3999421,ナガノケン,キタアヅミグンオタリムラ,ナカオタリヘイ,長野県,北安曇郡小谷村,中小谷丙,0,0,0,0,0,0
20486,39995,3999511,ナガノケン,キタアヅミグンオタリムラ,ナカツチ,長野県,北安曇郡小谷村,中土,0,0,0,0,0,0
21206,39951,5080502,ギフケン,ナカツガワシ,マゴメ,岐阜県,中津川市,馬籠,0,0,0,0,0,0
21206,39951,5080501,ギフケン,ナカツガワシ,ヤマグチ,岐阜県,中津川市,山口,0,0,0,0,0,0
20402,39933,3993304,ナガノケン,シモイナグンマツカワマチ,オオジマ,長野県,下伊那郡松川町,大島,0,0,0,0,1,5 39
20402,39933,3993303,ナガノケン,シモイナグンマツカワマチ,モトオオジマ,長野県,下伊那郡松川町,元大島,0,0,0,0,1,5 39
20202,399,3990036,ナガノケン,マツモトシ,ムライマチミナミ,長野県,松本市,村井町南,0,0,1,0,1,2 39 | [
"y.dream.nest@icloud.com"
] | y.dream.nest@icloud.com |
8315a7cb937f1b86b30bb22599caaae7a371e7c0 | 1b2407f35191917818ea7f276079aa8f62429770 | /nova/tests/unit/virt/test_hardware.py | 9d85366dd1edae4fdf8faabf36f4efe2a65ce33c | [
"Apache-2.0"
] | permissive | ISCAS-VDI/nova-base | 67838b54230d250b71fd1067c4a754afbc258883 | dbb6bba94f8a3eae5ed420d8af3431ab116c3fa7 | refs/heads/master | 2021-01-20T19:08:51.403722 | 2016-06-07T06:46:54 | 2016-06-07T06:46:54 | 60,588,545 | 0 | 1 | Apache-2.0 | 2020-07-24T00:41:15 | 2016-06-07T06:38:23 | Python | UTF-8 | Python | false | false | 122,166 | py | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import uuid
import mock
from oslo_serialization import jsonutils
import six
from nova import context
from nova import exception
from nova import objects
from nova.objects import base as base_obj
from nova.objects import fields
from nova.pci import stats
from nova import test
from nova.virt import hardware as hw
class InstanceInfoTests(test.NoDBTestCase):
    """Tests for the hw.InstanceInfo value object.

    Covers default construction, explicit construction, and the
    field-wise equality comparison.
    """

    def test_instance_info_default(self):
        # With no arguments, state/id default to None and all the
        # numeric fields default to 0.
        ii = hw.InstanceInfo()
        self.assertIsNone(ii.state)
        self.assertIsNone(ii.id)
        self.assertEqual(0, ii.max_mem_kb)
        self.assertEqual(0, ii.mem_kb)
        self.assertEqual(0, ii.num_cpu)
        self.assertEqual(0, ii.cpu_time_ns)

    def test_instance_info(self):
        # Explicitly supplied values are stored unmodified.
        ii = hw.InstanceInfo(state='fake-state',
                             max_mem_kb=1,
                             mem_kb=2,
                             num_cpu=3,
                             cpu_time_ns=4,
                             id='fake-id')
        self.assertEqual('fake-state', ii.state)
        self.assertEqual('fake-id', ii.id)
        self.assertEqual(1, ii.max_mem_kb)
        self.assertEqual(2, ii.mem_kb)
        self.assertEqual(3, ii.num_cpu)
        self.assertEqual(4, ii.cpu_time_ns)

    def test_instance_info_equals(self):
        # NOTE: method name fixed from "test_instance_infoi_equals".
        # Two objects with identical field values compare equal; any
        # differing field makes them unequal.
        ii1 = hw.InstanceInfo(state='fake-state',
                              max_mem_kb=1,
                              mem_kb=2,
                              num_cpu=3,
                              cpu_time_ns=4,
                              id='fake-id')
        ii2 = hw.InstanceInfo(state='fake-state',
                              max_mem_kb=1,
                              mem_kb=2,
                              num_cpu=3,
                              cpu_time_ns=4,
                              id='fake-id')
        ii3 = hw.InstanceInfo(state='fake-estat',
                              max_mem_kb=11,
                              mem_kb=22,
                              num_cpu=33,
                              cpu_time_ns=44,
                              id='fake-di')
        self.assertEqual(ii1, ii2)
        self.assertNotEqual(ii1, ii3)
class CpuSetTestCase(test.NoDBTestCase):
    """Tests for vcpu_pin_set handling and CPU spec parse/format helpers."""

    def test_get_vcpu_pin_set(self):
        # The vcpu_pin_set config option is parsed into a set of CPU IDs,
        # honouring ranges and exclusions.
        self.flags(vcpu_pin_set="1-3,5,^2")
        self.assertEqual(set([1, 3, 5]), hw.get_vcpu_pin_set())

    def test_parse_cpu_spec_none_returns_none(self):
        # An unset vcpu_pin_set yields None rather than an empty set.
        self.flags(vcpu_pin_set=None)
        self.assertIsNone(hw.get_vcpu_pin_set())

    def test_parse_cpu_spec_valid_syntax_works(self):
        # Each (spec, expected) pair exercises one accepted syntax form:
        # single IDs, lists, ranges, whitespace tolerance and exclusions.
        valid_specs = [
            ("1", set([1])),
            ("1,2", set([1, 2])),
            (", , 1 , ,, 2, ,", set([1, 2])),
            ("1-1", set([1])),
            (" 1 - 1, 1 - 2 , 1 -3", set([1, 2, 3])),
            ("1,^2", set([1])),
            ("1-2, ^1", set([2])),
            ("1-3,5,^2", set([1, 3, 5])),
            (" 1 - 3 , ^2, 5", set([1, 3, 5])),
            (" 1,1, ^1", set([])),
            ("^0-1", set([])),
            ("0-3,^1-2", set([0, 3])),
        ]
        for spec, expected in valid_specs:
            self.assertEqual(expected, hw.parse_cpu_spec(spec))

    def test_parse_cpu_spec_invalid_syntax_raises(self):
        # Every malformed spec must raise exception.Invalid.
        bad_specs = [
            " -1-3,5,^2",
            "1-3-,5,^2",
            "-3,5,^2",
            "1-,5,^2",
            "1-3,5,^2^",
            "1-3,5,^2-",
            "--13,^^5,^2",
            "a-3,5,^2",
            "1-a,5,^2",
            "1-3,b,^2",
            "1-3,5,^c",
            "3 - 1, 5 , ^ 2 ",
        ]
        for spec in bad_specs:
            self.assertRaises(exception.Invalid, hw.parse_cpu_spec, spec)

    def test_format_cpu_spec(self):
        # Both set and list inputs are accepted; with ranges allowed
        # consecutive IDs collapse into "a-b" form.
        ranged_cases = [
            (set([]), ""),
            ([], ""),
            (set([1, 3]), "1,3"),
            ([1, 3], "1,3"),
            (set([1, 2, 4, 6]), "1-2,4,6"),
            ([1, 2, 4, 6], "1-2,4,6"),
            (set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]),
             "10-11,13-16,19-20,40,42,48"),
            ([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48],
             "10-11,13-16,19-20,40,42,48"),
        ]
        for cpus, expected in ranged_cases:
            self.assertEqual(expected, hw.format_cpu_spec(cpus))

        # With allow_ranges=False every ID is emitted individually.
        flat_cases = [
            (set([1, 2, 4, 6]), "1,2,4,6"),
            ([1, 2, 4, 6], "1,2,4,6"),
            (set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]),
             "10,11,13,14,15,16,19,20,40,42,48"),
            ([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48],
             "10,11,13,14,15,16,19,20,40,42,48"),
        ]
        for cpus, expected in flat_cases:
            self.assertEqual(expected,
                             hw.format_cpu_spec(cpus, allow_ranges=False))
class VCPUTopologyTest(test.NoDBTestCase):
def test_validate_config(self):
testdata = [
{ # Flavor sets preferred topology only
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
}),
"image": {
"properties": {}
},
"expect": (
8, 2, 1, 65536, 65536, 65536
)
},
{ # Image topology overrides flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
"hw:cpu_max_threads": "2",
}),
"image": {
"properties": {
"hw_cpu_sockets": "4",
"hw_cpu_cores": "2",
"hw_cpu_threads": "2",
}
},
"expect": (
4, 2, 2, 65536, 65536, 2,
)
},
{ # Partial image topology overrides flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_sockets": "2",
}
},
"expect": (
2, -1, -1, 65536, 65536, 65536,
)
},
{ # Restrict use of threads
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_threads": "2",
}),
"image": {
"properties": {
"hw_cpu_max_threads": "1",
}
},
"expect": (
-1, -1, -1, 65536, 65536, 1,
)
},
{ # Force use of at least two sockets
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {}
},
"expect": (
-1, -1, -1, 65536, 8, 1
)
},
{ # Image limits reduce flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_max_cores": "4",
}
},
"expect": (
-1, -1, -1, 65536, 4, 1
)
},
{ # Image limits kill flavor preferred
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "2",
"hw:cpu_cores": "8",
"hw:cpu_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_max_cores": "4",
}
},
"expect": (
-1, -1, -1, 65536, 4, 65536
)
},
{ # Image limits cannot exceed flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_max_cores": "16",
}
},
"expect": exception.ImageVCPULimitsRangeExceeded,
},
{ # Image preferred cannot exceed flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_cores": "16",
}
},
"expect": exception.ImageVCPUTopologyRangeExceeded,
},
]
for topo_test in testdata:
image_meta = objects.ImageMeta.from_dict(topo_test["image"])
if type(topo_test["expect"]) == tuple:
(preferred,
maximum) = hw._get_cpu_topology_constraints(
topo_test["flavor"], image_meta)
self.assertEqual(topo_test["expect"][0], preferred.sockets)
self.assertEqual(topo_test["expect"][1], preferred.cores)
self.assertEqual(topo_test["expect"][2], preferred.threads)
self.assertEqual(topo_test["expect"][3], maximum.sockets)
self.assertEqual(topo_test["expect"][4], maximum.cores)
self.assertEqual(topo_test["expect"][5], maximum.threads)
else:
self.assertRaises(topo_test["expect"],
hw._get_cpu_topology_constraints,
topo_test["flavor"],
image_meta)
def test_possible_topologies(self):
testdata = [
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 2, 1],
[2, 4, 1],
[1, 8, 1],
[4, 1, 2],
[2, 2, 2],
[1, 4, 2],
]
},
{
"allow_threads": False,
"vcpus": 8,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 2, 1],
[2, 4, 1],
[1, 8, 1],
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1024,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 2, 1],
[2, 4, 1],
[1, 8, 1],
[4, 1, 2],
[2, 2, 2],
[1, 4, 2],
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 1, 2],
]
},
{
"allow_threads": True,
"vcpus": 7,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"expect": [
[7, 1, 1],
[1, 7, 1],
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 2,
"maxcores": 1,
"maxthreads": 1,
"expect": exception.ImageVCPULimitsRangeImpossible,
},
{
"allow_threads": False,
"vcpus": 8,
"maxsockets": 2,
"maxcores": 1,
"maxthreads": 4,
"expect": exception.ImageVCPULimitsRangeImpossible,
},
]
for topo_test in testdata:
if type(topo_test["expect"]) == list:
actual = []
for topology in hw._get_possible_cpu_topologies(
topo_test["vcpus"],
objects.VirtCPUTopology(
sockets=topo_test["maxsockets"],
cores=topo_test["maxcores"],
threads=topo_test["maxthreads"]),
topo_test["allow_threads"]):
actual.append([topology.sockets,
topology.cores,
topology.threads])
self.assertEqual(topo_test["expect"], actual)
else:
self.assertRaises(topo_test["expect"],
hw._get_possible_cpu_topologies,
topo_test["vcpus"],
objects.VirtCPUTopology(
sockets=topo_test["maxsockets"],
cores=topo_test["maxcores"],
threads=topo_test["maxthreads"]),
topo_test["allow_threads"])
def test_sorting_topologies(self):
testdata = [
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"sockets": 4,
"cores": 2,
"threads": 1,
"expect": [
[4, 2, 1], # score = 2
[8, 1, 1], # score = 1
[2, 4, 1], # score = 1
[1, 8, 1], # score = 1
[4, 1, 2], # score = 1
[2, 2, 2], # score = 1
[1, 4, 2], # score = 1
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1024,
"maxthreads": 2,
"sockets": -1,
"cores": 4,
"threads": -1,
"expect": [
[2, 4, 1], # score = 1
[1, 4, 2], # score = 1
[8, 1, 1], # score = 0
[4, 2, 1], # score = 0
[1, 8, 1], # score = 0
[4, 1, 2], # score = 0
[2, 2, 2], # score = 0
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1,
"maxthreads": 2,
"sockets": -1,
"cores": -1,
"threads": 2,
"expect": [
[4, 1, 2], # score = 1
[8, 1, 1], # score = 0
]
},
{
"allow_threads": False,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1,
"maxthreads": 2,
"sockets": -1,
"cores": -1,
"threads": 2,
"expect": [
[8, 1, 1], # score = 0
]
},
]
for topo_test in testdata:
actual = []
possible = hw._get_possible_cpu_topologies(
topo_test["vcpus"],
objects.VirtCPUTopology(sockets=topo_test["maxsockets"],
cores=topo_test["maxcores"],
threads=topo_test["maxthreads"]),
topo_test["allow_threads"])
tops = hw._sort_possible_cpu_topologies(
possible,
objects.VirtCPUTopology(sockets=topo_test["sockets"],
cores=topo_test["cores"],
threads=topo_test["threads"]))
for topology in tops:
actual.append([topology.sockets,
topology.cores,
topology.threads])
self.assertEqual(topo_test["expect"], actual)
    def test_best_config(self):
        """Check selection of the single best CPU topology.

        Exercises hw._get_desirable_cpu_topologies() with combinations
        of flavor extra specs (``hw:cpu_*``), image properties
        (``hw_cpu_*``) and, for some entries, an instance NUMA topology
        whose per-cell ``cpu_topology`` constrains the thread count.
        ``expect`` is the [sockets, cores, threads] of the first
        (preferred) topology returned.
        """
        testdata = [
            {  # Flavor sets preferred topology only
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1"
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": [8, 2, 1],
            },
            {  # Image topology overrides flavor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1",
                                             "hw:cpu_maxthreads": "2",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "4",
                        "hw_cpu_cores": "2",
                        "hw_cpu_threads": "2",
                    }
                },
                "expect": [4, 2, 2],
            },
            {  # Image topology overrides flavor
                "allow_threads": False,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1",
                                             "hw:cpu_maxthreads": "2",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "4",
                        "hw_cpu_cores": "2",
                        "hw_cpu_threads": "2",
                    }
                },
                "expect": [8, 2, 1],
            },
            {  # Partial image topology overrides flavor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1"
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "2"
                    }
                },
                "expect": [2, 8, 1],
            },
            {  # Restrict use of threads
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_threads": "1"
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": [16, 1, 1]
            },
            {  # Force use of at least two sockets
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_cores": "8",
                                             "hw:cpu_max_threads": "1",
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": [16, 1, 1]
            },
            {  # Image limits reduce flavor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_sockets": "8",
                                             "hw:cpu_max_cores": "8",
                                             "hw:cpu_max_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_sockets": 4,
                    }
                },
                "expect": [4, 4, 1]
            },
            {  # Image limits kill flavor preferred
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "2",
                                             "hw:cpu_cores": "8",
                                             "hw:cpu_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": 4,
                    }
                },
                "expect": [16, 1, 1]
            },
            {  # NUMA needs threads, only cores requested by flavor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_cores": "2",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": 2,
                    }
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1]), memory=1024,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=2)),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([2, 3]), memory=1024)]),
                "expect": [1, 2, 2]
            },
            {  # NUMA needs threads, but more than requested by flavor - the
               # least amount of threads wins
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_threads": "2",
                                         }),
                "image": {
                    "properties": {}
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 2]
            },
            {  # NUMA needs threads, but more than limit in flavor - the
               # least amount of threads which divides into the vcpu
               # count wins. So with desired 4, max of 3, and
               # vcpu count of 4, we should get 2 threads.
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_sockets": "5",
                                             "hw:cpu_max_cores": "2",
                                             "hw:cpu_max_threads": "3",
                                         }),
                "image": {
                    "properties": {}
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 2]
            },
            {  # NUMA needs threads, but thread count does not
               # divide into flavor vcpu count, so we must
               # reduce thread count to closest divisor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=6, memory_mb=2048,
                                         extra_specs={
                                         }),
                "image": {
                    "properties": {}
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 3]
            },
            {  # NUMA needs different number of threads per cell - the least
               # amount of threads wins
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={}),
                "image": {
                    "properties": {}
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=2, threads=2)),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([4, 5, 6, 7]), memory=1024,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [4, 1, 2]
            },
        ]
        for topo_test in testdata:
            image_meta = objects.ImageMeta.from_dict(topo_test["image"])
            topology = hw._get_desirable_cpu_topologies(
                topo_test["flavor"],
                image_meta,
                topo_test["allow_threads"],
                topo_test.get("numa_topology"))[0]
            self.assertEqual(topo_test["expect"][0], topology.sockets)
            self.assertEqual(topo_test["expect"][1], topology.cores)
            self.assertEqual(topo_test["expect"][2], topology.threads)
class NUMATopologyTest(test.NoDBTestCase):
    """Tests for guest NUMA constraint parsing and host NUMA accounting.

    Covers hw.numa_get_constraints() (flavor/image driven guest topology),
    hw.numa_usage_from_instances() (host usage bookkeeping, including huge
    page reservations) and JSON round-tripping of NUMATopology objects.
    """

    def test_topology_constraints(self):
        """Check guest NUMA topologies derived from flavor/image settings.

        Each ``testdata`` entry supplies a flavor and image dict;
        ``expect`` is either None (no NUMA constraints), an exception
        class that numa_get_constraints() must raise, or the expected
        InstanceNUMATopology.
        """
        testdata = [
            {
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                         }),
                "image": {
                },
                "expect": None,
            },
            {
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 2
                }),
                "image": {
                },
                "expect": objects.InstanceNUMATopology(cells=
                    [
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([4, 5, 6, 7]), memory=1024),
                    ]),
            },
            {
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                    "hw:mem_page_size": 2048
                }),
                "image": {
                },
                "expect": objects.InstanceNUMATopology(cells=[
                    objects.InstanceNUMACell(
                        id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                        memory=2048, pagesize=2048)
                    ]),
            },
            {
                # vcpus is not a multiple of nodes, so it
                # is an error to not provide cpu/mem mapping
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 3
                }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyAsymmetric,
            },
            {
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                    "hw:numa_nodes": 3,
                    "hw:numa_cpus.0": "0-3",
                    "hw:numa_mem.0": "1024",
                    "hw:numa_cpus.1": "4,6",
                    "hw:numa_mem.1": "512",
                    "hw:numa_cpus.2": "5,7",
                    "hw:numa_mem.2": "512",
                }),
                "image": {
                },
                "expect": objects.InstanceNUMATopology(cells=
                    [
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([4, 6]), memory=512),
                        objects.InstanceNUMACell(
                            id=2, cpuset=set([5, 7]), memory=512)
                    ]),
            },
            {
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
                }),
                "image": {
                    "properties": {
                        "hw_numa_nodes": 3,
                        "hw_numa_cpus.0": "0-3",
                        "hw_numa_mem.0": "1024",
                        "hw_numa_cpus.1": "4,6",
                        "hw_numa_mem.1": "512",
                        "hw_numa_cpus.2": "5,7",
                        "hw_numa_mem.2": "512",
                    },
                },
                "expect": objects.InstanceNUMATopology(cells=
                    [
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([4, 6]), memory=512),
                        objects.InstanceNUMACell(
                            id=2, cpuset=set([5, 7]), memory=512)
                    ]),
            },
            {
                # Request a CPU that is out of range
                # wrt vCPU count
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                             "hw:numa_nodes": 1,
                                             "hw:numa_cpus.0": "0-16",
                                             "hw:numa_mem.0": "2048",
                                         }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyCPUOutOfRange,
            },
            {
                # Request the same CPU in two nodes
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                             "hw:numa_nodes": 2,
                                             "hw:numa_cpus.0": "0-7",
                                             "hw:numa_mem.0": "1024",
                                             "hw:numa_cpus.1": "0-7",
                                             "hw:numa_mem.1": "1024",
                                         }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyCPUDuplicates,
            },
            {
                # Request with some CPUs not assigned
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                             "hw:numa_nodes": 2,
                                             "hw:numa_cpus.0": "0-2",
                                             "hw:numa_mem.0": "1024",
                                             "hw:numa_cpus.1": "3-4",
                                             "hw:numa_mem.1": "1024",
                                         }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyCPUsUnassigned,
            },
            {
                # Request too little memory vs flavor total
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                             "hw:numa_nodes": 2,
                                             "hw:numa_cpus.0": "0-3",
                                             "hw:numa_mem.0": "512",
                                             "hw:numa_cpus.1": "4-7",
                                             "hw:numa_mem.1": "512",
                                         }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyMemoryOutOfRange,
            },
            {
                # Request too much memory vs flavor total
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                             "hw:numa_nodes": 2,
                                             "hw:numa_cpus.0": "0-3",
                                             "hw:numa_mem.0": "1576",
                                             "hw:numa_cpus.1": "4-7",
                                             "hw:numa_mem.1": "1576",
                                         }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyMemoryOutOfRange,
            },
            {
                # Request missing mem.0
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                             "hw:numa_nodes": 2,
                                             "hw:numa_cpus.0": "0-3",
                                             "hw:numa_mem.1": "1576",
                                         }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyIncomplete,
            },
            {
                # Request missing cpu.0
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                             "hw:numa_nodes": 2,
                                             "hw:numa_mem.0": "1576",
                                             "hw:numa_cpus.1": "4-7",
                                         }),
                "image": {
                },
                "expect": exception.ImageNUMATopologyIncomplete,
            },
            {
                # Image attempts to override flavor
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={
                                             "hw:numa_nodes": 2,
                                         }),
                "image": {
                    "properties": {
                        "hw_numa_nodes": 4}
                },
                "expect": exception.ImageNUMATopologyForbidden,
            },
            {
                # NUMA + CPU pinning requested in the flavor
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                }),
                "image": {
                },
                "expect": objects.InstanceNUMATopology(cells=
                    [
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1]), memory=1024,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([2, 3]), memory=1024,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # no NUMA + CPU pinning requested in the flavor
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                     "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                }),
                "image": {
                },
                "expect": objects.InstanceNUMATopology(cells=
                    [
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # NUMA + CPU pinning requested in the image
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                    "hw:numa_nodes": 2
                }),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                    }},
                "expect": objects.InstanceNUMATopology(cells=
                    [
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1]), memory=1024,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([2, 3]), memory=1024,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # no NUMA + CPU pinning requested in the image
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={}),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED
                    }},
                "expect": objects.InstanceNUMATopology(cells=
                    [
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
            },
            {
                # Invalid CPU pinning override
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED
                }),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED}
                },
                "expect": exception.ImageCPUPinningForbidden,
            },
            {
                # Invalid CPU pinning policy with realtime
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED,
                    "hw:cpu_realtime": "yes",
                }),
                "image": {
                    "properties": {}
                },
                "expect": exception.RealtimeConfigurationInvalid,
            },
            {
                # Invalid CPU thread pinning override
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                    "hw:numa_nodes": 2,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED,
                    "hw:cpu_thread_policy":
                        fields.CPUThreadAllocationPolicy.ISOLATE,
                }),
                "image": {
                    "properties": {
                        "hw_cpu_policy": fields.CPUAllocationPolicy.DEDICATED,
                        "hw_cpu_thread_policy":
                            fields.CPUThreadAllocationPolicy.REQUIRE,
                    }
                },
                "expect": exception.ImageCPUThreadPolicyForbidden,
            },
            {
                # CPU thread pinning override set to default value
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                    "hw:numa_nodes": 1,
                    "hw:cpu_policy": fields.CPUAllocationPolicy.DEDICATED,
                    "hw:cpu_thread_policy":
                        fields.CPUThreadAllocationPolicy.PREFER,
                }),
                "image": {},
                "expect": objects.InstanceNUMATopology(cells=
                    [
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                            cpu_thread_policy=
                                fields.CPUThreadAllocationPolicy.PREFER)])
            },
            {
                # Invalid CPU pinning policy with CPU thread pinning
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                    "hw:cpu_policy": fields.CPUAllocationPolicy.SHARED,
                    "hw:cpu_thread_policy":
                        fields.CPUThreadAllocationPolicy.ISOLATE,
                }),
                "image": {
                    "properties": {}
                },
                "expect": exception.CPUThreadPolicyConfigurationInvalid,
            },
            {
                # Invalid vCPUs mask with realtime
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_policy": "dedicated",
                                             "hw:cpu_realtime": "yes",
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": exception.RealtimeMaskNotFoundOrInvalid,
            },
        ]
        for testitem in testdata:
            image_meta = objects.ImageMeta.from_dict(testitem["image"])
            if testitem["expect"] is None:
                topology = hw.numa_get_constraints(
                    testitem["flavor"], image_meta)
                self.assertIsNone(topology)
            elif isinstance(testitem["expect"], type):
                # "expect" holds an exception class that must be raised
                self.assertRaises(testitem["expect"],
                                  hw.numa_get_constraints,
                                  testitem["flavor"],
                                  image_meta)
            else:
                topology = hw.numa_get_constraints(
                    testitem["flavor"], image_meta)
                self.assertIsNotNone(topology)
                self.assertEqual(len(testitem["expect"].cells),
                                 len(topology.cells))
                # Lengths match (asserted above), so zip covers all cells.
                for want, got in zip(testitem["expect"].cells,
                                     topology.cells):
                    self.assertEqual(want.id, got.id)
                    self.assertEqual(want.cpuset, got.cpuset)
                    self.assertEqual(want.memory, got.memory)
                    self.assertEqual(want.pagesize, got.pagesize)
                    self.assertEqual(want.cpu_pinning, got.cpu_pinning)

    def test_host_usage_contiguous(self):
        """Check per-cell usage accounting with contiguous host cell IDs."""
        hpages0_4K = objects.NUMAPagesTopology(size_kb=4, total=256, used=0)
        hpages0_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=1)
        hpages1_4K = objects.NUMAPagesTopology(size_kb=4, total=128, used=2)
        hpages1_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=3)
        hosttopo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=0, memory_usage=0, mempages=[
                                 hpages0_4K, hpages0_2M],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[
                                 hpages1_4K, hpages1_2M],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=2, cpuset=set([5, 7]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
            objects.InstanceNUMACell(id=1, cpuset=set([4]), memory=256),
        ])
        instance2 = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
            objects.InstanceNUMACell(id=1, cpuset=set([5, 7]), memory=256),
        ])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1, instance2])
        self.assertEqual(len(hosttopo), len(hostusage))
        self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
        self.assertEqual(hosttopo.cells[0].cpuset,
                         hostusage.cells[0].cpuset)
        self.assertEqual(hosttopo.cells[0].memory,
                         hostusage.cells[0].memory)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertEqual(hostusage.cells[0].mempages, [
            hpages0_4K, hpages0_2M])
        self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
        self.assertEqual(hosttopo.cells[1].cpuset,
                         hostusage.cells[1].cpuset)
        self.assertEqual(hosttopo.cells[1].memory,
                         hostusage.cells[1].memory)
        self.assertEqual(hostusage.cells[1].cpu_usage, 3)
        self.assertEqual(hostusage.cells[1].memory_usage, 512)
        self.assertEqual(hostusage.cells[1].mempages, [
            hpages1_4K, hpages1_2M])
        # Page counters are carried over unchanged (instances here do not
        # declare a pagesize).
        self.assertEqual(256, hpages0_4K.total)
        self.assertEqual(0, hpages0_4K.used)
        self.assertEqual(0, hpages0_2M.total)
        self.assertEqual(1, hpages0_2M.used)
        self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
        self.assertEqual(hosttopo.cells[2].cpuset,
                         hostusage.cells[2].cpuset)
        self.assertEqual(hosttopo.cells[2].memory,
                         hostusage.cells[2].memory)
        self.assertEqual(hostusage.cells[2].cpu_usage, 0)
        self.assertEqual(hostusage.cells[2].memory_usage, 0)
        self.assertEqual(128, hpages1_4K.total)
        self.assertEqual(2, hpages1_4K.used)
        self.assertEqual(0, hpages1_2M.total)
        self.assertEqual(3, hpages1_2M.used)

    def test_host_usage_sparse(self):
        """Check usage accounting when host cell IDs are non-contiguous."""
        hosttopo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=5, cpuset=set([4, 6]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=6, cpuset=set([5, 7]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
            objects.InstanceNUMACell(id=6, cpuset=set([4]), memory=256),
        ])
        instance2 = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256,
                                     cpu_usage=0, memory_usage=0, mempages=[]),
            objects.InstanceNUMACell(id=5, cpuset=set([5, 7]), memory=256,
                                     cpu_usage=0, memory_usage=0, mempages=[]),
        ])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1, instance2])
        self.assertEqual(len(hosttopo), len(hostusage))
        self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
        self.assertEqual(hosttopo.cells[0].id,
                         hostusage.cells[0].id)
        self.assertEqual(hosttopo.cells[0].cpuset,
                         hostusage.cells[0].cpuset)
        self.assertEqual(hosttopo.cells[0].memory,
                         hostusage.cells[0].memory)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
        self.assertEqual(hosttopo.cells[1].id,
                         hostusage.cells[1].id)
        self.assertEqual(hosttopo.cells[1].cpuset,
                         hostusage.cells[1].cpuset)
        self.assertEqual(hosttopo.cells[1].memory,
                         hostusage.cells[1].memory)
        self.assertEqual(hostusage.cells[1].cpu_usage, 2)
        self.assertEqual(hostusage.cells[1].memory_usage, 256)
        self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
        self.assertEqual(hosttopo.cells[2].cpuset,
                         hostusage.cells[2].cpuset)
        self.assertEqual(hosttopo.cells[2].memory,
                         hostusage.cells[2].memory)
        self.assertEqual(hostusage.cells[2].cpu_usage, 1)
        self.assertEqual(hostusage.cells[2].memory_usage, 256)

    def test_host_usage_culmulative_with_free(self):
        # NOTE(review): "culmulative" typo kept; renaming would change the
        # test ID seen by the runner.
        """Check that usage accumulates on a pre-loaded host and that
        free=True reverses it back to the starting state."""
        hosttopo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                             cpu_usage=2, memory_usage=512, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
                             cpu_usage=1, memory_usage=512, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=2, cpuset=set([5, 7]), memory=256,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=512),
            objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=256),
            objects.InstanceNUMACell(id=2, cpuset=set([4]), memory=256)])
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1])
        self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
        self.assertEqual(hostusage.cells[0].cpu_usage, 5)
        self.assertEqual(hostusage.cells[0].memory_usage, 1024)
        self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
        self.assertEqual(hostusage.cells[1].cpu_usage, 2)
        self.assertEqual(hostusage.cells[1].memory_usage, 768)
        self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
        self.assertEqual(hostusage.cells[2].cpu_usage, 1)
        self.assertEqual(hostusage.cells[2].memory_usage, 256)

        # Test freeing of resources
        hostusage = hw.numa_usage_from_instances(
            hostusage, [instance1], free=True)
        self.assertEqual(hostusage.cells[0].cpu_usage, 2)
        self.assertEqual(hostusage.cells[0].memory_usage, 512)
        self.assertEqual(hostusage.cells[1].cpu_usage, 1)
        self.assertEqual(hostusage.cells[1].memory_usage, 512)
        self.assertEqual(hostusage.cells[2].cpu_usage, 0)
        self.assertEqual(hostusage.cells[2].memory_usage, 0)

    def _topo_usage_reserved_page_size(self):
        """Build a host topology with reserved huge pages (from config)
        plus an instance consuming them; returns (host, instance)."""
        reserved = hw.numa_get_reserved_huge_pages()
        hosttopo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[
                                 objects.NUMAPagesTopology(
                                     size_kb=2048,
                                     total=512,
                                     used=128,
                                     reserved=reserved[0][2048])],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[
                                 objects.NUMAPagesTopology(
                                     size_kb=1048576,
                                     total=5,
                                     used=2,
                                     reserved=reserved[1][1048576])],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=0, cpuset=set([0, 1]), memory=256, pagesize=2048),
            objects.InstanceNUMACell(
                id=1, cpuset=set([2, 3]), memory=1024, pagesize=1048576),
        ])
        return hosttopo, instance1

    def test_numa_get_reserved_huge_pages(self):
        """Check parsing of the reserved_huge_pages config option."""
        reserved = hw.numa_get_reserved_huge_pages()
        self.assertEqual({}, reserved)
        self.flags(reserved_huge_pages=[
            {'node': 3, 'size': 2048, 'count': 128},
            {'node': 3, 'size': '1GB', 'count': 4},
            {'node': 6, 'size': '2MB', 'count': 64},
            {'node': 9, 'size': '1GB', 'count': 1}])
        reserved = hw.numa_get_reserved_huge_pages()
        self.assertEqual({2048: 128, 1048576: 4}, reserved[3])
        self.assertEqual({2048: 64}, reserved[6])
        self.assertEqual({1048576: 1}, reserved[9])

    def test_reserved_hugepgaes_success(self):
        # NOTE(review): "hugepgaes" typo kept; renaming would change the
        # test ID seen by the runner.
        """Check that reserved huge pages are subtracted from free."""
        self.flags(reserved_huge_pages=[
            {'node': 0, 'size': 2048, 'count': 128},
            {'node': 1, 'size': 1048576, 'count': 1}])
        hosttopo, instance1 = self._topo_usage_reserved_page_size()
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [instance1])
        self.assertEqual(hostusage.cells[0].mempages[0].size_kb, 2048)
        self.assertEqual(hostusage.cells[0].mempages[0].total, 512)
        self.assertEqual(hostusage.cells[0].mempages[0].used, 256)
        # 128 already used + 128 used by instance + 128 reserved
        self.assertEqual(hostusage.cells[0].mempages[0].free, 128)
        self.assertEqual(hostusage.cells[1].mempages[0].size_kb, 1048576)
        self.assertEqual(hostusage.cells[1].mempages[0].total, 5)
        self.assertEqual(hostusage.cells[1].mempages[0].used, 3)
        # 2 already used + 1 used by instance + 1 reserved
        self.assertEqual(hostusage.cells[1].mempages[0].free, 1)

    def test_reserved_huge_pages_invalid_format(self):
        """A reservation entry missing 'count' must be rejected."""
        self.flags(reserved_huge_pages=[{'node': 0, 'size': 2048}])
        self.assertRaises(
            exception.InvalidReservedMemoryPagesOption,
            self._topo_usage_reserved_page_size)

    def test_reserved_huge_pages_invalid_value(self):
        """A non-dict reservation entry must be rejected."""
        self.flags(reserved_huge_pages=["0:foo:bar"])
        self.assertRaises(
            exception.InvalidReservedMemoryPagesOption,
            self._topo_usage_reserved_page_size)

    def test_topo_usage_none(self):
        """None host topology yields None; empty/None instance lists
        leave host usage untouched."""
        hosttopo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             cpu_usage=0, memory_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        instance1 = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
            objects.InstanceNUMACell(id=2, cpuset=set([2]), memory=256),
        ])
        hostusage = hw.numa_usage_from_instances(
            None, [instance1])
        self.assertIsNone(hostusage)
        hostusage = hw.numa_usage_from_instances(
            hosttopo, [])
        self.assertEqual(hostusage.cells[0].cpu_usage, 0)
        self.assertEqual(hostusage.cells[0].memory_usage, 0)
        self.assertEqual(hostusage.cells[1].cpu_usage, 0)
        self.assertEqual(hostusage.cells[1].memory_usage, 0)
        hostusage = hw.numa_usage_from_instances(
            hosttopo, None)
        self.assertEqual(hostusage.cells[0].cpu_usage, 0)
        self.assertEqual(hostusage.cells[0].memory_usage, 0)
        self.assertEqual(hostusage.cells[1].cpu_usage, 0)
        self.assertEqual(hostusage.cells[1].memory_usage, 0)

    def assertNUMACellMatches(self, expected_cell, got_cell):
        """Assert two NUMA cells agree on their identifying attributes."""
        attrs = ('cpuset', 'memory', 'id')
        # NOTE(review): expected_cell is always a NUMACell in the current
        # callers, so this NUMATopology check never fires and the usage
        # attributes are never compared -- possibly meant to test for
        # objects.NUMACell; confirm before tightening.
        if isinstance(expected_cell, objects.NUMATopology):
            attrs += ('cpu_usage', 'memory_usage')

        for attr in attrs:
            self.assertEqual(getattr(expected_cell, attr),
                             getattr(got_cell, attr))

    def test_json(self):
        """Check NUMATopology survives a JSON round trip cell-for-cell."""
        expected = objects.NUMATopology(
            cells=[
                objects.NUMACell(id=1, cpuset=set([1, 2]), memory=1024,
                                 cpu_usage=0, memory_usage=0, mempages=[],
                                 siblings=[], pinned_cpus=set([])),
                objects.NUMACell(id=2, cpuset=set([3, 4]), memory=1024,
                                 cpu_usage=0, memory_usage=0,
                                 mempages=[], siblings=[],
                                 pinned_cpus=set([]))])
        got = objects.NUMATopology.obj_from_db_obj(expected._to_json())

        for exp_cell, got_cell in zip(expected.cells, got.cells):
            self.assertNUMACellMatches(exp_cell, got_cell)
class VirtNUMATopologyCellUsageTestCase(test.NoDBTestCase):
    """Tests for fitting a single guest NUMA cell onto one host cell."""

    @staticmethod
    def _host_cell(cpu_usage=0, memory_usage=0):
        # A 2-pCPU / 1G host cell; usage is the only thing that varies
        # between the tests below.
        return objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                cpu_usage=cpu_usage,
                                memory_usage=memory_usage,
                                mempages=[], siblings=[],
                                pinned_cpus=set([]))

    @staticmethod
    def _limits():
        # 2x overcommit on both CPU and RAM.
        return objects.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)

    def test_fit_instance_cell_success_no_limit(self):
        guest_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=1024)
        fitted = hw._numa_fit_instance_cell(self._host_cell(), guest_cell)
        self.assertIsInstance(fitted, objects.InstanceNUMACell)
        # The fitted cell takes on the host cell's id.
        self.assertEqual(4, fitted.id)

    def test_fit_instance_cell_success_w_limit(self):
        # Host cell is fully used, but 2x overcommit leaves room.
        guest_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=1024)
        fitted = hw._numa_fit_instance_cell(
            self._host_cell(cpu_usage=2, memory_usage=1024), guest_cell,
            limit_cell=self._limits())
        self.assertIsInstance(fitted, objects.InstanceNUMACell)
        self.assertEqual(4, fitted.id)

    def test_fit_instance_cell_self_overcommit(self):
        # A single guest cell may never exceed the host cell's physical
        # resources, even under overcommit.
        guest_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([1, 2, 3]), memory=4096)
        fitted = hw._numa_fit_instance_cell(
            self._host_cell(), guest_cell, limit_cell=self._limits())
        self.assertIsNone(fitted)

    def test_fit_instance_cell_fail_w_limit(self):
        busy_host = self._host_cell(cpu_usage=2, memory_usage=1024)
        # Too much memory even with 2x RAM overcommit.
        fitted = hw._numa_fit_instance_cell(
            busy_host,
            objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=4096),
            limit_cell=self._limits())
        self.assertIsNone(fitted)
        # Too many vCPUs even with 2x CPU overcommit.
        fitted = hw._numa_fit_instance_cell(
            busy_host,
            objects.InstanceNUMACell(id=0, cpuset=set([1, 2, 3, 4, 5]),
                                     memory=1024),
            limit_cell=self._limits())
        self.assertIsNone(fitted)
class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
    """Tests for hw.numa_fit_instance_to_host() against a two-cell host."""

    def setUp(self):
        super(VirtNUMAHostTopologyTestCase, self).setUp()

        def host_cell(cell_id, cpus):
            # Each host cell: 2 pCPUs / 2G RAM, already half committed
            # (2 vCPUs and 2G used).
            return objects.NUMACell(id=cell_id, cpuset=cpus, memory=2048,
                                    cpu_usage=2, memory_usage=2048,
                                    mempages=[], siblings=[],
                                    pinned_cpus=set([]))

        def guest_topo(cpus, mem):
            return objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(
                    id=0, cpuset=cpus, memory=mem)])

        self.host = objects.NUMATopology(
            cells=[host_cell(1, set([1, 2])),
                   host_cell(2, set([3, 4]))])
        # 2x overcommit on both CPU and RAM.
        self.limits = objects.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        self.instance1 = guest_topo(set([1, 2]), 2048)
        self.instance2 = guest_topo(set([1, 2, 3, 4]), 1024)
        self.instance3 = guest_topo(set([1, 2]), 1024)

    def test_get_fitting_success_no_limits(self):
        first = hw.numa_fit_instance_to_host(
            self.host, self.instance1)
        self.assertIsInstance(first, objects.InstanceNUMATopology)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [first])
        second = hw.numa_fit_instance_to_host(
            self.host, self.instance3)
        self.assertIsInstance(second, objects.InstanceNUMATopology)

    def test_get_fitting_success_limits(self):
        fitted = hw.numa_fit_instance_to_host(
            self.host, self.instance3, self.limits)
        self.assertIsInstance(fitted, objects.InstanceNUMATopology)
        self.assertEqual(1, fitted.cells[0].id)

    def test_get_fitting_fails_no_limits(self):
        # instance2 wants 4 vCPUs in one cell; no host cell can take it.
        fitted = hw.numa_fit_instance_to_host(
            self.host, self.instance2, self.limits)
        self.assertIsNone(fitted)

    def test_get_fitting_culmulative_fails_limits(self):
        # NOTE(review): "culmulative" typo kept; renaming would change the
        # test ID seen by the runner.
        first = hw.numa_fit_instance_to_host(
            self.host, self.instance1, self.limits)
        self.assertIsInstance(first, objects.InstanceNUMATopology)
        self.assertEqual(1, first.cells[0].id)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [first])
        second = hw.numa_fit_instance_to_host(
            self.host, self.instance2, self.limits)
        self.assertIsNone(second)

    def test_get_fitting_culmulative_success_limits(self):
        first = hw.numa_fit_instance_to_host(
            self.host, self.instance1, self.limits)
        self.assertIsInstance(first, objects.InstanceNUMATopology)
        self.assertEqual(1, first.cells[0].id)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [first])
        # Cell 1 is now full; the next guest lands on cell 2.
        second = hw.numa_fit_instance_to_host(
            self.host, self.instance3, self.limits)
        self.assertIsInstance(second, objects.InstanceNUMATopology)
        self.assertEqual(2, second.cells[0].id)

    def test_get_fitting_pci_success(self):
        pci_reqs = [objects.InstancePCIRequest(count=1,
                                               spec=[{'vendor_id': '8086'}])]
        dev_stats = stats.PciDeviceStats()
        # When the PCI pool claims it can satisfy the request, fitting
        # proceeds normally.
        with mock.patch.object(stats.PciDeviceStats,
                               'support_requests', return_value=True):
            fitted = hw.numa_fit_instance_to_host(self.host,
                                                  self.instance1,
                                                  pci_requests=pci_reqs,
                                                  pci_stats=dev_stats)
            self.assertIsInstance(fitted,
                                  objects.InstanceNUMATopology)

    def test_get_fitting_pci_fail(self):
        pci_reqs = [objects.InstancePCIRequest(count=1,
                                               spec=[{'vendor_id': '8086'}])]
        dev_stats = stats.PciDeviceStats()
        # An unsatisfiable PCI request vetoes an otherwise good fit.
        with mock.patch.object(stats.PciDeviceStats,
                               'support_requests', return_value=False):
            fitted = hw.numa_fit_instance_to_host(
                self.host,
                self.instance1,
                pci_requests=pci_reqs,
                pci_stats=dev_stats)
            self.assertIsNone(fitted)
class NumberOfSerialPortsTest(test.NoDBTestCase):
    """Tests for hw.get_number_of_serial_ports()."""

    @staticmethod
    def _num_ports(extra_specs, image_props):
        """Resolve the serial port count for the given flavor extra specs
        and image properties dict."""
        flavor = objects.Flavor(vcpus=8, memory_mb=2048,
                                extra_specs=extra_specs)
        image_meta = objects.ImageMeta.from_dict(image_props)
        return hw.get_number_of_serial_ports(flavor, image_meta)

    def test_flavor(self):
        self.assertEqual(
            3, self._num_ports({"hw:serial_port_count": 3}, {}))

    def test_image_meta(self):
        self.assertEqual(
            2, self._num_ports({},
                               {"properties": {"hw_serial_port_count": 2}}))

    def test_flavor_invalid_value(self):
        self.assertRaises(exception.ImageSerialPortNumberInvalid,
                          self._num_ports,
                          {"hw:serial_port_count": 'foo'}, {})

    def test_image_meta_smaller_than_flavor(self):
        # Image may lower the flavor's count.
        self.assertEqual(
            2, self._num_ports({"hw:serial_port_count": 3},
                               {"properties": {"hw_serial_port_count": 2}}))

    def test_flavor_smaller_than_image_meta(self):
        # ...but may never raise it above the flavor's count.
        self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
                          self._num_ports,
                          {"hw:serial_port_count": 3},
                          {"properties": {"hw_serial_port_count": 4}})
class HelperMethodsTestCase(test.NoDBTestCase):
    """Test hw.get_host_numa_usage_from_instance() input/output handling.

    The helper accepts host and instance NUMA topologies in several forms
    (versioned objects, dicts, JSON strings, legacy dict serializations);
    each test feeds a different combination and checks the resulting
    usage accounting via _check_usage().
    """
    def setUp(self):
        super(HelperMethodsTestCase, self).setUp()
        # Host: two free cells, 2 CPUs / 512M each.
        self.hosttopo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             memory_usage=0, cpu_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             memory_usage=0, cpu_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        # Instance: 2 CPUs / 256M pinned on cell 0, 1 CPU / 256M on cell 1.
        self.instancetopo = objects.InstanceNUMATopology(
            instance_uuid='fake-uuid',
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0, 1]), memory=256, pagesize=2048,
                    cpu_pinning={0: 0, 1: 1}),
                objects.InstanceNUMACell(
                    id=1, cpuset=set([2]), memory=256, pagesize=2048,
                    cpu_pinning={2: 3}),
            ])
        self.context = context.RequestContext('fake-user',
                                              'fake-project')
    def _check_usage(self, host_usage):
        # Usage expected after applying self.instancetopo to self.hosttopo.
        self.assertEqual(2, host_usage.cells[0].cpu_usage)
        self.assertEqual(256, host_usage.cells[0].memory_usage)
        self.assertEqual(1, host_usage.cells[1].cpu_usage)
        self.assertEqual(256, host_usage.cells[1].memory_usage)
    def test_dicts_json(self):
        # Host and instance both passed as JSON strings inside dicts;
        # result is then also a serialized string.
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
    def test_dicts_instance_json(self):
        # Host as object, instance as JSON: result stays an object.
        host = {'numa_topology': self.hosttopo}
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, objects.NUMATopology)
        self._check_usage(res)
    def test_dicts_instance_json_old(self):
        # Instance serialized via the legacy _to_dict() format.
        host = {'numa_topology': self.hosttopo}
        instance = {'numa_topology':
                jsonutils.dumps(self.instancetopo._to_dict())}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, objects.NUMATopology)
        self._check_usage(res)
    def test_dicts_host_json(self):
        # Host as JSON string: result is serialized back to a string.
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
    def test_dicts_host_json_old(self):
        # Host serialized via the legacy _to_dict() format.
        host = {'numa_topology': jsonutils.dumps(
            self.hosttopo._to_dict())}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
    def test_object_host_instance_json(self):
        # Host as a ComputeNode object carrying a JSON topology.
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
    def test_object_host_instance(self):
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
    def test_instance_with_fetch(self):
        # Instance dict with no topology key triggers a DB fetch by uuid.
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = {'uuid': fake_uuid}
        with mock.patch.object(objects.InstanceNUMATopology,
                'get_by_instance_uuid', return_value=None) as get_mock:
            res = hw.get_host_numa_usage_from_instance(host, instance)
            self.assertIsInstance(res, six.string_types)
            self.assertTrue(get_mock.called)
    def test_object_instance_with_load(self):
        # Instance object without a topology lazy-loads it from the DB.
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = objects.Instance(context=self.context, uuid=fake_uuid)
        with mock.patch.object(objects.InstanceNUMATopology,
                'get_by_instance_uuid', return_value=None) as get_mock:
            res = hw.get_host_numa_usage_from_instance(host, instance)
            self.assertIsInstance(res, six.string_types)
            self.assertTrue(get_mock.called)
    def test_instance_serialized_by_build_request_spec(self):
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
                                    numa_topology=self.instancetopo)
        # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
        # We can remove this test once we no longer use that method.
        instance_raw = jsonutils.to_primitive(
            base_obj.obj_to_primitive(instance))
        res = hw.get_host_numa_usage_from_instance(host, instance_raw)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
    def test_attr_host(self):
        # Host topology exposed as a plain attribute (duck typing).
        class Host(object):
            def __init__(obj):
                obj.numa_topology = self.hosttopo._to_json()
        host = Host()
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
    def test_never_serialize_result(self):
        # never_serialize_result keeps the object form even for JSON input.
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance,
                                                   never_serialize_result=True)
        self.assertIsInstance(res, objects.NUMATopology)
        self._check_usage(res)
    def test_dict_numa_topology_to_obj(self):
        # A primitive (dict) instance round-trips to an equal topology
        # object field-by-field.
        fake_uuid = str(uuid.uuid4())
        instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
                                    numa_topology=self.instancetopo)
        instance_dict = base_obj.obj_to_primitive(instance)
        instance_numa_topo = hw.instance_topology_from_instance(instance_dict)
        for expected_cell, actual_cell in zip(self.instancetopo.cells,
                                              instance_numa_topo.cells):
            for k in expected_cell.fields:
                self.assertEqual(getattr(expected_cell, k),
                                 getattr(actual_cell, k))
class VirtMemoryPagesTestCase(test.NoDBTestCase):
    """Tests for the memory-pages (hugepage) helpers in hw.

    Covers page-size constraint computation from flavor extra specs and
    image properties, plus per-cell page usage and support checks.
    """
    def test_cell_instance_pagesize(self):
        # An instance cell simply carries the requested page size.
        cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=2048)
        self.assertEqual(0, cell.id)
        self.assertEqual(set([0]), cell.cpuset)
        self.assertEqual(1024, cell.memory)
        self.assertEqual(2048, cell.pagesize)
    def test_numa_pagesize_usage_from_cell(self):
        instcell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=512, pagesize=2048)
        hostcell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024,
            cpu_usage=0, memory_usage=0,
            mempages=[objects.NUMAPagesTopology(
                size_kb=2048,
                total=512,
                used=0)],
            siblings=[], pinned_cpus=set([]))
        # 512M of 2M pages -> 256 pages used; the third argument appears
        # to be a usage sign/multiplier (1 = account one instance).
        topo = hw._numa_pagesize_usage_from_cell(hostcell, instcell, 1)
        self.assertEqual(2048, topo[0].size_kb)
        self.assertEqual(512, topo[0].total)
        self.assertEqual(256, topo[0].used)
    def _test_get_requested_mempages_pagesize(self, spec=None, props=None):
        # Helper: build flavor/image from the given extra specs/properties
        # and return the page-size constraint computed by hw.
        flavor = objects.Flavor(vcpus=16, memory_mb=2048,
                                extra_specs=spec or {})
        image_meta = objects.ImageMeta.from_dict({"properties": props or {}})
        return hw._numa_get_pagesize_constraints(flavor, image_meta)
    def test_get_requested_mempages_pagesize_from_flavor_swipe(self):
        # Symbolic values map to the MEMPAGES_* sentinels.
        self.assertEqual(
            hw.MEMPAGES_SMALL, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "small"}))
        self.assertEqual(
            hw.MEMPAGES_LARGE, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "large"}))
        self.assertEqual(
            hw.MEMPAGES_ANY, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "any"}))
    def test_get_requested_mempages_pagesize_from_flavor_specific(self):
        # A bare number is interpreted as a size in KB.
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2048"}))
    def test_get_requested_mempages_pagesize_from_flavor_invalid(self):
        self.assertRaises(
            exception.MemoryPageSizeInvalid,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "foo"})
        self.assertRaises(
            exception.MemoryPageSizeInvalid,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "-42"})
    def test_get_requested_mempages_pagesizes_from_flavor_suffix_sweep(self):
        # KB/MB/GB suffixes are all normalized to KB.
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2048KB"}))
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2MB"}))
        self.assertEqual(
            1048576,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "1GB"}))
    def test_get_requested_mempages_pagesize_from_image_flavor_any(self):
        # Flavor "any" lets the image pick a specific size.
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "any"},
                props={"hw_mem_page_size": "2048"}))
    def test_get_requested_mempages_pagesize_from_image_flavor_large(self):
        # Flavor "large" lets the image pick a specific (huge) size.
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "large"},
                props={"hw_mem_page_size": "2048"}))
    def test_get_requested_mempages_pagesize_from_image_forbidden(self):
        # The image may not override a flavor that forces small pages.
        self.assertRaises(
            exception.MemoryPageSizeForbidden,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "small"},
            {"hw_mem_page_size": "2048"})
    def test_get_requested_mempages_pagesize_from_image_forbidden2(self):
        # The image may not request a page size when the flavor sets none.
        self.assertRaises(
            exception.MemoryPageSizeForbidden,
            self._test_get_requested_mempages_pagesize,
            {}, {"hw_mem_page_size": "2048"})
    def test_cell_accepts_request_wipe(self):
        host_cell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                objects.NUMAPagesTopology(size_kb=4, total=262144, used=0),
            ],
            siblings=[], pinned_cpus=set([]))
        # "small" and "any" are satisfied by the host's 4K pages...
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_SMALL)
        self.assertEqual(
            4,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_ANY)
        self.assertEqual(
            4,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
        # ...but "large" is not: this host exposes no hugepages.
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE)
        self.assertIsNone(hw._numa_cell_supports_pagesize_request(
            host_cell, inst_cell))
    def test_cell_accepts_request_large_pass(self):
        # "large" resolves to the host's 2M hugepages.
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE)
        host_cell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                objects.NUMAPagesTopology(size_kb=4, total=256, used=0),
                objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertEqual(
            2048,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
    def test_cell_accepts_request_custom_pass(self):
        # An explicit 2048K request matches the host's 2M pages.
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=2048)
        host_cell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                objects.NUMAPagesTopology(size_kb=4, total=256, used=0),
                objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertEqual(
            2048,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
    def test_cell_accepts_request_remainder_memory(self):
        # Test memory can't be divided with no rem by mempage's size_kb
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024 + 1, pagesize=2048)
        host_cell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                objects.NUMAPagesTopology(size_kb=4, total=256, used=0),
                objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertIsNone(hw._numa_cell_supports_pagesize_request(
            host_cell, inst_cell))
    def test_cell_accepts_request_host_mempages(self):
        # Test pagesize not in host's mempages
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=4096)
        host_cell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                objects.NUMAPagesTopology(size_kb=4, total=256, used=0),
                objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertRaises(exception.MemoryPageSizeNotSupported,
                          hw._numa_cell_supports_pagesize_request,
                          host_cell, inst_cell)
class _CPUPinningTestCaseBase(object):
    """Mixin of shared assertion helpers for CPU-pinning test cases."""
    def assertEqualTopology(self, expected, got):
        """Assert two CPU topologies describe the same layout."""
        for field in ('sockets', 'cores', 'threads'):
            self.assertEqual(getattr(expected, field), getattr(got, field),
                             "Mismatch on %s" % field)
    def assertInstanceCellPinned(self, instance_cell, cell_ids=None):
        """Assert the cell got pinned, optionally onto one of *cell_ids*."""
        default_cell_id = 0
        self.assertIsNotNone(instance_cell)
        if cell_ids is None:
            self.assertEqual(default_cell_id, instance_cell.id)
        else:
            self.assertIn(instance_cell.id, cell_ids)
        # Every vCPU in the cell must have received a pinning entry.
        self.assertEqual(len(instance_cell.cpuset),
                         len(instance_cell.cpu_pinning))
    def assertPinningPreferThreads(self, instance_cell, host_cell):
        """Make sure we are preferring threads.

        We do this by assessing that at least 2 CPUs went to the same core
        if that was even possible to begin with.
        """
        biggest_free_sibling = max(len(sib)
                                   for sib in host_cell.free_siblings)
        if len(instance_cell) > 1 and biggest_free_sibling > 1:
            # Map every free host CPU to a canonical key for its core.
            core_of = {}
            for sib in host_cell.free_siblings:
                core_key = tuple(sorted(sib))
                for cpu in sib:
                    core_of[cpu] = core_key
            pins_per_core = collections.defaultdict(int)
            for host_cpu in instance_cell.cpu_pinning.values():
                pins_per_core[core_of[host_cpu]] += 1
            self.assertTrue(max(pins_per_core.values()) > 1,
                            "Seems threads were not preferred by the pinning "
                            "logic.")
class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
    """Cell-level tests for hw._numa_fit_instance_cell_with_pinning().

    Each test builds one host NUMACell (with its sibling/usage state) and
    one InstanceNUMACell and checks either that pinning fails (None) or
    that the resulting pinning and derived CPU topology are as expected.
    """
    def test_get_pinning_inst_too_large_cpu(self):
        # Instance wants 4 CPUs but the host cell only has 3.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
    def test_get_pinning_inst_too_large_mem(self):
        # Instance wants more free memory than the host cell has left.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=1024,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
    def test_get_pinning_inst_not_avail(self):
        # One host CPU is already pinned, leaving too few for the instance.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([0]),
                                    siblings=[], mempages=[])
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
    def test_get_pinning_no_sibling_fits_empty(self):
        # Host without sibling info: 1:1 pinning, threads=1.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 3)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_no_sibling_fits_w_usage(self):
        # Pinned host CPU 1 is skipped; vCPUs go to the free CPUs 0, 2, 3.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([1]), mempages=[],
                                    siblings=[])
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=1024)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_pinning = {0: 0, 1: 2, 2: 3}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_instance_siblings_fits(self):
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_instance_siblings_host_siblings_fits_empty(self):
        # Two-thread host siblings produce a cores=2/threads=2 topology.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_instance_siblings_host_siblings_fits_empty_2(self):
        host_pin = objects.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
            mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 8)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self):
        # Only CPUs 0, 3, 4, 7 are free (two per 4-thread core).
        host_pin = objects.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([1, 2, 5, 6]),
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
            mempages=[])
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {0: 0, 1: 3, 2: 4, 3: 7}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_host_siblings_fit_single_core(self):
        # The whole instance fits inside one 4-thread host core.
        host_pin = objects.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
            mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_host_siblings_fit(self):
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
        got_pinning = {x: x for x in range(0, 4)}
        self.assertEqual(got_pinning, inst_pin.cpu_pinning)
    def test_get_pinning_require_policy_too_few_siblings(self):
        # REQUIRE thread policy fails when not enough full sibling pairs
        # remain free.
        host_pin = objects.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([0, 1, 2]),
            siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
            mempages=[])
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
    def test_get_pinning_require_policy_fits(self):
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_require_policy_fits_w_usage(self):
        host_pin = objects.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([0, 1]),
            siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
            mempages=[])
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.REQUIRE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_host_siblings_instance_odd_fit(self):
        # Odd vCPU count cannot use 2-thread cores evenly -> threads=1.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3]),
                                              set([4, 5]), set([6, 7])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=5, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_host_siblings_instance_fit_optimize_threads(self):
        # 6 vCPUs on 4-thread cores: best packing is cores=3, threads=2.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1, 2, 3]),
                                              set([4, 5, 6, 7])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4, 5]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_host_siblings_instance_odd_fit_w_usage(self):
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3]),
                                              set([4, 5]), set([6, 7])],
                                    mempages=[], pinned_cpus=set([0, 2, 5]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_host_siblings_instance_odd_fit_orphan_only(self):
        # Only one free CPU left per core ("orphans") -> threads=1.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3]),
                                              set([4, 5]), set([6, 7])],
                                    mempages=[], pinned_cpus=set([0, 2, 5, 6]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=4, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_host_siblings_large_instance_odd_fit(self):
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7,
                                                      8, 9, 10, 11, 12, 13, 14,
                                                      15]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 8]), set([1, 9]),
                                              set([2, 10]), set([3, 11]),
                                              set([4, 12]), set([5, 13]),
                                              set([6, 14]), set([7, 15])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3, 4]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        self.assertPinningPreferThreads(inst_pin, host_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=5, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_isolate_policy_too_few_fully_free_cores(self):
        # ISOLATE needs whole cores free; pinned CPU 1 taints core (0, 1).
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([1]))
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
    def test_get_pinning_isolate_policy_no_fully_free_cores(self):
        # Both cores have one pinned CPU: nothing fully free -> no fit.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([1, 2]))
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)
    def test_get_pinning_isolate_policy_fits(self):
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_isolate_policy_fits_ht_host(self):
        # On a hyperthreaded host ISOLATE still reports threads=1.
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
    def test_get_pinning_isolate_policy_fits_w_usage(self):
        host_pin = objects.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([0, 1]),
            siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
            mempages=[])
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1]),
            memory=2048,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=1)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
def test_host_numa_fit_instance_to_host_single_cell(self):
host_topo = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048,
memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))]
)
inst_topo = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
cpuset=set([0, 1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
    def test_host_numa_fit_instance_to_host_single_cell_w_usage(self):
        """Cell 0 has a pinned CPU, so the instance must land on cell 1."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1]),
                                    pinned_cpus=set([0]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[]),
                   objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))])
        inst_topo = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(1,))
    def test_host_numa_fit_instance_to_host_single_cell_fail(self):
        """Each host cell has a pinned CPU: no cell can host 2 vCPUs."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048,
                                    pinned_cpus=set([0]), memory_usage=0,
                                    siblings=[], mempages=[]),
                   objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
                                    pinned_cpus=set([2]), memory_usage=0,
                                    siblings=[], mempages=[])])
        inst_topo = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        self.assertIsNone(inst_topo)
    def test_host_numa_fit_instance_to_host_fit(self):
        """Each of the two instance cells fits one of the free host cells."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([])),
                   objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))])
        inst_topo = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                   objects.InstanceNUMACell(
                cpuset=set([2, 3]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
    def test_host_numa_fit_instance_to_host_barely_fit(self):
        """Only cells 0 and 2 keep 2+ free CPUs; cell 1 has just one."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, pinned_cpus=set([0]),
                                    siblings=[], mempages=[],
                                    memory_usage=0),
                   objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([4, 5, 6])),
                   objects.NUMACell(id=2, cpuset=set([8, 9, 10, 11]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([10, 11]))])
        inst_topo = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                   objects.InstanceNUMACell(
                cpuset=set([2, 3]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(0, 2))
    def test_host_numa_fit_instance_to_host_fail_capacity(self):
        """Cell 1 has a single free CPU, so the second cell cannot fit."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    mempages=[], siblings=[],
                                    pinned_cpus=set([0])),
                   objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([4, 5, 6]))])
        inst_topo = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                cpuset=set([0, 1]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
                   objects.InstanceNUMACell(
                cpuset=set([2, 3]), memory=2048,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fail_topology(self):
    """Fitting fails: three instance cells cannot be spread over a
    host that only exposes two NUMA cells."""
    host = objects.NUMATopology(cells=[
        objects.NUMACell(id=cell_id, cpuset=cpus, memory=4096,
                         memory_usage=0, siblings=[], mempages=[],
                         pinned_cpus=set())
        for cell_id, cpus in [(0, {0, 1, 2, 3}), (1, {4, 5, 6, 7})]])
    requested = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            cpuset=vcpus, memory=1024,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)
        for vcpus in ({0, 1}, {2, 3}, {4, 5})])
    self.assertIsNone(hw.numa_fit_instance_to_host(host, requested))
def test_cpu_pinning_usage_from_instances(self):
    """Consuming two pinned guests marks all four host pCPUs pinned."""
    host = objects.NUMATopology(cells=[
        objects.NUMACell(id=0, cpuset={0, 1, 2, 3}, memory=4096,
                         cpu_usage=0, memory_usage=0, siblings=[],
                         mempages=[], pinned_cpus=set())])
    guests = []
    for pinning in ({0: 0, 1: 3}, {0: 1, 1: 2}):
        guests.append(objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                cpuset={0, 1}, id=0, memory=2048, cpu_pinning=pinning,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]))
    host = hw.numa_usage_from_instances(host, guests)
    self.assertEqual({0, 1, 2, 3}, host.cells[0].pinned_cpus)
def test_cpu_pinning_usage_from_instances_free(self):
    """Freeing both guests releases every previously pinned pCPU."""
    host = objects.NUMATopology(cells=[
        objects.NUMACell(id=0, cpuset={0, 1, 2, 3}, memory=4096,
                         cpu_usage=0, memory_usage=0, siblings=[],
                         mempages=[], pinned_cpus={0, 1, 3})])
    guest_one = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            cpuset={0}, memory=1024, cpu_pinning={0: 1}, id=0,
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
    guest_two = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            cpuset={0, 1}, memory=1024, id=0, cpu_pinning={0: 0, 1: 3},
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
    host = hw.numa_usage_from_instances(host, [guest_one, guest_two],
                                        free=True)
    self.assertEqual(set(), host.cells[0].pinned_cpus)
def test_host_usage_from_instances_fail(self):
    """Two guests claiming the same pCPU (0) must raise."""
    host = objects.NUMATopology(cells=[
        objects.NUMACell(id=0, cpuset={0, 1, 2, 3}, memory=4096,
                         cpu_usage=0, memory_usage=0, siblings=[],
                         mempages=[], pinned_cpus=set())])
    overlapping = []
    for pinning in ({0: 0, 1: 3}, {0: 0, 1: 2}):
        overlapping.append(objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                cpuset={0, 1}, memory=2048, id=0, cpu_pinning=pinning,
                cpu_policy=fields.CPUAllocationPolicy.DEDICATED)]))
    self.assertRaises(exception.CPUPinningInvalid,
                      hw.numa_usage_from_instances, host, overlapping)
def test_host_usage_from_instances_isolate(self):
    """With the ISOLATE thread policy, pinning two vCPUs on a host cell
    with sibling pairs consumes the whole cell (all four pCPUs)."""
    host = objects.NUMATopology(cells=[
        objects.NUMACell(id=0, cpuset={0, 1, 2, 3}, memory=4096,
                         cpu_usage=0, memory_usage=0,
                         siblings=[{0, 2}, {1, 3}],
                         mempages=[], pinned_cpus=set())])
    isolated_guest = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            cpuset={0, 1}, memory=2048, id=0, cpu_pinning={0: 0, 1: 1},
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
        )])
    usage = hw.numa_usage_from_instances(host, [isolated_guest])
    self.assertEqual(host.cells[0].cpuset, usage.cells[0].pinned_cpus)
    self.assertEqual(usage.cells[0].cpu_usage, 4)
def test_host_usage_from_instances_isolate_free(self):
    """Freeing an ISOLATE guest returns every sibling pCPU to the pool."""
    host = objects.NUMATopology(cells=[
        objects.NUMACell(id=0, cpuset={0, 1, 2, 3}, memory=4096,
                         cpu_usage=4, memory_usage=0,
                         siblings=[{0, 2}, {1, 3}],
                         mempages=[], pinned_cpus={0, 1, 2, 3})])
    isolated_guest = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            cpuset={0, 1}, memory=2048, id=0, cpu_pinning={0: 0, 1: 1},
            cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
            cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
        )])
    usage = hw.numa_usage_from_instances(host, [isolated_guest],
                                         free=True)
    self.assertEqual(set(), usage.cells[0].pinned_cpus)
    self.assertEqual(usage.cells[0].cpu_usage, 0)
class CPURealtimeTestCase(test.NoDBTestCase):
    """Tests for hw.vcpus_realtime_topology (realtime/emulator vCPU split)."""

    def test_success_flavor(self):
        # Mask comes from flavor extra specs: exclude vCPU 1 from realtime.
        flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^1"}}
        image = objects.ImageMeta.from_dict({})
        realtime, emulator = hw.vcpus_realtime_topology(
            set([0, 1, 2]), flavor, image)
        self.assertEqual({0, 2}, realtime)
        self.assertEqual({1}, emulator)

    def test_success_image(self):
        # Mask comes from image properties: exclude vCPUs 0-1 from realtime.
        flavor = {"extra_specs": {}}
        image = objects.ImageMeta.from_dict(
            {"properties": {"hw_cpu_realtime_mask": "^0-1"}})
        realtime, emulator = hw.vcpus_realtime_topology(
            set([0, 1, 2]), flavor, image)
        self.assertEqual({2}, realtime)
        self.assertEqual({0, 1}, emulator)

    def test_no_mask_configured(self):
        # Neither flavor nor image defines a realtime mask.
        flavor = {"extra_specs": {}}
        image = objects.ImageMeta.from_dict({"properties": {}})
        self.assertRaises(
            exception.RealtimeMaskNotFoundOrInvalid,
            hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image)

    def test_mask_badly_configured(self):
        # Mask excludes every vCPU, leaving no realtime CPUs at all.
        flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^0-2"}}
        image = objects.ImageMeta.from_dict({"properties": {}})
        self.assertRaises(
            exception.RealtimeMaskNotFoundOrInvalid,
            hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image)
| [
"wangfeng@nfs.iscas.ac.cn"
] | wangfeng@nfs.iscas.ac.cn |
855afa2e49764022f6489b06d28787448896f33d | 84a96dbd96e926ebb5c658e3cb897db276c32d6c | /tensorflow/python/keras/optimizer_v2/adamax.py | 9166f637c1e9a7f1cefb35436a6db667ff59ab84 | [
"Apache-2.0"
] | permissive | MothCreations/gavlanWheels | bc9189092847369ad291d1c7d3f4144dd2239359 | 01d8a43b45a26afec27b971f686f79c108fe08f9 | refs/heads/master | 2022-12-06T09:27:49.458800 | 2020-10-13T21:56:40 | 2020-10-13T21:56:40 | 249,206,716 | 6 | 5 | Apache-2.0 | 2022-11-21T22:39:47 | 2020-03-22T14:57:45 | C++ | UTF-8 | Python | false | false | 8,043 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adamax for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Adamax')
class Adamax(optimizer_v2.OptimizerV2):
  """Optimizer that implements the Adamax algorithm.

  It is a variant of Adam based on the infinity norm.
  Default parameters follow those provided in the paper.
  Adamax is sometimes superior to adam, specially in models with embeddings.

  Initialization:

  ```
  m_0 <- 0 (Initialize initial 1st moment vector)
  v_0 <- 0 (Initialize the exponentially weighted infinity norm)
  t <- 0 (Initialize timestep)
  ```

  The update rule for `variable` with gradient `g` uses an optimization
  described at the end of section 7.1 of the paper:

  ```
  t <- t + 1

  m_t <- beta1 * m_{t-1} + (1 - beta1) * g
  v_t <- max(beta2 * v_{t-1}, abs(g))
  variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
  ```

  Similar to AdamOptimizer, the epsilon is added for numerical stability
  (especially to get rid of division by zero when v_t = 0).

  Contrast to AdamOptimizer, the sparse implementation of this algorithm
  (used when the gradient is an IndexedSlices object, typically because of
  `tf.gather` or an embedding lookup in the forward pass) only updates
  variable slices and corresponding `m_t`, `v_t` terms when that part of
  the variable was used in the forward pass. This means that the sparse
  behavior is contrast to the dense behavior (similar to some momentum
  implementations which ignore momentum unless a variable slice was actually
  used).

  References
    see Section 7 of [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
    ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
  """

  # Gradients can be summed across replicas before applying the update.
  _HAS_ALL_REDUCE_SUM_GRAD = True

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               name='Adamax',
               **kwargs):
    """Construct a new Adamax optimizer.

    Args:
      learning_rate: A `Tensor`, floating point value, or a schedule that is a
        `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate.
      beta_1: A float value or a constant float tensor. The exponential decay
        rate for the 1st moment estimates.
      beta_2: A float value or a constant float tensor. The exponential decay
        rate for the exponentially weighted infinity norm.
      epsilon: A small constant for numerical stability.
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adamax".
      **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
        `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
        gradients by value, `decay` is included for backward compatibility to
        allow time inverse decay of learning rate. `lr` is included for backward
        compatibility, recommended to use `learning_rate` instead.
    """
    super(Adamax, self).__init__(name, **kwargs)
    # 'lr' is the deprecated alias for 'learning_rate'.
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('beta_1', beta_1)
    self._set_hyper('beta_2', beta_2)
    # Fall back to the Keras backend default epsilon when none is given.
    self.epsilon = epsilon or backend_config.epsilon()

  def _create_slots(self, var_list):
    # Separate for-loops to respect the ordering of slot variables from v1.
    for var in var_list:
      self.add_slot(var, 'm')  # Create slots for the first moments.
    for var in var_list:
      self.add_slot(var, 'v')  # Create slots for the second moments.

  def _prepare_local(self, var_device, var_dtype, apply_state):
    """Precompute per-(device, dtype) constants shared by all updates."""
    super(Adamax, self)._prepare_local(var_device, var_dtype, apply_state)

    # t + 1, since this runs before the iteration counter is incremented.
    local_step = math_ops.cast(self.iterations + 1, var_dtype)
    beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
    beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
    beta_1_power = math_ops.pow(beta_1_t, local_step)
    lr_t = apply_state[(var_device, var_dtype)]['lr_t']

    apply_state[(var_device, var_dtype)].update(
        dict(
            # -lr / (1 - beta1^t): bias-corrected, negated step size.
            neg_scaled_lr=-lr_t / (1 - beta_1_power),
            epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
            beta_1_t=beta_1_t,
            beta_1_power=beta_1_power,
            one_minus_beta_1_t=1 - beta_1_t,
            beta_2_t=beta_2_t,
            zero=array_ops.zeros((), dtype=dtypes.int64)))

  def _resource_apply_dense(self, grad, var, apply_state=None):
    # Dense path delegates the whole update to the fused C++ kernel.
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))

    m = self.get_slot(var, 'm')
    v = self.get_slot(var, 'v')
    return training_ops.resource_apply_ada_max(
        var.handle,
        m.handle,
        v.handle,
        coefficients['beta_1_power'],
        coefficients['lr_t'],
        coefficients['beta_1_t'],
        coefficients['beta_2_t'],
        coefficients['epsilon'],
        grad,
        use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    """Sparse update: only the rows selected by `indices` are touched."""
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))

    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, 'm')
    m_slice = array_ops.gather(m, indices, axis=coefficients['zero'])
    m_t_slice = (m_slice * coefficients['beta_1_t'] +
                 grad * coefficients['one_minus_beta_1_t'])
    with ops.control_dependencies([m_t_slice]):
      m_t = self._resource_scatter_update(m, indices, m_t_slice)

    # u_t = max(beta2 * u, abs(g_t))
    v = self.get_slot(var, 'v')
    v_slice = array_ops.gather(v, indices, axis=coefficients['zero'])
    v_t_slice = math_ops.maximum(v_slice * coefficients['beta_2_t'],
                                 math_ops.abs(grad))
    with ops.control_dependencies([v_t_slice]):
      v_t = self._resource_scatter_update(v, indices, v_t_slice)

    # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
    var_slice = coefficients['neg_scaled_lr'] * (
        m_t_slice / (v_t_slice + coefficients['epsilon']))
    with ops.control_dependencies([var_slice]):
      var_update = self._resource_scatter_add(var, indices, var_slice)

    return control_flow_ops.group(*[var_update, m_t, v_t])

  def get_config(self):
    """Serialize hyperparameters for optimizer reconstruction."""
    config = super(Adamax, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._serialize_hyperparameter('decay'),
        'beta_1': self._serialize_hyperparameter('beta_1'),
        'beta_2': self._serialize_hyperparameter('beta_2'),
        'epsilon': self.epsilon,
    })
    return config
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
5e873a127a5b816d0d8b4e502262fb066ca2608d | 2d82d4c6574bd6d32f2cf1c781615f7951f55f66 | /muntjac/demo/sampler/features/trees/TreeSingleSelect.py | d07a2e355077b83ac72f563a77bb096ebf2612c1 | [
"Apache-2.0"
] | permissive | metaperl/muntjac | f83f745ee03942a61af92ee7fba7285aa9c46f3c | 8db97712edd81b4d25deaaa48587d2a08010f2c8 | refs/heads/master | 2021-01-15T22:04:25.057862 | 2012-11-09T03:52:59 | 2012-11-09T03:52:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py |
from muntjac.ui.tree import Tree
from muntjac.demo.sampler.features.trees.TreeMultiSelect import TreeMultiSelect
from muntjac.demo.sampler.features.trees.TreeActions import TreeActions
from muntjac.demo.sampler.APIResource import APIResource
from muntjac.demo.sampler.Feature import Feature, Version
class TreeSingleSelect(Feature):
    """Sampler feature page describing the Tree component in
    single-selection mode."""

    def getSinceVersion(self):
        return Version.OLD

    def getName(self):
        # Title shown in the sampler's feature list.
        return 'Tree, single selection'

    def getDescription(self):
        return ('In this example, you can select any single tree node and '
                'modify its \'name\' property. Click again to de-select.')

    def getRelatedAPI(self):
        return [APIResource(Tree)]

    def getRelatedFeatures(self):
        return [TreeMultiSelect, TreeActions]

    def getRelatedResources(self):
        # No additional resources are associated with this feature.
        return None
| [
"r.w.lincoln@gmail.com"
] | r.w.lincoln@gmail.com |
3fb6faabd50d6e4fec8f682bbab921a976447f7b | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Autodesk/Revit/UI/__init___parts/TaskDialogResult.py | 6464317c49a8ed24e79672214c635e272cf50cb8 | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | class TaskDialogResult(Enum,IComparable,IFormattable,IConvertible):
"""
Enum to specify the task dialog result.
enum TaskDialogResult,values: Cancel (2),Close (8),CommandLink1 (1001),CommandLink2 (1002),CommandLink3 (1003),CommandLink4 (1004),No (7),None (0),Ok (1),Retry (4),Yes (6)
"""
def __eq__(self, *args):
    # Auto-generated stub: equality is implemented by the .NET runtime.
    """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
    pass
def __format__(self, *args):
    # Auto-generated stub: formatting is implemented by the .NET runtime.
    """ __format__(formattable: IFormattable,format: str) -> str """
    pass
def __ge__(self, *args):
    # Auto-generated stub: comparison is implemented by the .NET enum.
    pass
def __gt__(self, *args):
    # Auto-generated stub: comparison is implemented by the .NET enum.
    pass
def __init__(self, *args):
    # Auto-generated stub: construction is handled by the .NET runtime.
    """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
    pass
def __le__(self, *args):
    # Auto-generated stub: comparison is implemented by the .NET enum.
    pass
def __lt__(self, *args):
    # Auto-generated stub: comparison is implemented by the .NET enum.
    pass
def __ne__(self, *args):
    # Auto-generated stub: inequality is implemented by the .NET enum.
    pass
def __reduce_ex__(self, *args):
    # Auto-generated stub: pickling support provided by the runtime.
    pass
def __str__(self, *args):
    # Auto-generated stub: string conversion provided by the runtime.
    pass
# Enum member placeholders; real values are supplied by the IronPython/.NET
# runtime at import time (these stubs exist only for IDE completion).
Cancel = None
Close = None
CommandLink1 = None
CommandLink2 = None
CommandLink3 = None
CommandLink4 = None
No = None
# The .NET enum also defines a member literally named "None". The generated
# line "None=None" is a SyntaxError in Python (cannot assign to None) and
# made this stub unparseable, so it was removed. Access the member at
# runtime with getattr(TaskDialogResult, "None") if required.
Ok = None
Retry = None
value__ = None
Yes = None
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
a6a2cdc64be78791ddd99b63741b386489d36ecf | 5746d26f891270c1bb407a244d9a942534298d96 | /fastreid/data/build.py | e7005a90fcb391336d9acc7f3280546059c9cbf6 | [
"Apache-2.0"
] | permissive | winterxx/fast-reid | 1463253c43876249dd55a3adb0a3e71fa8037aa3 | 727a7468311949efbbc7be360c2c1afaf440bb22 | refs/heads/master | 2022-10-20T17:50:39.245472 | 2020-06-16T03:46:22 | 2020-06-16T03:46:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,131 | py | # encoding: utf-8
"""
@author: l1aoxingyu
@contact: sherlockliao01@gmail.com
"""
import torch
from torch._six import container_abcs, string_classes, int_classes
from torch.utils.data import DataLoader
from . import samplers
from .common import CommDataset
from .datasets import DATASET_REGISTRY
from .transforms import build_transforms
def build_reid_train_loader(cfg):
    """Build the training DataLoader described by ``cfg``.

    Concatenates every dataset in cfg.DATASETS.NAMES, relabels the ids,
    and wraps the result with the configured identity/PK sampler.
    """
    transforms = build_transforms(cfg, is_train=True)

    items = []
    for name in cfg.DATASETS.NAMES:
        dataset = DATASET_REGISTRY.get(name)(combineall=cfg.DATASETS.COMBINEALL)
        dataset.show_train()
        items.extend(dataset.train)

    train_set = CommDataset(items, transforms, relabel=True)

    batch_size = cfg.SOLVER.IMS_PER_BATCH
    num_instance = cfg.DATALOADER.NUM_INSTANCE

    if cfg.DATALOADER.PK_SAMPLER:
        # P identities x K instances per batch; two flavors of the sampler.
        sampler_cls = (samplers.NaiveIdentitySampler
                       if cfg.DATALOADER.NAIVE_WAY
                       else samplers.BalancedIdentitySampler)
        data_sampler = sampler_cls(train_set.img_items, batch_size,
                                   num_instance)
    else:
        data_sampler = samplers.TrainingSampler(len(train_set))

    batch_sampler = torch.utils.data.sampler.BatchSampler(
        data_sampler, batch_size, True)

    return torch.utils.data.DataLoader(
        train_set,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=fast_batch_collator,
    )
def build_reid_test_loader(cfg, dataset_name):
    """Build the evaluation DataLoader for ``dataset_name``.

    Returns the loader over query + gallery images and the number of
    query images, so downstream code can split the concatenated set.
    """
    transforms = build_transforms(cfg, is_train=False)

    dataset = DATASET_REGISTRY.get(dataset_name)()
    dataset.show_test()
    test_set = CommDataset(dataset.query + dataset.gallery, transforms,
                           relabel=False)

    data_sampler = samplers.InferenceSampler(len(test_set))
    batch_sampler = torch.utils.data.BatchSampler(
        data_sampler, cfg.TEST.IMS_PER_BATCH, False)

    test_loader = DataLoader(
        test_set,
        batch_sampler=batch_sampler,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        collate_fn=fast_batch_collator)
    return test_loader, len(dataset.query)
def trivial_batch_collator(batch):
    """Pass-through collator: hand the sampled batch back unchanged."""
    return batch
def fast_batch_collator(batched_inputs):
    """A simple batch collator for most common reid tasks.

    Stacks tensors into one batched tensor, converts numbers to tensors,
    recurses into mappings, and passes strings through unchanged. Uses
    ``collections.abc`` and builtin types instead of the ``torch._six``
    aliases (``container_abcs``, ``int_classes``, ``string_classes``),
    which were removed from recent PyTorch releases.
    """
    import collections.abc

    elem = batched_inputs[0]
    if isinstance(elem, torch.Tensor):
        # Pre-allocate the stacked output and accumulate each sample row.
        out = torch.zeros((len(batched_inputs), *elem.size()), dtype=elem.dtype)
        for i, tensor in enumerate(batched_inputs):
            out[i] += tensor
        return out

    elif isinstance(elem, collections.abc.Mapping):
        # Collate each key's values independently (recursive).
        return {key: fast_batch_collator([d[key] for d in batched_inputs])
                for key in elem}

    elif isinstance(elem, float):
        return torch.tensor(batched_inputs, dtype=torch.float64)
    elif isinstance(elem, int):
        return torch.tensor(batched_inputs)
    elif isinstance(elem, str):
        return batched_inputs
| [
"sherlockliao01@gmail.com"
] | sherlockliao01@gmail.com |
5350c7e34a18d8cebb9c9bcc45be9ec798fde418 | 684a7d56589f7b96002646dfc26ba2de52eb7d80 | /source/callback/eval_mscoco.py | ecc1b5db5b7e1be53cffa13367cc079c536f8f70 | [
"Apache-2.0"
] | permissive | adewin/lambda-deep-learning-demo | 7a42b935ca1ab1e92a0170bf28c7e526cffa5cb6 | ebbbd63c0abf87a1a4155b17cef145039b7a1ef7 | refs/heads/master | 2020-07-08T13:15:51.476791 | 2019-04-26T21:25:44 | 2019-04-26T21:25:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | """
Copyright 2018 Lambda Labs. All Rights Reserved.
Licensed under
==========================================================================
"""
import os
import numpy as np
from scipy import misc
import tensorflow as tf
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from .callback import Callback
# Local MS-COCO layout: annotation JSONs live under DATASET_DIR/annotations.
DATASET_DIR = "/mnt/data/data/mscoco"

# DATASET_META = "val2017"
DATASET_META = "val2014"

# Lookup table from the model's contiguous label indices to the original,
# non-contiguous COCO category ids expected by the COCO evaluation API
# (used below as COCO_ID_MAP[label]).
COCO_ID_MAP = np.asarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
                          14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
                          25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38,
                          39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50,
                          51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
                          62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76,
                          77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88,
                          89, 90])
class EvalMSCOCO(Callback):
    """Callback that collects detections during inference and runs the
    official COCO bounding-box evaluation (pycocotools) when the run ends.
    """

    def __init__(self, config):
        super(EvalMSCOCO, self).__init__(config)
        # Accumulated by after_step() across the whole run.
        self.detection = []   # COCO-format result dicts, one per detected box
        self.image_ids = []   # ids of the images evaluated

    def before_run(self, sess):
        self.graph = tf.get_default_graph()

    def after_run(self, sess):
        """Run COCOeval over every detection gathered by after_step()."""
        print("Detection Finished ...")

        if len(self.detection) > 0:
            annotation_file = os.path.join(
                DATASET_DIR,
                "annotations",
                "instances_" + DATASET_META + ".json")
            coco = COCO(annotation_file)
            coco_results = coco.loadRes(self.detection)
            # DETECTION_FILE = "/home/ubuntu/data/mscoco/results/SSD_512x512_score/detections_minival_ssd512_results.json"
            # coco_results = coco.loadRes(DETECTION_FILE)
            cocoEval = COCOeval(coco, coco_results, "bbox")
            # Restrict evaluation to the images actually seen in this run.
            cocoEval.params.imgIds = self.image_ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
        else:
            print("Found no valid detection. Consider re-train your model.")

    def after_step(self, sess, outputs_dict, feed_dict=None):
        """Convert one batch of network outputs into COCO result dicts.

        Expects per-image entries in outputs_dict: "image_id", "file_name",
        "labels", "bboxes", "scores", "translations", "scales".
        """
        num_images = len(outputs_dict["image_id"])
        for i in range(num_images):
            file_name = outputs_dict["file_name"][i][0]
            num_detections = len(outputs_dict["labels"][i])
            # NOTE(review): translation/scale are read but never used below --
            # confirm whether box de-normalization should account for them.
            translation = outputs_dict["translations"][i]
            scale = outputs_dict["scales"][i]
            # Re-read the image only to recover its original width/height.
            input_image = misc.imread(file_name)
            h, w = input_image.shape[:2]
            # COCO evaluation is based on per detection
            for d in range(num_detections):
                box = outputs_dict["bboxes"][i][d]
                # Boxes appear normalized as [x1, y1, x2, y2]; scale to
                # pixels, clip to the image, convert to [x, y, w, h].
                box = box * [float(w), float(h), float(w), float(h)]
                box[0] = np.clip(box[0], 0, w)
                box[1] = np.clip(box[1], 0, h)
                box[2] = np.clip(box[2], 0, w)
                box[3] = np.clip(box[3], 0, h)
                box[2] = box[2] - box[0]
                box[3] = box[3] - box[1]
                result = {
                    "image_id": outputs_dict["image_id"][i][0],
                    # Map contiguous label index back to COCO category id.
                    "category_id": COCO_ID_MAP[outputs_dict["labels"][i][d]],
                    "bbox": box,
                    "score": outputs_dict["scores"][i][d]
                }
                self.detection.append(result)
            self.image_ids.append(outputs_dict["image_id"][i][0])
def build(config):
    """Factory entry point used by the callback registry."""
    callback = EvalMSCOCO(config)
    return callback
| [
"cl.chuanli@gmail.com"
] | cl.chuanli@gmail.com |
d7a7d2dea431fd8d3dc35b48022975e66ec20183 | f281d0d6431c1b45c6e5ebfff5856c374af4b130 | /DAY001~099/DAY46-BOJ2533-사회망 서비스(SNS)/younghoon.py | 38fcc0c3116cb0e7f54d7a6dd1b9073362b4c486 | [] | no_license | tachyon83/code-rhino | ec802dc91dce20980fac401b26165a487494adb4 | b1af000f5798cd12ecdab36aeb9c7a36f91c1101 | refs/heads/master | 2022-08-13T09:10:16.369287 | 2022-07-30T11:27:34 | 2022-07-30T11:27:34 | 292,142,812 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | '''
이해못했습니당 ㅠ
dp공부를 더하고 봐야겠어요
'''
# BOJ 2533 "Social Network Service": a non-early-adopter requires all its
# friends to be early adopters, so the early adopters form a vertex cover of
# the friendship tree.  Minimum cover = N - (maximum independent set),
# computed with a post-order tree DP.
import sys
sys.setrecursionlimit(10**9)

N = int(sys.stdin.readline())
Tree = [[] for _ in range(N+1)]          # adjacency lists, nodes 1..N
check = [0 for _ in range(N+1)]          # overwritten below before first use
for _ in range(N-1):
    u, v = map(int, sys.stdin.readline().split())
    Tree[u].append(v)
    Tree[v].append(u)

# DP[v][0]: size of the largest independent set in v's subtree with v included
#           (children must then be excluded).
# DP[v][1]: same with v excluded (each child may be either).
DP = [[0, 0] for _ in range(N+1)]
check = [True for _ in range(N+1)]       # True = not yet visited

def DFS(cur):
    # Depth-first post-order traversal filling DP bottom-up.
    check[cur] = False
    DP[cur][0] = 1
    DP[cur][1] = 0
    for i in Tree[cur]:
        if check[i]:
            DFS(i)
            DP[cur][0] += DP[i][1]
            DP[cur][1] += max(DP[i][0], DP[i][1])

DFS(1)
# Answer: everyone outside the maximum independent set must be an adopter.
print(N-max(DP[1][0], DP[1][1]))
| [
"noreply@github.com"
] | tachyon83.noreply@github.com |
9a74952d6bcbbe9d6c9c34e92a33ccbe56808a6b | 70e9a7da3d4e2a41b30544516e166dab2495253c | /payment_trustcode/controllers/main.py | 050edccfc195b6c8248b0c16dcd7b39d3caf7dfa | [
"MIT"
] | permissive | Trust-Code/odoo-brasil | bf06ea58a4e0376cb5c297c18bf48eaf97104e54 | d456a10e32f56e259061afbd989942ea1aae2c2d | refs/heads/16.0 | 2023-08-31T16:06:21.038792 | 2023-01-26T19:31:31 | 2023-01-26T19:31:31 | 72,882,959 | 206 | 253 | MIT | 2023-08-18T17:05:49 | 2016-11-04T20:28:03 | Python | UTF-8 | Python | false | false | 757 | py | import logging
from odoo import http
from odoo.http import request
from werkzeug.utils import redirect
_logger = logging.getLogger(__name__)
class IuguController(http.Controller):
_notify_url = '/iugu/notificacao/'
@http.route(
'/iugu/notificacao/', type='http', auth="none",
methods=['GET', 'POST'], csrf=False)
def iugu_form_feedback(self, **post):
request.env['payment.transaction'].sudo().form_feedback(post, 'iugu')
return "<status>OK</status>"
@http.route(
'/iugu/checkout/redirect', type='http',
auth='none', methods=['GET', 'POST'])
def iugu_checkout_redirect(self, **post):
post = post
if 'secure_url' in post:
return redirect(post['secure_url'])
| [
"danimaribeiro@gmail.com"
] | danimaribeiro@gmail.com |
18480acf2489cd737fa1a54137dc34a18873c149 | fc43470de13ff8f03105efc2a3660a1ed6a1a553 | /LeetCode/207_CourseSchedule.py | 168da2eb51e923ae31c007d923cd1b462ab06a0c | [] | no_license | youseop/Problem_solutions | 5a05597f188b4ef8f7d8483b46bf05fbf2158d01 | 1fba638d9520bca4354bca01f194f80b159e26aa | refs/heads/master | 2023-06-24T05:12:45.060086 | 2021-07-24T14:22:33 | 2021-07-24T14:22:33 | 298,317,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | class Solution:
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
    """Return True iff every course can be scheduled, i.e. the prerequisite
    graph is acyclic (LeetCode 207), using Kahn's topological sort.

    Args:
        numCourses: number of courses labeled 0..numCourses-1.
        prerequisites: pairs [a, b] meaning course b must precede course a.
    """
    from collections import deque

    indegree = [0] * numCourses                    # unmet prerequisites per course
    unlocks = [[] for _ in range(numCourses)]      # course -> courses it unlocks
    for course, prereq in prerequisites:
        indegree[course] += 1
        unlocks[prereq].append(course)

    # Start from every course with no prerequisites.
    ready = deque(i for i in range(numCourses) if indegree[i] == 0)
    scheduled = len(ready)

    while ready:
        done = ready.popleft()
        for nxt in unlocks[done]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
                scheduled += 1

    # All courses get scheduled exactly when the graph has no cycle.
    return scheduled == numCourses
| [
"66366941+youseop@users.noreply.github.com"
] | 66366941+youseop@users.noreply.github.com |
aa316048e3e95c342aba666bf410bbb7cf9b543b | a667b52cb8d2ec857c55d33f04fc0e81d36dc681 | /options/data/real/Reco16_Run179101_DV.py | 4672a9ee45d24e2d7f95dfea3e8b6b5847ee3163 | [] | no_license | wenyanyin/CP_violation_simulation | 639d73333a3795654275cb43cc7dad7c742d1be1 | 7b93b2fe1050fb30d0b809b758cd5a3b2824b875 | refs/heads/master | 2022-04-29T14:19:23.744004 | 2022-04-01T13:05:18 | 2022-04-01T13:05:18 | 168,570,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from Configurables import DaVinci
from Gaudi.Configuration import importOptions
# Configure DaVinci for 2016 real data delivered as rDST.
DaVinci().DataType = '2016'
DaVinci().InputType = 'RDST'
# Detector-description and conditions database tags for this processing.
DaVinci().DDDBtag = 'dddb-20150724'
DaVinci().CondDBtag = 'cond-20170325'
# Repack the raw-event banks from layout 0.3 to 4.2 (AppConfig option file).
importOptions('$APPCONFIGOPTS/DaVinci/DV-RawEventJuggler-0_3-to-4_2.py')
| [
"Michael.Alexander@glasgow.ac.uk"
] | Michael.Alexander@glasgow.ac.uk |
8c5587a7e9df6274c708d09a4a745a6b314aece2 | 02c14fbb23c4c0aef37557e5ba80c688baff4561 | /cloudml-template/examples/census-classification/trainer/task.py | 877b58e2b4c21ede11847e1ae17307ddfe46be1a | [
"Apache-2.0"
] | permissive | cfezequiel/cloudml-samples | c7555c1b6119de41011014d83c2f752428e20f7f | 332662361dc522caad7382499d5a48d3a3a40c6c | refs/heads/master | 2020-05-02T16:07:59.513696 | 2019-04-13T08:01:56 | 2019-04-13T08:01:56 | 178,061,163 | 3 | 0 | Apache-2.0 | 2019-04-13T08:01:57 | 2019-03-27T19:27:26 | Python | UTF-8 | Python | false | false | 11,839 | py | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
from datetime import datetime
import tensorflow as tf
import metadata
import input
import model
# ******************************************************************************
# YOU MAY MODIFY THIS FUNCTION TO ADD/REMOVE PARAMS OR CHANGE THE DEFAULT VALUES
# ******************************************************************************
def initialise_hyper_params(args_parser):
    """
    Define the arguments with the default values,
    parses the arguments passed to the task,
    and set the HYPER_PARAMS global variable

    Args:
        args_parser: argparse.ArgumentParser to populate and parse.
    """

    # Data files arguments
    args_parser.add_argument(
        '--train-files',
        help='GCS or local paths to training data',
        nargs='+',
        required=True
    )
    args_parser.add_argument(
        '--eval-files',
        help='GCS or local paths to evaluation data',
        nargs='+',
        required=True
    )
    args_parser.add_argument(
        '--feature-stats-file',
        help='GCS or local paths to feature statistics json file',
        nargs='+',
        default=None
    )

    ###########################################
    # Experiment arguments - training
    args_parser.add_argument(
        '--train-steps',
        help="""
Steps to run the training job for. If --num-epochs and --train-size are not specified,
this must be. Otherwise the training job will run indefinitely.
if --num-epochs and --train-size are specified, then --train-steps will be:
(train-size/train-batch-size) * num-epochs\
""",
        default=1000,
        type=int
    )
    args_parser.add_argument(
        '--train-batch-size',
        help='Batch size for each training step',
        type=int,
        default=200
    )
    args_parser.add_argument(
        '--train-size',
        help='Size of training set (instance count)',
        type=int,
        default=32561  # 63122
    )
    args_parser.add_argument(
        '--num-epochs',
        help="""\
Maximum number of training data epochs on which to train.
If both --train-size and --num-epochs are specified,
--train-steps will be: (train-size/train-batch-size) * num-epochs.\
""",
        default=100,
        type=int,
    )

    ###########################################
    # Experiment arguments - evaluation
    args_parser.add_argument(
        '--eval-every-secs',
        help='How long to wait before running the next evaluation',
        default=120,
        type=int
    )
    args_parser.add_argument(
        '--eval-steps',
        help="""\
Number of steps to run evaluation for at each checkpoint',
Set to None to evaluate on the whole evaluation data
""",
        default=None,
        type=int
    )
    args_parser.add_argument(
        '--eval-batch-size',
        help='Batch size for evaluation steps',
        type=int,
        default=200
    )

    ###########################################
    # features processing arguments
    args_parser.add_argument(
        '--num-buckets',
        help='Number of buckets into which to discretize numeric columns',
        default=10,
        type=int
    )
    args_parser.add_argument(
        '--embedding-size',
        help='Number of embedding dimensions for categorical columns. value of 0 means no embedding',
        default=4,
        type=int
    )

    ###########################################
    # Estimator arguments
    args_parser.add_argument(
        '--learning-rate',
        help="Learning rate value for the optimizers",
        default=0.1,
        type=float
    )
    # NOTE: parsed as a comma-separated string, not a list of ints.
    args_parser.add_argument(
        '--hidden-units',
        help="""\
Hidden layer sizes to use for DNN feature columns, provided in comma-separated layers.
If --scale-factor > 0, then only the size of the first layer will be used to compute
the sizes of subsequent layers \
""",
        default='30,30,30'
    )
    args_parser.add_argument(
        '--layer-sizes-scale-factor',
        help="""\
Determine how the size of the layers in the DNN decays.
If value = 0 then the provided --hidden-units will be taken as is\
""",
        default=0.7,
        type=float
    )
    args_parser.add_argument(
        '--num-layers',
        help='Number of layers in the DNN. If --scale-factor > 0, then this parameter is ignored',
        default=4,
        type=int
    )
    args_parser.add_argument(
        '--dropout-prob',
        help="The probability we will drop out a given coordinate",
        default=None
    )
    args_parser.add_argument(
        '--encode-one-hot',
        help="""\
If set to True, the categorical columns will be encoded as One-Hot indicators in the deep part of the DNN model.
Otherwise, the categorical columns will only be used in the wide part of the DNN model
""",
        action='store_true',
        default=True,
    )
    args_parser.add_argument(
        '--as-wide-columns',
        help="""\
If set to True, the categorical columns will be used in the wide part of the DNN model
""",
        action='store_true',
        default=True,
    )

    ###########################################
    # Saved model arguments
    args_parser.add_argument(
        '--job-dir',
        help='GCS location to write checkpoints and export models',
        required=True
    )
    args_parser.add_argument(
        '--reuse-job-dir',
        action='store_true',
        default=False,
        help="""\
Flag to decide if the model checkpoint should
be re-used from the job-dir. If False then the
job-dir will be deleted"""
    )
    args_parser.add_argument(
        '--export-format',
        help='The input format of the exported SavedModel binary',
        choices=['JSON', 'CSV', 'EXAMPLE'],
        default='JSON'
    )

    ###########################################
    # Argument to turn on all logging
    args_parser.add_argument(
        '--verbosity',
        choices=[
            'DEBUG',
            'ERROR',
            'FATAL',
            'INFO',
            'WARN'
        ],
        default='INFO',
    )

    return args_parser.parse_args()
# ******************************************************************************
# YOU NEED NOT TO CHANGE THE FUNCTION TO RUN THE EXPERIMENT
# ******************************************************************************
def run_experiment(run_config):
    """Train, evaluate, and export the model using tf.estimator.train_and_evaluate API"""
    hp = HYPER_PARAMS  # module-level hyper-parameters, parsed at import time

    # Input pipelines for the training and evaluation phases.
    training_input = input.generate_input_fn(
        file_names_pattern=hp.train_files,
        mode=tf.estimator.ModeKeys.TRAIN,
        num_epochs=hp.num_epochs,
        batch_size=hp.train_batch_size
    )
    evaluation_input = input.generate_input_fn(
        file_names_pattern=hp.eval_files,
        mode=tf.estimator.ModeKeys.EVAL,
        batch_size=hp.eval_batch_size
    )

    # Derive the number of training steps: prefer train_size * num_epochs
    # when both are supplied, otherwise fall back to the explicit
    # --train-steps value.
    if hp.train_size is not None and hp.num_epochs is not None:
        train_steps = (hp.train_size / hp.train_batch_size) * hp.num_epochs
    else:
        train_steps = hp.train_steps

    train_spec = tf.estimator.TrainSpec(
        training_input,
        max_steps=int(train_steps)
    )

    # Exporter that writes a final SavedModel when training completes.
    final_exporter = tf.estimator.FinalExporter(
        'estimator',
        input.SERVING_FUNCTIONS[hp.export_format],
        as_text=False  # change to true if you want to export the model as readable text
    )
    eval_spec = tf.estimator.EvalSpec(
        evaluation_input,
        steps=hp.eval_steps,
        exporters=[final_exporter],
        throttle_secs=hp.eval_every_secs,
    )

    print("* experiment configurations")
    print("===========================")
    print("Train size: {}".format(hp.train_size))
    print("Epoch count: {}".format(hp.num_epochs))
    print("Train batch size: {}".format(hp.train_batch_size))
    print("Training steps: {} ({})".format(int(train_steps),
                                           "supplied" if hp.train_size is None else "computed"))
    print("Evaluate every {} seconds".format(hp.eval_every_secs))
    print("===========================")

    # Pick the estimator constructor matching the configured task type.
    if metadata.TASK_TYPE == "classification":
        estimator = model.create_classifier(config=run_config)
    elif metadata.TASK_TYPE == "regression":
        estimator = model.create_regressor(config=run_config)
    else:
        estimator = model.create_estimator(config=run_config)

    # train and evaluate
    tf.estimator.train_and_evaluate(
        estimator,
        train_spec,
        eval_spec
    )
# ******************************************************************************
# THIS IS ENTRY POINT FOR THE TRAINER TASK
# ******************************************************************************
def main():
    """Entry point for the trainer task.

    Configures logging verbosity, prepares (or reuses) the job directory,
    builds the RunConfig, and runs the train-and-evaluate experiment while
    reporting wall-clock elapsed time.
    """
    print('')
    print('Hyper-parameters:')
    print(HYPER_PARAMS)
    print('')

    # Set python level verbosity
    tf.logging.set_verbosity(HYPER_PARAMS.verbosity)

    # Set C++ Graph Execution level verbosity.
    # tf.logging levels are multiples of 10 (DEBUG=10 .. FATAL=50), while
    # TF_CPP_MIN_LOG_LEVEL expects the string of a small integer ("0".."3").
    # Use floor division: under Python 3 plain `/` would produce e.g. "2.0",
    # which the C++ runtime does not understand.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(tf.logging.__dict__[HYPER_PARAMS.verbosity] // 10)

    # Directory to store output model and checkpoints
    model_dir = HYPER_PARAMS.job_dir

    # If job_dir_reuse is False then remove the job_dir if it exists
    print("Resume training:", HYPER_PARAMS.reuse_job_dir)
    if not HYPER_PARAMS.reuse_job_dir:
        if tf.gfile.Exists(model_dir):
            tf.gfile.DeleteRecursively(model_dir)
            print("Deleted job_dir {} to avoid re-use".format(model_dir))
        else:
            print("No job_dir available to delete")
    else:
        print("Reusing job_dir {} if it exists".format(model_dir))

    run_config = tf.estimator.RunConfig(
        tf_random_seed=19830610,  # fixed seed for reproducible runs
        log_step_count_steps=1000,
        save_checkpoints_secs=HYPER_PARAMS.eval_every_secs,  # change frequency of saving checkpoints
        keep_checkpoint_max=3,
        model_dir=model_dir
    )
    run_config = run_config.replace(model_dir=model_dir)
    print("Model Directory:", run_config.model_dir)

    # Run the train and evaluate experiment
    time_start = datetime.utcnow()
    print("")
    print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
    print(".......................................")

    run_experiment(run_config)

    time_end = datetime.utcnow()
    print(".......................................")
    print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
    print("")
    time_elapsed = time_end - time_start
    print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))
    print("")
# Hyper-parameters are parsed at import time (not inside the __main__ guard)
# so that HYPER_PARAMS is available as a module-level global to
# run_experiment() and main().
args_parser = argparse.ArgumentParser()
HYPER_PARAMS = initialise_hyper_params(args_parser)

if __name__ == '__main__':
    main()
| [
"khalid.m.salama@gmail.com"
] | khalid.m.salama@gmail.com |
65b74da4eede267474cc9a56a3e2901994358f6a | 06c367fe2d2233c6efb64f323e15bebd7f48c625 | /saleor/product/urls.py | c3159a1923f41052ea09b230dba75e681991402d | [
"BSD-3-Clause"
] | permissive | AkioSky/FishMart | ce630bc4addf63bc105e4f3e13e92c15b119b558 | 1d01d7e79812dc7cccb1b26ffc6457af6104d9f2 | refs/heads/master | 2022-12-11T16:13:38.277080 | 2019-04-22T03:44:22 | 2019-04-22T03:44:22 | 182,615,627 | 0 | 0 | BSD-3-Clause | 2022-12-08T01:44:37 | 2019-04-22T03:20:03 | Python | UTF-8 | Python | false | false | 585 | py | from django.conf.urls import url
from . import views
# URL routes for the product app: listing, product detail, category and
# collection indexes, and the add-to-cart action.
urlpatterns = [
    url(r'^$', views.product_list, name='list'),
    url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$',
        views.product_details, name='details'),
    url(r'^category/(?P<slug>[a-z0-9-_]+?)-(?P<category_id>[0-9]+)/$',
        views.category_index, name='category'),
    # Anchored with '^' like the sibling patterns: Django matches URL regexes
    # with re.search, so an unanchored pattern could match mid-path.
    url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/add/$',
        views.product_add_to_cart, name='add-to-cart'),
    url(r'^collection/(?P<slug>[a-z0-9-_/]+?)-(?P<pk>[0-9]+)/$',
        views.collection_index, name='collection')]
| [
"whitebirdinbluesky1990@gmail.com"
] | whitebirdinbluesky1990@gmail.com |
5221a3cd94c2ccee498f920816799911eaf1cb15 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/277/103384/submittedfiles/principal.py | e2edee6fa3b2efd552caa85372bac86a66d41972 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | # 1. Ler uma matriz m,n de inteiros
# Step 1: read the matrix dimensions, each validated to be in [1, 100].
# (User-facing prompts are in Portuguese: "Digite a quantidade de linhas/
# colunas" = "Enter the number of rows/columns"; "Numero invalido..." =
# "Invalid number. Enter between 1 and 100 (inclusive)".)
while(True):
    m = int(input('Digite a quantidade de linhas: '))  # number of rows
    if m >= 1 and m <= 100:
        break
    else:
        print('Numero invalido. Digite entre 1 e 100 (inclusive)')
while(True):
    n = int(input('Digite a quantidade de colunas: '))  # number of columns
    if n >= 1 and n <= 100:
        break
    else:
        print('Numero invalido. Digite entre 1 e 100 (inclusive)')
# 2. TODO: read an itinerary to traverse - a list of integers
# 3. TODO: compute the cost of the given route
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
3aacfc56559b1cc3148120eb9b169bee48276dd4 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1536+690/sdB_pg_1536+690_lc.py | 110380af2de8d87ee8d50b1b95d32ee08d79b9b4 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from gPhoton.gAperture import gAperture
def main():
    """Extract an NUV light curve for sdB_pg_1536+690 using gPhoton's gAperture.

    Writes the photometry to a hard-coded CSV path. Aperture radius and
    background annulus are presumably in degrees (gPhoton convention) —
    TODO confirm against the gPhoton documentation.
    """
    gAperture(band="NUV", skypos=[234.203458,68.869067], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1536+690/sdB_pg_1536+690_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
ea3a6a6002d4c5c1dd9374b6e70e4f7feb25bf0a | 41f39d013ae3cb2b3ca4230c77b9037cc9c894f6 | /gym/gym/envs/tests/spec_list.py | b9596408afb70ecdad341bf20530ecc72bf0ef7c | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | sokol1412/rllab_hierarchical_rl | 162aec9bb06e271d12333fa072fb44d692c26301 | 6d46c02e32c3d7e9ac55d753d6a3823ff86c5a57 | refs/heads/master | 2020-03-07T07:37:39.510301 | 2018-08-19T11:54:56 | 2018-08-19T11:54:56 | 127,353,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | from gym.gym import envs
import os
import logging
logger = logging.getLogger(__name__)
def should_skip_env_spec_for_tests(spec):
    """Return True if tests for this env spec should be skipped.

    We skip tests for envs that require extra dependencies (mujoco, box2d,
    go/hex boards, ...) or are otherwise troublesome to run frequently.
    For Atari, only a small representative subset (Pong*/Seaquest*) is kept.
    """
    ep = spec._entry_point
    # Skip mujoco tests for pull request CI
    skip_mujoco = not (os.environ.get('MUJOCO_KEY_BUNDLE') or os.path.exists(os.path.expanduser('~/.mujoco')))
    if skip_mujoco and ep.startswith('gym.envs.mujoco:'):
        return True
    # NOTE: the box2d check previously appeared twice (copy-paste duplicate);
    # one occurrence is sufficient.
    if ( 'GoEnv' in ep or
         'HexEnv' in ep or
         ep.startswith('gym.envs.box2d:') or
         ep.startswith('gym.envs.parameter_tuning:') or
         ep.startswith('gym.envs.safety:Semisuper') or
         (ep.startswith("gym.envs.atari") and not spec.id.startswith("Pong") and not spec.id.startswith("Seaquest"))
    ):
        logger.warning("Skipping tests for env {}".format(ep))
        return True
    return False
spec_list = [spec for spec in sorted(envs.registry.all(), key=lambda x: x.id) if spec._entry_point is not None and not should_skip_env_spec_for_tests(spec)]
| [
"wlasek1412@gmail.com"
] | wlasek1412@gmail.com |
e279ab7f4f8a7694db03f67ed2b49a5684138c0b | 70bee1e4e770398ae7ad9323bd9ea06f279e2796 | /openapi_client/models/types_console_certificate_settings.py | ce30f655952ee5e232890ddeccf76d3bd9a86194 | [] | no_license | hi-artem/twistlock-py | c84b420b1e582b3c4cf3631eb72dac6d659d4746 | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | refs/heads/main | 2023-07-18T07:57:57.705014 | 2021-08-22T04:36:33 | 2021-08-22T04:36:33 | 398,637,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,974 | py | # coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class TypesConsoleCertificateSettings(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type string (consumed by to_dict()).
    openapi_types = {
        'check_revocation': 'bool',
        'console_ca_cert': 'str',
        'console_custom_cert': 'CommonSecret',
        'hpkp': 'TypesHPKPSettings'
    }

    # Maps python attribute name -> JSON key used on the wire.
    attribute_map = {
        'check_revocation': 'checkRevocation',
        'console_ca_cert': 'consoleCaCert',
        'console_custom_cert': 'consoleCustomCert',
        'hpkp': 'hpkp'
    }

    def __init__(self, check_revocation=None, console_ca_cert=None, console_custom_cert=None, hpkp=None, local_vars_configuration=None):  # noqa: E501
        """TypesConsoleCertificateSettings - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._check_revocation = None
        self._console_ca_cert = None
        self._console_custom_cert = None
        self._hpkp = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided, so unset
        # fields stay None and are still serialized by to_dict().
        if check_revocation is not None:
            self.check_revocation = check_revocation
        if console_ca_cert is not None:
            self.console_ca_cert = console_ca_cert
        if console_custom_cert is not None:
            self.console_custom_cert = console_custom_cert
        if hpkp is not None:
            self.hpkp = hpkp

    @property
    def check_revocation(self):
        """Gets the check_revocation of this TypesConsoleCertificateSettings.  # noqa: E501

        CheckRevocation indicates whether cert revocation status is required.  # noqa: E501

        :return: The check_revocation of this TypesConsoleCertificateSettings.  # noqa: E501
        :rtype: bool
        """
        return self._check_revocation

    @check_revocation.setter
    def check_revocation(self, check_revocation):
        """Sets the check_revocation of this TypesConsoleCertificateSettings.

        CheckRevocation indicates whether cert revocation status is required.  # noqa: E501

        :param check_revocation: The check_revocation of this TypesConsoleCertificateSettings.  # noqa: E501
        :type check_revocation: bool
        """

        self._check_revocation = check_revocation

    @property
    def console_ca_cert(self):
        """Gets the console_ca_cert of this TypesConsoleCertificateSettings.  # noqa: E501

        ConsoleCACert is a custom CA certificate for the console.  # noqa: E501

        :return: The console_ca_cert of this TypesConsoleCertificateSettings.  # noqa: E501
        :rtype: str
        """
        return self._console_ca_cert

    @console_ca_cert.setter
    def console_ca_cert(self, console_ca_cert):
        """Sets the console_ca_cert of this TypesConsoleCertificateSettings.

        ConsoleCACert is a custom CA certificate for the console.  # noqa: E501

        :param console_ca_cert: The console_ca_cert of this TypesConsoleCertificateSettings.  # noqa: E501
        :type console_ca_cert: str
        """

        self._console_ca_cert = console_ca_cert

    @property
    def console_custom_cert(self):
        """Gets the console_custom_cert of this TypesConsoleCertificateSettings.  # noqa: E501


        :return: The console_custom_cert of this TypesConsoleCertificateSettings.  # noqa: E501
        :rtype: CommonSecret
        """
        return self._console_custom_cert

    @console_custom_cert.setter
    def console_custom_cert(self, console_custom_cert):
        """Sets the console_custom_cert of this TypesConsoleCertificateSettings.


        :param console_custom_cert: The console_custom_cert of this TypesConsoleCertificateSettings.  # noqa: E501
        :type console_custom_cert: CommonSecret
        """

        self._console_custom_cert = console_custom_cert

    @property
    def hpkp(self):
        """Gets the hpkp of this TypesConsoleCertificateSettings.  # noqa: E501


        :return: The hpkp of this TypesConsoleCertificateSettings.  # noqa: E501
        :rtype: TypesHPKPSettings
        """
        return self._hpkp

    @hpkp.setter
    def hpkp(self, hpkp):
        """Sets the hpkp of this TypesConsoleCertificateSettings.


        :param hpkp: The hpkp of this TypesConsoleCertificateSettings.  # noqa: E501
        :type hpkp: TypesHPKPSettings
        """

        self._hpkp = hpkp

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        # Nested models expose their own to_dict; call it with `serialize`
        # only when their signature accepts it.
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serializing, emit the wire (JSON) key instead of the
            # python attribute name.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TypesConsoleCertificateSettings):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TypesConsoleCertificateSettings):
            return True

        return self.to_dict() != other.to_dict()
| [
"aakatev@virtru.com"
] | aakatev@virtru.com |
bf08004a8dc519aed4684436e2e47168caab9220 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L37/37-67_wat_20Abox/set_1ns_equi_m.py | 9a30abf2ff76750baf8c5f509e5e70f035965f6b | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | import os
# Prepare and submit 1 ns equilibration jobs for the 37->67 one-step TI
# transformation: one sub-directory per lambda window, each populated with an
# MD input file and a PBS script generated from the shared templates.
dir = '/mnt/scratch/songlin3/run/mcl1/L37/wat_20Abox/ti_one-step/37_67/'  # NOTE(review): shadows the `dir` builtin
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_m.in'    # template equilibration input
temp_pbs = filesdir + 'temp_1ns_equi_m.pbs'  # template PBS submission script
# Lambda windows for the TI run (12 points).
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Recreate a clean working directory named after the lambda value.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    # equiin: copy the template and substitute the XXX placeholder with lambda
    eqin = workdir + "%6.5f_equi_m.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    # PBS: same substitution for the scheduler script
    pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # topology and restart coordinates shared by all windows
    os.system("cp ../37-67_merged.prmtop .")
    os.system("cp ../0.5_equi_0_3.rst .")
    # submit pbs
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
1055636efad751d50373b46a2e7170cabd726762 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_124/121.py | 1ee579e02a9298b03bc8eb97a5888b8339bc66a9 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | import math
def test(rank, left, right, n, x, y) :
if n == 0 :
if x > 0 :
if right > y :
return 1.0
else :
return 0.0
else :
if left > y :
return 1.0
else :
return 0.0
# left case
leftres = 0
if left < rank * 2 :
leftres = test(rank, left + 1, right, n - 1, x, y)
# right case
rightres = 0
if right < rank * 2 :
rightres = test(rank, left, right + 1, n - 1, x, y)
if left < rank * 2 :
if right < rank * 2 :
return leftres * 0.5 + rightres * 0.5
else :
return leftres
else :
return rightres
# Python 2 Code Jam driver: first line is the number of test cases, each
# case is "n x y" (pieces dropped, target coordinates).
t = int(raw_input())
for casenum in range(1, t + 1):
    n, x, y = [int(z) for z in raw_input().split()]
    if (x == 0) and (y == 0):
        # The origin is always covered once anything drops.
        print "Case #%d: 1.0" % casenum
        continue
    # Index of the "layer" that would contain (x, y); integer division is
    # intended here (Python 2 `/` on ints).
    rank = (abs(x) + y) / 2
    # Piece counts at which that layer is guaranteed complete (maxn) and at
    # which it first starts filling (minn).
    maxn = (2 * rank * rank) + (3 * rank ) + 1
    minn = (2 * rank - 1) * rank
    if n >= maxn:
        # Enough pieces to complete the layer: certainty.
        print "Case #%d: 1.0" % casenum
        continue
    elif y == rank * 2:
        # Apex of the layer: only covered when the layer is complete.
        print "Case #%d: 0.0" % casenum
        continue
    elif n <= minn:
        # Layer not yet started: impossible.
        print "Case #%d: 0.0" % casenum
    else:
        # Otherwise simulate the n - minn drops that land on this layer.
        print "Case #%d: %F" % (casenum, test(rank, 0, 0, n - minn, x, y))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
3736b3a7e4549a3f825f3fb53da537c76c8e2684 | c0156da1c81a3a76e397974399c7345d082eca9b | /synapse/federation/send_queue.py | 4d65d4aeeab80478457d8631d3a88ff23fd13109 | [
"Apache-2.0"
] | permissive | leanhvu86/matrix-server | 1823c60fc6ba5ed489bb5720474c6b56a9aec688 | 6e16fc53dfebaeaf222ff5a371ccffcc65de3818 | refs/heads/master | 2023-05-09T01:21:37.774510 | 2021-05-21T15:10:48 | 2021-05-21T15:10:48 | 369,569,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,268 | py | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A federation sender that forwards things to be sent across replication to
a worker process.
It assumes there is a single worker process feeding off of it.
Each row in the replication stream consists of a type and some json, where the
types indicate whether they are presence, or edus, etc.
Ephemeral or non-event data are queued up in-memory. When the worker requests
updates since a particular point, all in-memory data since before that point is
dropped. We also expire things in the queue after 5 minutes, to ensure that a
dead worker doesn't cause the queues to grow limitlessly.
Events are replicated via a separate events stream.
"""
import logging
from collections import namedtuple
from typing import Dict, List, Tuple, Type
from sortedcontainers import SortedDict
from twisted.internet import defer
from synapse.api.presence import UserPresenceState
from synapse.metrics import LaterGauge
from synapse.util.metrics import Measure
from .units import Edu
logger = logging.getLogger(__name__)
class FederationRemoteSendQueue(object):
    """A drop in replacement for FederationSender"""

    def __init__(self, hs):
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id

        # We may have multiple federation sender instances, so we need to track
        # their positions separately.
        self._sender_instances = hs.config.worker.federation_shard_config.instances
        self._sender_positions = {}

        # Pending presence map user_id -> UserPresenceState
        self.presence_map = {}  # type: Dict[str, UserPresenceState]

        # Stream position -> list[user_id]
        self.presence_changed = SortedDict()  # type: SortedDict[int, List[str]]

        # Stores the destinations we need to explicitly send presence to about a
        # given user.
        # Stream position -> (user_id, destinations)
        self.presence_destinations = (
            SortedDict()
        )  # type: SortedDict[int, Tuple[str, List[str]]]

        # (destination, key) -> EDU
        self.keyed_edu = {}  # type: Dict[Tuple[str, tuple], Edu]

        # stream position -> (destination, key)
        self.keyed_edu_changed = (
            SortedDict()
        )  # type: SortedDict[int, Tuple[str, tuple]]

        self.edus = SortedDict()  # type: SortedDict[int, Edu]

        # stream ID for the next entry into presence_changed/keyed_edu_changed/edus.
        self.pos = 1

        # map from stream ID to the time that stream entry was generated, so that we
        # can clear out entries after a while
        self.pos_time = SortedDict()  # type: SortedDict[int, int]

        # EVERYTHING IS SAD. In particular, python only makes new scopes when
        # we make a new function, so we need to make a new function so the inner
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
        def register(name, queue):
            # NOTE(review): the gauge name uses the loop variable `queue_name`
            # rather than the `name` parameter; they are equal at call time
            # since register() is invoked synchronously inside the loop.
            LaterGauge(
                "synapse_federation_send_queue_%s_size" % (queue_name,),
                "",
                [],
                lambda: len(queue),
            )

        for queue_name in [
            "presence_map",
            "presence_changed",
            "keyed_edu",
            "keyed_edu_changed",
            "edus",
            "pos_time",
            "presence_destinations",
        ]:
            register(queue_name, getattr(self, queue_name))

        # Periodically expire old queue entries (every 30 seconds).
        self.clock.looping_call(self._clear_queue, 30 * 1000)

    def _next_pos(self):
        # Allocate the next stream position and record when it was created so
        # _clear_queue can expire it later.
        pos = self.pos
        self.pos += 1
        self.pos_time[self.clock.time_msec()] = pos
        return pos

    def _clear_queue(self):
        """Clear the queues for anything older than N minutes"""
        FIVE_MINUTES_AGO = 5 * 60 * 1000
        now = self.clock.time_msec()

        keys = self.pos_time.keys()
        time = self.pos_time.bisect_left(now - FIVE_MINUTES_AGO)
        if not keys[:time]:
            return

        position_to_delete = max(keys[:time])
        for key in keys[:time]:
            del self.pos_time[key]

        self._clear_queue_before_pos(position_to_delete)

    def _clear_queue_before_pos(self, position_to_delete):
        """Clear all the queues from before a given position"""
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
            keys = self.presence_changed.keys()
            i = self.presence_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_changed[key]

            user_ids = {
                user_id for uids in self.presence_changed.values() for user_id in uids
            }

            keys = self.presence_destinations.keys()
            i = self.presence_destinations.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_destinations[key]

            user_ids.update(
                user_id for user_id, _ in self.presence_destinations.values()
            )

            # Drop presence state for users no longer referenced by any
            # surviving queue entry.
            to_del = [
                user_id for user_id in self.presence_map if user_id not in user_ids
            ]
            for user_id in to_del:
                del self.presence_map[user_id]

            # Delete things out of keyed edus
            keys = self.keyed_edu_changed.keys()
            i = self.keyed_edu_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.keyed_edu_changed[key]

            live_keys = set()
            for edu_key in self.keyed_edu_changed.values():
                live_keys.add(edu_key)

            keys_to_del = [
                edu_key for edu_key in self.keyed_edu if edu_key not in live_keys
            ]
            for edu_key in keys_to_del:
                del self.keyed_edu[edu_key]

            # Delete things out of edu map
            keys = self.edus.keys()
            i = self.edus.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.edus[key]

    def notify_new_events(self, current_id):
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.
        pass

    def build_and_send_edu(self, destination, edu_type, content, key=None):
        """As per FederationSender"""
        if destination == self.server_name:
            logger.info("Not sending EDU to ourselves")
            return

        pos = self._next_pos()

        edu = Edu(
            origin=self.server_name,
            destination=destination,
            edu_type=edu_type,
            content=content,
        )

        if key:
            assert isinstance(key, tuple)
            # Keyed EDUs clobber: a newer EDU for the same (destination, key)
            # replaces the older one.
            self.keyed_edu[(destination, key)] = edu
            self.keyed_edu_changed[pos] = (destination, key)
        else:
            self.edus[pos] = edu

        self.notifier.on_new_replication_data()

    def send_read_receipt(self, receipt):
        """As per FederationSender

        Args:
            receipt (synapse.types.ReadReceipt):
        """
        # nothing to do here: the replication listener will handle it.
        return defer.succeed(None)

    def send_presence(self, states):
        """As per FederationSender

        Args:
            states (list(UserPresenceState))
        """
        pos = self._next_pos()

        # We only want to send presence for our own users, so lets always just
        # filter here just in case.
        local_states = list(filter(lambda s: self.is_mine_id(s.user_id), states))

        self.presence_map.update({state.user_id: state for state in local_states})
        self.presence_changed[pos] = [state.user_id for state in local_states]

        self.notifier.on_new_replication_data()

    def send_presence_to_destinations(self, states, destinations):
        """As per FederationSender

        Args:
            states (list[UserPresenceState])
            destinations (list[str])
        """
        for state in states:
            pos = self._next_pos()
            self.presence_map.update({state.user_id: state for state in states})
            self.presence_destinations[pos] = (state.user_id, destinations)

        self.notifier.on_new_replication_data()

    def send_device_messages(self, destination):
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.

    def get_current_token(self):
        # The most recently allocated stream position.
        return self.pos - 1

    def federation_ack(self, instance_name, token):
        if self._sender_instances:
            # If we have configured multiple federation sender instances we need
            # to track their positions separately, and only clear the queue up
            # to the token all instances have acked.
            self._sender_positions[instance_name] = token
            token = min(self._sender_positions.values())

        self._clear_queue_before_pos(token)

    async def get_replication_rows(
        self, instance_name: str, from_token: int, to_token: int, target_row_count: int
    ) -> Tuple[List[Tuple[int, Tuple]], int, bool]:
        """Get rows to be sent over federation between the two tokens

        Args:
            instance_name: the name of the current process
            from_token: the previous stream token: the starting point for fetching the
                updates
            to_token: the new stream token: the point to get updates up to
            target_row_count: a target for the number of rows to be returned.

        Returns: a triplet `(updates, new_last_token, limited)`, where:
            * `updates` is a list of `(token, row)` entries.
            * `new_last_token` is the new position in stream.
            * `limited` is whether there are more updates to fetch.
        """
        # TODO: Handle target_row_count.

        # To handle restarts where we wrap around
        if from_token > self.pos:
            from_token = -1

        # list of tuple(int, BaseFederationRow), where the first is the position
        # of the federation stream.
        rows = []  # type: List[Tuple[int, BaseFederationRow]]

        # Fetch changed presence
        i = self.presence_changed.bisect_right(from_token)
        j = self.presence_changed.bisect_right(to_token) + 1
        dest_user_ids = [
            (pos, user_id)
            for pos, user_id_list in self.presence_changed.items()[i:j]
            for user_id in user_id_list
        ]

        for (key, user_id) in dest_user_ids:
            rows.append((key, PresenceRow(state=self.presence_map[user_id])))

        # Fetch presence to send to destinations
        i = self.presence_destinations.bisect_right(from_token)
        j = self.presence_destinations.bisect_right(to_token) + 1

        for pos, (user_id, dests) in self.presence_destinations.items()[i:j]:
            rows.append(
                (
                    pos,
                    PresenceDestinationsRow(
                        state=self.presence_map[user_id], destinations=list(dests)
                    ),
                )
            )

        # Fetch changes keyed edus
        i = self.keyed_edu_changed.bisect_right(from_token)
        j = self.keyed_edu_changed.bisect_right(to_token) + 1

        # We purposefully clobber based on the key here, python dict comprehensions
        # always use the last value, so this will correctly point to the last
        # stream position.
        keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}

        for ((destination, edu_key), pos) in keyed_edus.items():
            rows.append(
                (
                    pos,
                    KeyedEduRow(
                        key=edu_key, edu=self.keyed_edu[(destination, edu_key)]
                    ),
                )
            )

        # Fetch changed edus
        i = self.edus.bisect_right(from_token)
        j = self.edus.bisect_right(to_token) + 1

        edus = self.edus.items()[i:j]

        for (pos, edu) in edus:
            rows.append((pos, EduRow(edu)))

        # Sort rows based on pos
        rows.sort()

        return (
            [(pos, (row.TypeId, row.to_data())) for pos, row in rows],
            to_token,
            False,
        )
class BaseFederationRow(object):
    """Base class for rows to be sent in the federation stream.

    Specifies how to identify, serialize and deserialize the different types.
    """

    TypeId = ""  # Unique string that ids the type. Must be overridden in sub classes.

    @staticmethod
    def from_data(data):
        """Parse the data from the federation stream into a row.

        Args:
            data: The value of ``data`` from FederationStreamRow.data, type
                depends on the type of stream
        """
        raise NotImplementedError()

    def to_data(self):
        """Serialize this row to be sent over the federation stream.

        Returns:
            The value to be sent in FederationStreamRow.data. The type depends
            on the type of stream.
        """
        raise NotImplementedError()

    def add_to_buffer(self, buff):
        """Add this row to the appropriate field in the buffer ready for this
        to be sent over federation.

        We use a buffer so that we can batch up events that have come in at
        the same time and send them all at once.

        Args:
            buff (BufferedToSend)
        """
        raise NotImplementedError()
class PresenceRow(
    BaseFederationRow, namedtuple("PresenceRow", ("state",))  # UserPresenceState
):
    """Federation stream row carrying a single user's presence state."""

    TypeId = "p"

    @staticmethod
    def from_data(data):
        # Wire format is the dict form of a UserPresenceState.
        state = UserPresenceState.from_dict(data)
        return PresenceRow(state=state)

    def to_data(self):
        return self.state.as_dict()

    def add_to_buffer(self, buff):
        buff.presence.append(self.state)
class PresenceDestinationsRow(
    BaseFederationRow,
    namedtuple(
        "PresenceDestinationsRow",
        ("state", "destinations"),  # UserPresenceState  # list[str]
    ),
):
    """Row carrying one user's presence state plus the explicit list of
    remote servers it should be sent to.
    """

    TypeId = "pd"

    @staticmethod
    def from_data(data):
        state = UserPresenceState.from_dict(data["state"])
        return PresenceDestinationsRow(state=state, destinations=data["dests"])

    def to_data(self):
        return {"state": self.state.as_dict(), "dests": self.destinations}

    def add_to_buffer(self, buff):
        buff.presence_destinations.append((self.state, self.destinations))
class KeyedEduRow(
    BaseFederationRow,
    namedtuple(
        "KeyedEduRow",
        ("key", "edu"),  # tuple(str) - the edu key passed to send_edu  # Edu
    ),
):
    """Row for EDUs that carry a clobbering key: a newer EDU with the same
    (destination, key) replaces the older one. For example, typing EDUs
    clobber based on room_id.
    """

    TypeId = "k"

    @staticmethod
    def from_data(data):
        edu = Edu(**data["edu"])
        return KeyedEduRow(key=tuple(data["key"]), edu=edu)

    def to_data(self):
        return {"key": self.key, "edu": self.edu.get_internal_dict()}

    def add_to_buffer(self, buff):
        per_destination = buff.keyed_edus.setdefault(self.edu.destination, {})
        per_destination[self.key] = self.edu
class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))):  # Edu
    """Row for EDUs without a clobbering key (cf. KeyedEduRow)."""

    TypeId = "e"

    @staticmethod
    def from_data(data):
        # Wire format is simply the EDU's own dict form.
        return EduRow(Edu(**data))

    def to_data(self):
        return self.edu.get_internal_dict()

    def add_to_buffer(self, buff):
        queue = buff.edus.setdefault(self.edu.destination, [])
        queue.append(self.edu)
# All concrete row classes carried by the federation stream.
_rowtypes = (
    PresenceRow,
    PresenceDestinationsRow,
    KeyedEduRow,
    EduRow,
)  # type: Tuple[Type[BaseFederationRow], ...]

# Lookup table from the single-character TypeId on the wire to the row class.
TypeToRow = {Row.TypeId: Row for Row in _rowtypes}

# In-memory buffer used to batch parsed rows by kind before sending.
ParsedFederationStreamData = namedtuple(
    "ParsedFederationStreamData",
    (
        "presence",  # list(UserPresenceState)
        "presence_destinations",  # list of tuples of UserPresenceState and destinations
        "keyed_edus",  # dict of destination -> { key -> Edu }
        "edus",  # dict of destination -> [Edu]
    ),
)
def process_rows_for_federation(transaction_queue, rows):
    """Parse a list of rows from the federation stream and put them in the
    transaction queue ready for sending to the relevant homeservers.

    Args:
        transaction_queue (FederationSender)
        rows (list(synapse.replication.tcp.streams.federation.FederationStream.FederationStreamRow))
    """

    # The federation stream contains a bunch of different types of
    # rows that need to be handled differently. We parse the rows, put
    # them into the appropriate collection and then send them off.

    buff = ParsedFederationStreamData(
        presence=[], presence_destinations=[], keyed_edus={}, edus={},
    )

    # Parse the rows in the stream and add to the buffer
    for row in rows:
        if row.type not in TypeToRow:
            logger.error("Unrecognized federation row type %r", row.type)
            continue

        RowType = TypeToRow[row.type]
        parsed_row = RowType.from_data(row.data)
        parsed_row.add_to_buffer(buff)

    if buff.presence:
        transaction_queue.send_presence(buff.presence)

    for state, destinations in buff.presence_destinations:
        transaction_queue.send_presence_to_destinations(
            states=[state], destinations=destinations
        )

    # The destination is already encoded inside each Edu, so only the values
    # of the per-destination maps/lists are needed here.
    for edu_map in buff.keyed_edus.values():
        for key, edu in edu_map.items():
            transaction_queue.send_edu(edu, key)

    for edu_list in buff.edus.values():
        for edu in edu_list:
            transaction_queue.send_edu(edu, None)
| [
"leanhvu86@gmail.com"
] | leanhvu86@gmail.com |
73e9576f7c22061ccd62f5111e442fa156109f2d | 38cacbe9ec2f8ea4540f5aed31da60ac3595c08b | /tests/utils.py | b2b5b9aa0b054a7399086693022be53f9da50f24 | [
"Apache-2.0"
] | permissive | naure/YaP | 14bac663cdf31bda58dd5288f1f297ffa164a742 | e4f9c8b00a463b4fedceb6d9241dd9c723607562 | refs/heads/master | 2020-04-06T06:56:17.027479 | 2016-08-23T19:34:15 | 2016-08-23T19:34:15 | 24,347,882 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | import difflib
def red(s):
    """Wrap *s* in ANSI escape codes so terminals render it red."""
    return '\033[91m{}\033[0m'.format(s)
def green(s):
    """Wrap *s* in ANSI escape codes so terminals render it green."""
    return '\033[92m{}\033[0m'.format(s)
def color_diffline(line):
    """Colorise one unified-diff line: removals red, additions green,
    everything else unchanged.
    """
    painters = {'-': red, '+': green}
    paint = painters.get(line[:1])
    return paint(line) if paint else line
def diff(a, b, **kwargs):
    """Return a colorised unified diff of the strings *a* and *b*.

    Extra keyword arguments (fromfile, tofile, ...) are forwarded to
    difflib.unified_diff.
    """
    raw_lines = difflib.unified_diff(a.splitlines(), b.splitlines(), **kwargs)
    return '\n'.join(color_diffline(line) for line in raw_lines)
def diff_paths(pa, pb):
    """Diff the contents of two files.

    Returns False when the files are identical, otherwise the colorised
    unified diff labelled with both paths.
    """
    with open(pa) as fa, open(pb) as fb:
        content_a = fa.read()
        content_b = fb.read()
    if content_a == content_b:
        return False
    return diff(content_a, content_b, fromfile=pa, tofile=pb)
def compare_paths(ref_path, test_path, what='Output'):
    """Compare a test artefact against a reference file.

    Prints a red headline and the diff when they differ. Returns 1 on
    mismatch and 0 on match, suitable for accumulating an exit status.
    """
    test_diff = diff_paths(ref_path, test_path)
    if not test_diff:
        return 0
    print(red('{} {} is different than reference {}'.format(
        what, test_path, ref_path)))
    print(test_diff)
    return 1
| [
"devnull@localhost"
] | devnull@localhost |
60112dc2b60f73bf2d1353313923ac3433aa5be3 | 30cffb7452220c2ac2961dd2e0f42e3b359a59c0 | /simscale_sdk/models/ground_relative.py | 9f70ec5cb7f1b769bb77f048419ec92fffa6e3f3 | [
"MIT"
] | permissive | vpurcarea/simscale-python-sdk | 0bf892d8824f8d4599caa0f345d5ba28e038f5eb | 6f2d12b2d21142bd854042c0fb402c2c797629e4 | refs/heads/master | 2023-03-14T04:31:06.226337 | 2021-03-03T16:20:01 | 2021-03-03T16:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,290 | py | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class GroundRelative(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'type': 'str',
        'topological_reference': 'TopologicalReference'
    }

    # Maps python attribute names to their JSON keys in the API schema.
    attribute_map = {
        'type': 'type',
        'topological_reference': 'topologicalReference'
    }

    def __init__(self, type='GROUND_RELATIVE', topological_reference=None, local_vars_configuration=None):  # noqa: E501
        """GroundRelative - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below.
        self._type = None
        self._topological_reference = None
        self.discriminator = None

        # Assign through the property setters so validation runs.
        self.type = type
        if topological_reference is not None:
            self.topological_reference = topological_reference

    @property
    def type(self):
        """Gets the type of this GroundRelative.  # noqa: E501


        :return: The type of this GroundRelative.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this GroundRelative.


        :param type: The type of this GroundRelative.  # noqa: E501
        :type: str
        """
        # `type` is required by the schema, so reject None when client-side
        # validation is enabled.
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    @property
    def topological_reference(self):
        """Gets the topological_reference of this GroundRelative.  # noqa: E501


        :return: The topological_reference of this GroundRelative.  # noqa: E501
        :rtype: TopologicalReference
        """
        return self._topological_reference

    @topological_reference.setter
    def topological_reference(self, topological_reference):
        """Sets the topological_reference of this GroundRelative.


        :param topological_reference: The topological_reference of this GroundRelative.  # noqa: E501
        :type: TopologicalReference
        """
        self._topological_reference = topological_reference

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything exposing to_dict),
        # lists of models and dicts of models into plain python values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GroundRelative):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, GroundRelative):
            return True

        return self.to_dict() != other.to_dict()
| [
"simscale"
] | simscale |
6e14b31178f64e5288bfaa4ef92323615e3ab96c | ed6cb6f58b36f16d38e0f7909173c67c15575a3f | /smart_compress/models/base.py | a381b2ad5ac4abdb3dfa1faf51075fcd05ebbe9f | [] | no_license | gthparch/etri-quant | 4698e50ed1f835ced5b69f928cda5bc2e357657f | 36cfb72f50937b65762fac0f12f044714b755a66 | refs/heads/main | 2023-08-14T21:42:10.007259 | 2021-10-09T01:28:40 | 2021-10-09T01:28:40 | 415,169,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,904 | py | from abc import abstractmethod
from argparse import ArgumentParser, Namespace
from typing import Iterator, Type, Union
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from argparse_utils.mapping import mapping_action
from smart_compress.util.pytorch.hooks import wrap_optimizer
from torch import nn
from torch.optim import SGD, Adam, AdamW
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.optimizer import Optimizer
def make_optimizer_args(
    hparams: Namespace,
    **kwargs,
):
    """Build the keyword arguments for an optimizer constructor.

    Args:
        hparams: parsed argparse Namespace providing learning_rate, momentum
            and weight_decay, and optionally beta1/beta2/epsilon (None when
            the flags were not passed).
        **kwargs: explicit overrides that take precedence over hparams.

    Returns:
        dict of optimizer constructor arguments.
    """
    optimizer_args = dict(
        lr=hparams.learning_rate,
        momentum=hparams.momentum,
        weight_decay=hparams.weight_decay,
    )

    # Compare against None instead of relying on truthiness: beta1=0.0 (a
    # legitimate Adam setting) and epsilon=0.0 would otherwise be silently
    # dropped, since the argparse flags default to None when unset.
    if hparams.beta1 is not None and hparams.beta2 is not None:
        optimizer_args.update(dict(betas=(hparams.beta1, hparams.beta2)))
    if hparams.epsilon is not None:
        optimizer_args.update(dict(eps=hparams.epsilon))

    # Caller-supplied overrides win over everything derived from hparams.
    optimizer_args.update(kwargs)

    return optimizer_args
def make_multistep_scheduler(optimizer: Optimizer, hparams: Namespace):
    """Build a MultiStepLR schedule from the parsed hyperparameters.

    The learning rate is multiplied by ``scheduler_gamma`` each time an
    epoch in ``scheduler_milestones`` is reached.
    """
    schedule_kwargs = dict(
        milestones=hparams.scheduler_milestones,
        gamma=hparams.scheduler_gamma,
    )
    return MultiStepLR(optimizer, **schedule_kwargs)
class BaseModule(pl.LightningModule):
    """Base LightningModule shared by the smart_compress models.

    Wires an optional compression scheme into the loss, weights, gradients
    and momentum vectors, and provides common optimizer/scheduler setup.
    """

    @staticmethod
    def add_argparse_args(parent_parser):
        # Extends the caller's parser with optimizer/scheduler hyperparameter
        # flags; the mapping_action flags store the mapped object (class or
        # factory function), not the string.
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument(
            "--optimizer_type",
            action=mapping_action(dict(adam=Adam, adamw=AdamW, sgd=SGD)),
            default="sgd",
            dest="optimizer_cls",
        )
        parser.add_argument(
            "--scheduler_type",
            action=mapping_action(dict(multi_step=make_multistep_scheduler)),
            dest="make_scheduler_fn",
        ),
        parser.add_argument("--scheduler_gamma", type=float, default=0.1)
        parser.add_argument(
            "--scheduler_milestones",
            type=int,
            nargs="+",
            default=[100, 150, 200],
        )
        parser.add_argument("--learning_rate", type=float, default=0.1)
        parser.add_argument("--weight_decay", type=float, default=0)
        parser.add_argument("--momentum", type=float, default=0.9)
        parser.add_argument("--beta1", type=float)
        parser.add_argument("--beta2", type=float)
        parser.add_argument("--epsilon", type=float)
        parser.add_argument("--measure_average_grad_norm", action="store_true")

        return parser

    def __init__(self, *args, compression=None, **kwargs):
        super().__init__()

        self.compression = compression
        if self.compression is None:
            # Default to the no-op FP32 "compression" when none is supplied.
            from smart_compress.compress.fp32 import FP32

            self.compression = FP32(self.hparams)

        self.save_hyperparameters()

        if self.hparams.measure_average_grad_norm:
            # Per-step mean gradient norms, consumed in training_epoch_end.
            self._grads = []

    def training_epoch_end(self, *args, **kwargs):
        if not self.hparams.measure_average_grad_norm:
            return super().training_epoch_end(*args, **kwargs)

        # NOTE(review): bare except silently hides any failure here
        # (e.g. empty self._grads); consider narrowing it.
        try:
            avg = torch.mean(torch.tensor(self._grads))
            print(f"AVERAGE: {avg}")
        except:
            pass

        return super().training_epoch_end(*args, **kwargs)

    def loss_function(self, outputs, ground_truth):
        # Default classification loss; subclasses may override.
        return F.cross_entropy(outputs, ground_truth)

    def accuracy_function(self, outputs, ground_truth):
        # Mapping of metric name -> value; empty by default.
        return dict()

    @abstractmethod
    def forward(self, x):
        raise Exception("Not implemented")

    def calculate_loss(self, batch):
        """Run the forward pass on a (inputs, labels) batch and return
        (labels, loss, outputs), optionally compressing the loss value."""
        inputs, labels = batch
        outputs = self(inputs)
        loss = self.loss_function(outputs, labels)

        if self.hparams.compress_loss:
            # Compress the loss tensor data in place (tagged for bookkeeping).
            loss.data = self.compression(loss.data, tag="loss")

        return labels, loss, outputs

    def training_step(self, batch, _batch_idx):
        labels, loss, outputs = self.calculate_loss(batch)

        self.log("train_loss", loss)
        for metric, value in self.accuracy_function(outputs, labels).items():
            self.log(f"train_{metric}", value, on_epoch=True, prog_bar=True)

        return dict(loss=loss)

    def validation_step(self, batch, _batch_idx):
        labels, loss, outputs = self.calculate_loss(batch)

        self.log("val_loss", loss)
        for metric, value in self.accuracy_function(outputs, labels).items():
            self.log(f"val_{metric}", value, on_epoch=True, prog_bar=True)

        return dict(loss=loss)

    def configure_optimizers(self):
        base_args = make_optimizer_args(self.hparams)

        # Split parameters so BatchNorm layers can be flagged with
        # no_weight_compression in their optimizer param group.
        params_bn = []
        params_no_bn = []
        for child in self.modules():
            params = params_bn if type(child) == nn.BatchNorm2d else params_no_bn
            params.extend(child.parameters(recurse=False))

        optimizer = self.hparams.optimizer_cls(
            [
                dict(params=params_bn, no_weight_compression=True, **base_args),
                dict(params=params_no_bn, **base_args),
            ]
        )

        # Only wrap the optimizer when some compression is actually enabled.
        if (
            self.hparams.compress_weights
            or self.hparams.compress_gradients
            or self.hparams.compress_momentum_vectors
        ):
            optimizer = wrap_optimizer(optimizer, self.compression, self.hparams)

        if self.hparams.make_scheduler_fn:
            scheduler = self.hparams.make_scheduler_fn(optimizer, self.hparams)

            return [optimizer], [scheduler]

        return [optimizer], []

    def optimizer_zero_grad(self, *args, **kwargs):
        if not self.hparams.measure_average_grad_norm:
            return super().optimizer_zero_grad(*args, **kwargs)

        # Record the mean gradient norm just before the grads are zeroed.
        norms = torch.tensor(
            [
                parameter.grad.norm()
                for parameter in self.parameters()
                if parameter.grad is not None
            ]
        )
        if len(norms):
            self._grads.append(torch.mean(norms))

        return super().optimizer_zero_grad(*args, **kwargs)
| [
"caojiashen24@gmail.com"
] | caojiashen24@gmail.com |
4e7a822623cae02b8a770d73d3e214da5a0056c0 | d8edd97f8f8dea3f9f02da6c40d331682bb43113 | /networks1216.py | 05847acaccb6e806629fb44d584e05d7924634a3 | [] | no_license | mdubouch/noise-gan | bdd5b2fff3aff70d5f464150443d51c2192eeafd | 639859ec4a2aa809d17eb6998a5a7d217559888a | refs/heads/master | 2023-07-15T09:37:57.631656 | 2021-08-27T11:02:45 | 2021-08-27T11:02:45 | 284,072,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,579 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Network definition version, stored on Gen/Disc instances for checkpoints.
__version__ = 205

# Number of continuous features (E, t, dca)
n_features = 3
# Size of the geometry block in the discriminator input (see Disc.forward,
# which slices x_[:, n_features:n_features+geom_dim] as "xy").
geom_dim = 3
class SelfAttn(nn.Module):
    """Self-attention layer over 1D sequences with a residual connection."""

    def __init__(self, dim):
        super().__init__()
        # 1x1 convolutions act as the query/key/value projections.
        self.query_conv = nn.Conv1d(dim, dim, 1, 1, 0)
        self.key_conv = nn.Conv1d(dim, dim, 1, 1, 0)
        self.value_conv = nn.Conv1d(dim, dim, 1, 1, 0)

    def forward(self, x):
        """Apply self-attention to x of shape (B, C, L).

        Returns:
            out: attended features plus the residual input, shape (B, C, L)
            attention: attention map of shape (B, L, L)
        """
        queries = self.query_conv(x).permute(0, 2, 1)  # (B, L, C)
        keys = self.key_conv(x)  # (B, C, L)
        attention = torch.softmax(torch.bmm(queries, keys), dim=-1)  # (B, L, L)
        values = self.value_conv(x)  # (B, C, L)
        attended = torch.bmm(values, attention)  # (B, C, L)
        return attended + x, attention
def wire_hook(grad):
    """Gradient hook: log the wire gradient's magnitude and spread, then
    pass the gradient through unchanged."""
    mean_abs = grad.abs().mean().item()
    spread = grad.std().item()
    print('wg %.2e %.2e' % (mean_abs, spread))
    return grad
class Gen(nn.Module):
    """GAN generator mapping a latent vector to a sequence of hits.

    Produces continuous hit features (via tanh) and a per-step softmax over
    wires. `encoded_dim` is accepted but unused in this version.
    """

    def __init__(self, ngf, latent_dims, seq_len, encoded_dim, n_wires):
        super().__init__()

        self.ngf = ngf
        self.seq_len = seq_len

        self.version = __version__

        # Input: (B, latent_dims, 1)

        self.act = nn.ReLU()

        n512 = 256
        # Project the latent vector to a (n512, seq_len/32) feature map;
        # five stride-2 upsampling blocks below restore the full seq_len.
        self.lin0 = nn.Linear(latent_dims, seq_len//32*n512, bias=True)
        self.n512 = n512
        # NOTE(review): n256..n16 are computed but unused in this version.
        n256 = n512 // 2
        n128 = n512 // 4
        n64 = n512 // 8
        n32 = n512 // 16
        n16 = n512 // 32

        class ResBlockUp(nn.Module):
            # Residual block that doubles the sequence length: transposed-conv
            # main path plus a stride-2 transposed-conv shortcut (convp).
            def __init__(self, in_c, out_c):
                super().__init__()
                self.conv1 = nn.ConvTranspose1d(in_c, out_c, 3, 2, 1, output_padding=1)
                self.conv2 = nn.ConvTranspose1d(out_c, out_c, 3, 1, 1)
                self.convp = nn.ConvTranspose1d(in_c, out_c, 2, 2, 0, bias=False)
                self.bn1 = nn.InstanceNorm1d(out_c)
                self.bn2 = nn.InstanceNorm1d(out_c)
                self.act = nn.ReLU()

            def forward(self, x):
                y = self.bn1(self.act(self.conv1(x)))
                y = self.conv2(y)
                xp = self.convp(x)
                y = self.bn2(self.act(xp + y))
                return y

        # Shared trunk: three upsampling blocks.
        self.convu1 = ResBlockUp(n512, n512)
        self.convu2 = ResBlockUp(n512, n512)
        self.convu3 = ResBlockUp(n512, n512//2)

        # Common branch
        self.convu4 = ResBlockUp(n512//2, n512//4)
        self.convu5 = ResBlockUp(n512//4, n512//8)

        # W branch
        self.convuw1 = ResBlockUp(n512//2, n512//4)
        self.convuw2 = ResBlockUp(n512//4, n512//8)

        # P branch
        self.convup1 = ResBlockUp(n512//2, n512//4)
        self.convup2 = ResBlockUp(n512//4, n512//8)

        # Wire head: attention over [common, w] concatenation, then a
        # per-step projection to n_wires logits.
        self.attnw2 = SelfAttn(n512//4)
        self.convw2 = nn.Conv1d(n512//4, n512//8, 7, 1, 3)
        self.attnw1 = SelfAttn(n512//8)
        self.convw1 = nn.Conv1d(n512//8, n_wires, 1, 1, 0)

        # Feature head: attention over [common, p], projecting to n_features.
        self.attnp2 = SelfAttn(n512//4)
        self.convp2 = nn.Conv1d(n512//4, n512//8, 3, 1, 1)
        self.attnp1 = SelfAttn(n512//8)
        self.convp1 = nn.Conv1d(n512//8, n_features, 7, 1, 3)

    def forward(self, z):
        """Map latent z to (features, wire_probabilities).

        Returns:
            p:  (B, n_features, seq_len) tanh-squashed continuous features
            wg: (B, n_wires, seq_len) softmax over wires per step
        """
        #print('latent space %.2e %.2e' % (z.mean().item(), z.std().item()))
        # z: random point in latent space
        x = self.act(self.lin0(z).reshape(-1, self.n512, self.seq_len // 32))

        x = self.convu1(x)
        x = self.convu2(x)
        # x0 feeds all three branches below.
        x0 = self.convu3(x)

        # Common
        x = self.convu4(x0)
        x = self.convu5(x)

        # W
        w = self.convuw1(x0)
        w = self.convuw2(w)

        # P
        p = self.convup1(x0)
        p = self.convup2(p)

        w0 = torch.cat([x, w], dim=1)
        w, w_attn = self.attnw2(w0)
        w = self.act(self.convw2(w))
        w, w_attn = self.attnw1(w)
        w = self.convw1(w)
        wg = torch.softmax(w, dim=1)

        p0 = torch.cat([x, p], dim=1)
        p, p_attn = self.attnp2(p0)
        p = self.act(self.convp2(p))
        p, p_attn = self.attnp1(p)
        p = self.convp1(p)

        #return torch.cat([self.out(p), xy], dim=1), wg
        return torch.tanh(p), wg
def xy_hook(grad):
    """Gradient hook: log the xy gradient's magnitude and spread, then
    pass the gradient through unchanged."""
    mean_abs = grad.abs().mean().item()
    spread = grad.std().item()
    print('xy %.2e %.2e' % (mean_abs, spread))
    return grad
class Disc(nn.Module):
    """GAN discriminator scoring sequences of (features, geometry, wire) hits.

    `ndf`, `seq_len` and `encoded_dim` are accepted but unused in this
    version; the input layout is fixed by n_features/geom_dim/n_wires.
    """

    def __init__(self, ndf, seq_len, encoded_dim, n_wires):
        super().__init__()

        self.version = __version__

        # (B, n_features, 256)
        self.act = nn.LeakyReLU(0.2)

        # NOTE(review): n768/n512/n256/n128 are computed but unused here.
        n768 = 256
        n512 = 256
        n256 = 256
        n128 = 128
        n64 = 64
        nproj = 8

        class ResBlock(nn.Module):
            # Length-preserving residual block (unused by this version's
            # forward path but kept for checkpoint compatibility).
            def __init__(self, channels):
                super().__init__()
                self.conv1 = nn.Conv1d(channels, channels, 3, 1, 1, padding_mode='circular')
                self.conv2 = nn.Conv1d(channels, channels, 3, 1, 1, padding_mode='circular')
                self.act = nn.LeakyReLU(0.2)

            def forward(self, x):
                y = self.act(self.conv1(x))
                y = self.conv2(y)
                y = self.act(y + x)
                return y

        class ResBlockDown(nn.Module):
            # Residual block that halves the sequence length and doubles the
            # channel count; convp is the stride-2 shortcut projection.
            def __init__(self, channels):
                super().__init__()
                self.conv1 = nn.Conv1d(channels, channels*2, 3, 2, 1, padding_mode='circular')
                self.conv2 = nn.Conv1d(channels*2, channels*2, 3, 1, 1, padding_mode='circular')
                self.convp = nn.Conv1d(channels, channels*2, 2, 2, 0, bias=False)
                self.act = nn.LeakyReLU(0.2)

            def forward(self, x):
                y = self.act(self.conv1(x))
                y = self.conv2(y)
                xp = self.convp(x)
                y = self.act(y + xp)
                return y

        # Project the (large) one-hot/softmax wire block down to nproj channels.
        self.convw0 = nn.Conv1d(n_wires, nproj, 1, 1, 0, bias=False)

        self.conv1 = nn.Conv1d(nproj+geom_dim+n_features, n64, 17, 1, 8, padding_mode='circular')
        self.attn1 = SelfAttn(n64)
        self.rb2 = ResBlockDown(n64)#nn.Conv1d(n64, n64*2, 3, 2, 1, padding_mode='circular')
        self.attn2 = SelfAttn(n64*2)
        self.rb3 = ResBlockDown(n64*2)#nn.Conv1d(n64*2, n64*4, 3, 2, 1, padding_mode='circular')
        self.attn3 = SelfAttn(n64*4)
        self.rb4 = ResBlockDown(n64*4)#nn.Conv1d(n64*4, n64*8, 3, 2, 1, padding_mode='circular')

        # Final score from globally averaged features.
        self.lin0 = nn.Linear(n64*8, 1)

        # NOTE(review): dropout/padleft are constructed but not used in
        # forward; self.out is the identity.
        self.dropout = nn.Dropout(0.2)

        self.out = nn.Identity()

        self.padleft = nn.ConstantPad1d((1, 0), 0.)

    def forward(self, x_):
        """Score a batch of sequences; returns an unbounded scalar per item.

        # x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
        # p_ shape is (batch, features, seq_len),
        # w_ is AE-encoded wire (batch, encoded_dim, seq_len)
        """
        seq_len = x_.shape[2]

        #dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)

        # Split the channel axis into features / geometry / wire blocks.
        p = x_[:,:n_features]
        xy = x_[:,n_features:n_features+geom_dim]
        wg = x_[:,n_features+geom_dim:]

        w0 = self.convw0(wg)

        x = torch.cat([w0, p, xy], dim=1)

        x = self.act(self.conv1(x))
        x, x_attn = self.attn1(x)
        x = self.rb2(x)
        x, x_attn = self.attn2(x)
        x = self.rb3(x)
        x, x_attn = self.attn3(x)
        x = self.rb4(x)

        # Global average pool over the sequence, then the linear score head.
        x = self.lin0(x.mean(2)).squeeze()

        return self.out(x)
def get_n_params(model):
    """Return the total number of scalar elements across all parameters of *model*."""
    # numel() counts elements directly, avoiding a reshape per parameter.
    return sum(p.numel() for p in model.parameters())
| [
"m.dubouchet18@imperial.ac.uk"
] | m.dubouchet18@imperial.ac.uk |
6cd382d97e1da0433636db1787089416dd634124 | e9538b7ad6d0ce0ccfbb8e10c458f9e0b73926f6 | /tests/unit/modules/network/fortios/test_fortios_system_ipv6_neighbor_cache.py | f808f626a882242f8f534d39d78eb5419e5e659d | [] | no_license | ansible-collection-migration/misc.not_a_real_collection | b3ef8090c59de9ac30aca083c746ec3595d7f5f5 | 7ab1af924a3db4ada2f714b09bb392614344cb1e | refs/heads/master | 2020-12-18T13:48:51.849567 | 2020-01-22T17:39:18 | 2020-01-22T17:39:18 | 235,400,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,122 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.misc.not_a_real_collection.plugins.modules import fortios_system_ipv6_neighbor_cache
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Patch the Connection class used by the module under test so no real
    # FortiOS device is ever contacted during these unit tests.
    connection_class_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.modules.fortios_system_ipv6_neighbor_cache.Connection')
    return connection_class_mock


# Module-level handler shared by all tests below.
# NOTE(review): this passes the fixture *function* object itself, not a mock
# instance; the handler's HTTP methods are patched per-test, so the
# connection is never exercised — confirm this is intentional.
fos_instance = FortiOSHandler(connection_mock)
def test_system_ipv6_neighbor_cache_creation(mocker):
    """state=present with a new entry POSTs the payload and reports a change."""
    schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ipv6_neighbor_cache': {
            'id': '3',
            'interface': 'test_value_4',
            'ipv6': 'test_value_5',
            'mac': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_ipv6_neighbor_cache.fortios_system(input_data, fos_instance)

    expected_data = {
        'id': '3',
        'interface': 'test_value_4',
        'ipv6': 'test_value_5',
        'mac': 'test_value_6'
    }

    # The module must forward exactly the sanitised payload to the API.
    set_method_mock.assert_called_with('system', 'ipv6-neighbor-cache', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_ipv6_neighbor_cache_creation_fails(mocker):
    """A 500 error from the API surfaces as is_error with no change reported."""
    schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ipv6_neighbor_cache': {
            'id': '3',
            'interface': 'test_value_4',
            'ipv6': 'test_value_5',
            'mac': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_ipv6_neighbor_cache.fortios_system(input_data, fos_instance)

    expected_data = {
        'id': '3',
        'interface': 'test_value_4',
        'ipv6': 'test_value_5',
        'mac': 'test_value_6'
    }

    set_method_mock.assert_called_with('system', 'ipv6-neighbor-cache', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_ipv6_neighbor_cache_removal(mocker):
    """state=absent issues a DELETE keyed by mkey and reports a change."""
    schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_ipv6_neighbor_cache': {
            'id': '3',
            'interface': 'test_value_4',
            'ipv6': 'test_value_5',
            'mac': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_ipv6_neighbor_cache.fortios_system(input_data, fos_instance)

    # The mkey value is opaque here, so only its presence is asserted (ANY).
    delete_method_mock.assert_called_with('system', 'ipv6-neighbor-cache', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_ipv6_neighbor_cache_deletion_fails(mocker):
    """A failing DELETE surfaces as is_error with no change reported."""
    schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_ipv6_neighbor_cache': {
            'id': '3',
            'interface': 'test_value_4',
            'ipv6': 'test_value_5',
            'mac': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_ipv6_neighbor_cache.fortios_system(input_data, fos_instance)

    delete_method_mock.assert_called_with('system', 'ipv6-neighbor-cache', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_ipv6_neighbor_cache_idempotent(mocker):
    """A 404-style 'error' from set (object already in desired state) must
    report neither an error nor a change — i.e. the module is idempotent."""
    schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ipv6_neighbor_cache': {
            'id': '3',
            'interface': 'test_value_4',
            'ipv6': 'test_value_5',
            'mac': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_ipv6_neighbor_cache.fortios_system(input_data, fos_instance)

    expected_data = {
        'id': '3',
        'interface': 'test_value_4',
        'ipv6': 'test_value_5',
        'mac': 'test_value_6'
    }

    set_method_mock.assert_called_with('system', 'ipv6-neighbor-cache', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_ipv6_neighbor_cache_filter_foreign_attributes(mocker):
    """Unknown keys in the task arguments must be stripped before the API call."""
    schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_ipv6_neighbor_cache': {
            'random_attribute_not_valid': 'tag',
            'id': '3',
            'interface': 'test_value_4',
            'ipv6': 'test_value_5',
            'mac': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_ipv6_neighbor_cache.fortios_system(input_data, fos_instance)

    # expected_data deliberately omits 'random_attribute_not_valid'.
    expected_data = {
        'id': '3',
        'interface': 'test_value_4',
        'ipv6': 'test_value_5',
        'mac': 'test_value_6'
    }

    set_method_mock.assert_called_with('system', 'ipv6-neighbor-cache', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
bc0fa8f250c68c7f999971227b923c04b645ab61 | 4768e4ad67416e8b93344ccf647954398fd69561 | /microblog/db_helpers.py | 3a122e3cead9c5f0c55ae35f8a93a6433a264d6e | [] | no_license | cleartext/Enterprise-Microblogging-Server | ee0624c004faa8c6ade282a28949fe9a17438e89 | befff4b9ec64b395a9ef55b52ed563a4168bc07f | refs/heads/master | 2016-09-06T16:23:39.454558 | 2011-01-25T21:56:46 | 2011-01-25T21:56:46 | 1,288,330 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | """
Different database helpers, to retrive
information about users.
"""
from microblog.models import User
from microblog.exceptions import UserNotFound
def get_user_by_jid(jid, session):
    """Look up a user by JID, ignoring any resource part after '/'.

    Raises UserNotFound when no matching user exists.
    """
    bare_jid, _, _resource = jid.partition('/')
    user = session.query(User).filter(User.jid == bare_jid).scalar()
    if user is None:
        raise UserNotFound('User with jid "%s" not found.' % bare_jid)
    return user
def get_all_users(session):
    """Return a SQLAlchemy query over every registered user."""
    return session.query(User)
def get_user_by_username(username, session):
    """Look up a user by username; raise UserNotFound when missing."""
    user = session.query(User).filter(User.username == username).scalar()
    if user is not None:
        return user
    raise UserNotFound('User with username "%s" not found.' % username)
| [
"svetlyak.40wt@gmail.com"
] | svetlyak.40wt@gmail.com |
f5a746feee39941cb9838d70afb17bbd77fcc3db | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnsayer.py | 4694c011ef4b512409e0535d1ae2bf4e83b67b7f | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 102 | py | ii = [('AubePRP2.py', 1), ('PettTHE.py', 2), ('RoscTTI2.py', 1), ('WadeJEB.py', 1), ('GodwWLN.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
9bfec149acfe5c6dd3952a7c0436ce971c6faaee | ff44af64d1b03f0b04b4a9482409081810223deb | /hydrus/client/gui/ClientGUIListBoxes.py | 09240e4c7b349d2205023bd7dbdfcda1d0da6520 | [
"WTFPL"
] | permissive | 106FaceEater106/hydrus | 090f81f6c0a300b116e72ace57e7567e357dccef | 8d7fd303345f6ed781e85cff63279885b2499c68 | refs/heads/master | 2022-09-10T05:02:12.060440 | 2020-05-27T21:27:52 | 2020-05-27T21:27:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104,865 | py | import collections
import os
import typing
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.client import ClientConstants as CC
from hydrus.client.media import ClientMedia
from hydrus.client import ClientSearch
from hydrus.client import ClientSerialisable
from hydrus.client import ClientTags
from hydrus.client.gui import ClientGUICommon
from hydrus.client.gui import ClientGUICore as CGC
from hydrus.client.gui import ClientGUIFunctions
from hydrus.client.gui import ClientGUIMenus
from hydrus.client.gui import ClientGUISearch
from hydrus.client.gui import ClientGUIShortcuts
from hydrus.client.gui import QtPorting as QP
class AddEditDeleteListBox( QW.QWidget ):
    
    # Emitted whenever the list's contents change via the add/edit/delete
    # actions (see _Add, _AddAllDefaults, _AddSomeDefaults, _Edit, _Delete).
    listBoxChanged = QC.Signal()
    def __init__( self, parent, height_num_chars, data_to_pretty_callable, add_callable, edit_callable ):
        
        # data_to_pretty_callable: data -> str label shown in the list
        # add_callable: () -> new data; may raise VetoException to cancel
        # edit_callable: data -> edited data; may raise VetoException to cancel
        self._data_to_pretty_callable = data_to_pretty_callable
        self._add_callable = add_callable
        self._edit_callable = edit_callable
        
        QW.QWidget.__init__( self, parent )
        
        self._listbox = QW.QListWidget( self )
        self._listbox.setSelectionMode( QW.QListWidget.ExtendedSelection )
        
        self._add_button = ClientGUICommon.BetterButton( self, 'add', self._Add )
        self._edit_button = ClientGUICommon.BetterButton( self, 'edit', self._Edit )
        self._delete_button = ClientGUICommon.BetterButton( self, 'delete', self._Delete )
        
        # Extra buttons that should only be enabled while rows are selected.
        self._enabled_only_on_selection_buttons = []
        
        # Object types this list accepts — not consumed within this method;
        # presumably used by import/paste helpers elsewhere — TODO confirm.
        self._permitted_object_types = []
        
        #
        
        vbox = QP.VBoxLayout()
        
        self._buttons_hbox = QP.HBoxLayout()
        
        QP.AddToLayout( self._buttons_hbox, self._add_button, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( self._buttons_hbox, self._edit_button, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( self._buttons_hbox, self._delete_button, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        QP.AddToLayout( vbox, self._listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( vbox, self._buttons_hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.setLayout( vbox )
        
        #
        
        # Size the listbox from a character count so it scales with the font.
        ( width, height ) = ClientGUIFunctions.ConvertTextToPixels( self._listbox, ( 20, height_num_chars ) )
        
        self._listbox.setMinimumWidth( width )
        self._listbox.setMinimumHeight( height )
        
        #
        
        self._ShowHideButtons()
        
        self._listbox.itemSelectionChanged.connect( self._ShowHideButtons )
        self._listbox.itemDoubleClicked.connect( self._Edit )
def _Add( self ):
try:
data = self._add_callable()
except HydrusExceptions.VetoException:
return
self._AddData( data )
self.listBoxChanged.emit()
def _AddAllDefaults( self, defaults_callable ):
defaults = defaults_callable()
for default in defaults:
self._AddData( default )
self.listBoxChanged.emit()
def _AddData( self, data ):
self._SetNoneDupeName( data )
pretty_data = self._data_to_pretty_callable( data )
item = QW.QListWidgetItem()
item.setText( pretty_data )
item.setData( QC.Qt.UserRole, data )
self._listbox.addItem( item )
def _AddSomeDefaults( self, defaults_callable ):
defaults = defaults_callable()
selected = False
choice_tuples = [ ( self._data_to_pretty_callable( default ), default, selected ) for default in defaults ]
from hydrus.client.gui import ClientGUITopLevelWindowsPanels
from hydrus.client.gui import ClientGUIScrolledPanelsEdit
with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'select the defaults to add' ) as dlg:
panel = ClientGUIScrolledPanelsEdit.EditChooseMultiple( dlg, choice_tuples )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
defaults_to_add = panel.GetValue()
for default in defaults_to_add:
self._AddData( default )
self.listBoxChanged.emit()
def _Delete( self ):
indices = list( map( lambda idx: idx.row(), self._listbox.selectedIndexes() ) )
if len( indices ) == 0:
return
indices.sort( reverse = True )
from hydrus.client.gui import ClientGUIDialogsQuick
result = ClientGUIDialogsQuick.GetYesNo( self, 'Remove all selected?' )
if result == QW.QDialog.Accepted:
for i in indices:
QP.ListWidgetDelete( self._listbox, i )
self.listBoxChanged.emit()
def _Edit( self ):
for i in range( self._listbox.count() ):
if not QP.ListWidgetIsSelected( self._listbox, i ):
continue
data = QP.GetClientData( self._listbox, i )
try:
new_data = self._edit_callable( data )
except HydrusExceptions.VetoException:
break
QP.ListWidgetDelete( self._listbox, i )
self._SetNoneDupeName( new_data )
pretty_new_data = self._data_to_pretty_callable( new_data )
item = QW.QListWidgetItem()
item.setText( pretty_new_data )
item.setData( QC.Qt.UserRole, new_data )
self._listbox.addItem( item )
self._listbox.insertItem( i, item )
self.listBoxChanged.emit()
def _Duplicate( self ):
dupe_data = self._GetExportObject()
if dupe_data is not None:
dupe_data = dupe_data.Duplicate()
self._ImportObject( dupe_data )
def _ExportToClipboard( self ):
export_object = self._GetExportObject()
if export_object is not None:
json = export_object.DumpToString()
HG.client_controller.pub( 'clipboard', 'text', json )
def _ExportToPng( self ):
export_object = self._GetExportObject()
if export_object is not None:
from hydrus.client.gui import ClientGUITopLevelWindowsPanels
from hydrus.client.gui import ClientGUISerialisable
with ClientGUITopLevelWindowsPanels.DialogNullipotent( self, 'export to png' ) as dlg:
panel = ClientGUISerialisable.PngExportPanel( dlg, export_object )
dlg.SetPanel( panel )
dlg.exec()
def _ExportToPngs( self ):
export_object = self._GetExportObject()
if export_object is None:
return
if not isinstance( export_object, HydrusSerialisable.SerialisableList ):
self._ExportToPng()
return
from hydrus.client.gui import ClientGUITopLevelWindowsPanels
from hydrus.client.gui import ClientGUISerialisable
with ClientGUITopLevelWindowsPanels.DialogNullipotent( self, 'export to pngs' ) as dlg:
panel = ClientGUISerialisable.PngsExportPanel( dlg, export_object )
dlg.SetPanel( panel )
dlg.exec()
def _GetExportObject( self ):
to_export = HydrusSerialisable.SerialisableList()
for obj in self.GetData( only_selected = True ):
to_export.append( obj )
if len( to_export ) == 0:
return None
elif len( to_export ) == 1:
return to_export[0]
else:
return to_export
def _ImportFromClipboard( self ):
try:
raw_text = HG.client_controller.GetClipboardText()
except HydrusExceptions.DataMissing as e:
QW.QMessageBox.critical( self, 'Error', str(e) )
return
try:
obj = HydrusSerialisable.CreateFromString( raw_text )
self._ImportObject( obj )
except Exception as e:
QW.QMessageBox.critical( self, 'Error', 'I could not understand what was in the clipboard' )
def _ImportFromPng( self ):
with QP.FileDialog( self, 'select the png or pngs with the encoded data', acceptMode = QW.QFileDialog.AcceptOpen, fileMode = QW.QFileDialog.ExistingFiles, wildcard = 'PNG (*.png)|*.png' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
for path in dlg.GetPaths():
try:
payload = ClientSerialisable.LoadFromPng( path )
except Exception as e:
QW.QMessageBox.critical( self, 'Error', str(e) )
return
try:
obj = HydrusSerialisable.CreateFromNetworkBytes( payload )
self._ImportObject( obj )
except:
QW.QMessageBox.critical( self, 'Error', 'I could not understand what was encoded in the png!' )
return
def _ImportObject( self, obj ):
bad_object_type_names = set()
if isinstance( obj, HydrusSerialisable.SerialisableList ):
for sub_obj in obj:
self._ImportObject( sub_obj )
else:
if isinstance( obj, self._permitted_object_types ):
self._AddData( obj )
else:
bad_object_type_names.add( HydrusData.GetTypeName( type( obj ) ) )
if len( bad_object_type_names ) > 0:
message = 'The imported objects included these types:'
message += os.linesep * 2
message += os.linesep.join( bad_object_type_names )
message += os.linesep * 2
message += 'Whereas this control only allows:'
message += os.linesep * 2
message += os.linesep.join( ( HydrusData.GetTypeName( o ) for o in self._permitted_object_types ) )
QW.QMessageBox.critical( self, 'Error', message )
self.listBoxChanged.emit()
def _SetNoneDupeName( self, obj ):
pass
def _ShowHideButtons( self ):
if len( self._listbox.selectedItems() ) == 0:
self._edit_button.setEnabled( False )
self._delete_button.setEnabled( False )
for button in self._enabled_only_on_selection_buttons:
button.setEnabled( False )
else:
self._edit_button.setEnabled( True )
self._delete_button.setEnabled( True )
for button in self._enabled_only_on_selection_buttons:
button.setEnabled( True )
def AddDatas( self, datas ):
for data in datas:
self._AddData( data )
self.listBoxChanged.emit()
def AddDefaultsButton( self, defaults_callable ):
import_menu_items = []
all_call = HydrusData.Call( self._AddAllDefaults, defaults_callable )
some_call = HydrusData.Call( self._AddSomeDefaults, defaults_callable )
import_menu_items.append( ( 'normal', 'add them all', 'Load all the defaults.', all_call ) )
import_menu_items.append( ( 'normal', 'select from a list', 'Load some of the defaults.', some_call ) )
button = ClientGUICommon.MenuButton( self, 'add defaults', import_menu_items )
QP.AddToLayout( self._buttons_hbox, button, CC.FLAGS_VCENTER )
def AddImportExportButtons( self, permitted_object_types ):
self._permitted_object_types = permitted_object_types
export_menu_items = []
export_menu_items.append( ( 'normal', 'to clipboard', 'Serialise the selected data and put it on your clipboard.', self._ExportToClipboard ) )
export_menu_items.append( ( 'normal', 'to png', 'Serialise the selected data and encode it to an image file you can easily share with other hydrus users.', self._ExportToPng ) )
all_objs_are_named = False not in ( issubclass( o, HydrusSerialisable.SerialisableBaseNamed ) for o in self._permitted_object_types )
if all_objs_are_named:
export_menu_items.append( ( 'normal', 'to pngs', 'Serialise the selected data and encode it to multiple image files you can easily share with other hydrus users.', self._ExportToPngs ) )
import_menu_items = []
import_menu_items.append( ( 'normal', 'from clipboard', 'Load a data from text in your clipboard.', self._ImportFromClipboard ) )
import_menu_items.append( ( 'normal', 'from pngs', 'Load a data from an encoded png.', self._ImportFromPng ) )
button = ClientGUICommon.MenuButton( self, 'export', export_menu_items )
QP.AddToLayout( self._buttons_hbox, button, CC.FLAGS_VCENTER )
self._enabled_only_on_selection_buttons.append( button )
button = ClientGUICommon.MenuButton( self, 'import', import_menu_items )
QP.AddToLayout( self._buttons_hbox, button, CC.FLAGS_VCENTER )
button = ClientGUICommon.BetterButton( self, 'duplicate', self._Duplicate )
QP.AddToLayout( self._buttons_hbox, button, CC.FLAGS_VCENTER )
self._enabled_only_on_selection_buttons.append( button )
self._ShowHideButtons()
def AddSeparator( self ):
QP.AddToLayout( self._buttons_hbox, (20,20), CC.FLAGS_EXPAND_PERPENDICULAR )
def GetCount( self ):
return self._listbox.count()
def GetData( self, only_selected = False ):
datas = []
for i in range( self._listbox.count() ):
if only_selected and not QP.ListWidgetIsSelected( self._listbox, i ):
continue
data = QP.GetClientData( self._listbox, i )
datas.append( data )
return datas
def GetValue( self ):
return self.GetData()
class AddEditDeleteListBoxUniqueNamedObjects( AddEditDeleteListBox ):
    """An AddEditDeleteListBox whose stored objects must carry unique names."""
    
    def _SetNoneDupeName( self, obj ):
        
        # rename obj if its name clashes with any object already in the list
        existing_names = set()
        
        for existing in self.GetData():
            
            existing_names.add( existing.GetName() )
            
        HydrusSerialisable.SetNonDupeName( obj, existing_names )
class QueueListBox( QW.QWidget ):
    """An ordered QListWidget with up/down/delete (and optional add/edit) buttons.
    
    Rows hold arbitrary data objects in each item's UserRole; the display string
    comes from data_to_pretty_callable. add_callable/edit_callable may be None,
    which hides the corresponding button; they raise
    HydrusExceptions.VetoException to cancel. listBoxChanged is emitted on any
    content change.
    """
    
    listBoxChanged = QC.Signal()
    
    def __init__( self, parent, height_num_chars, data_to_pretty_callable, add_callable = None, edit_callable = None ):
        
        self._data_to_pretty_callable = data_to_pretty_callable
        self._add_callable = add_callable
        self._edit_callable = edit_callable
        
        QW.QWidget.__init__( self, parent )
        
        self._listbox = QW.QListWidget( self )
        self._listbox.setSelectionMode( QW.QListWidget.ExtendedSelection )
        
        self._up_button = ClientGUICommon.BetterButton( self, '\u2191', self._Up )
        self._delete_button = ClientGUICommon.BetterButton( self, 'X', self._Delete )
        self._down_button = ClientGUICommon.BetterButton( self, '\u2193', self._Down )
        
        self._add_button = ClientGUICommon.BetterButton( self, 'add', self._Add )
        self._edit_button = ClientGUICommon.BetterButton( self, 'edit', self._Edit )
        
        if self._add_callable is None:
            self._add_button.hide()
        
        if self._edit_callable is None:
            self._edit_button.hide()
        
        #
        
        vbox = QP.VBoxLayout()
        
        buttons_vbox = QP.VBoxLayout()
        
        QP.AddToLayout( buttons_vbox, self._up_button, CC.FLAGS_VCENTER )
        QP.AddToLayout( buttons_vbox, self._delete_button, CC.FLAGS_VCENTER )
        QP.AddToLayout( buttons_vbox, self._down_button, CC.FLAGS_VCENTER )
        
        hbox = QP.HBoxLayout()
        
        QP.AddToLayout( hbox, self._listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( hbox, buttons_vbox, CC.FLAGS_VCENTER )
        
        buttons_hbox = QP.HBoxLayout()
        
        QP.AddToLayout( buttons_hbox, self._add_button, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( buttons_hbox, self._edit_button, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        QP.AddToLayout( vbox, hbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( vbox, buttons_hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.setLayout( vbox )
        
        #
        
        ( width, height ) = ClientGUIFunctions.ConvertTextToPixels( self._listbox, ( 20, height_num_chars ) )
        
        self._listbox.setMinimumWidth( width )
        self._listbox.setMinimumHeight( height )
        
        #
        
        self._listbox.itemSelectionChanged.connect( self.EventSelection )
        self._listbox.itemDoubleClicked.connect( self._Edit )
        
    def _Add( self ):
        """Ask the add callable for a new data object and append it."""
        
        try:
            data = self._add_callable()
        except HydrusExceptions.VetoException:
            return
        
        self._AddData( data )
        
        self.listBoxChanged.emit()
        
    def _AddData( self, data ):
        """Append one data object as a new row (no change signal emitted here)."""
        
        pretty_data = self._data_to_pretty_callable( data )
        
        item = QW.QListWidgetItem()
        item.setText( pretty_data )
        item.setData( QC.Qt.UserRole, data )
        
        self._listbox.addItem( item )
        
    def _Delete( self ):
        """Delete all selected rows, after a yes/no confirmation."""
        
        # bug fix: previously the raw QModelIndex objects were sorted and passed to
        # QP.ListWidgetDelete; we need the integer rows, as in
        # AddEditDeleteListBox._Delete
        indices = list( map( lambda idx: idx.row(), self._listbox.selectedIndexes() ) )
        
        if len( indices ) == 0:
            return
        
        # delete bottom-up so earlier deletions do not shift later indices
        indices.sort( reverse = True )
        
        from hydrus.client.gui import ClientGUIDialogsQuick
        
        result = ClientGUIDialogsQuick.GetYesNo( self, 'Remove all selected?' )
        
        if result == QW.QDialog.Accepted:
            
            for i in indices:
                QP.ListWidgetDelete( self._listbox, i )
            
            self.listBoxChanged.emit()
            
    def _Down( self ):
        """Move every selected row down one place, preserving relative order."""
        
        indices = list( map( lambda idx: idx.row(), self._listbox.selectedIndexes() ) )
        
        # bottom-up, so a block of selected rows shuffles down cleanly
        indices.sort( reverse = True )
        
        for i in indices:
            
            if i < self._listbox.count() - 1:
                
                if not QP.ListWidgetIsSelected( self._listbox, i+1 ): # is the one below not selected?
                    
                    self._SwapRows( i, i + 1 )
                    
        self.listBoxChanged.emit()
        
    def _Edit( self ):
        """Run the edit callable over every selected row, replacing each in place."""
        
        for i in range( self._listbox.count() ):
            
            if not QP.ListWidgetIsSelected( self._listbox, i ):
                continue
            
            data = QP.GetClientData( self._listbox, i )
            
            try:
                new_data = self._edit_callable( data )
            except HydrusExceptions.VetoException:
                break
            
            QP.ListWidgetDelete( self._listbox, i )
            
            pretty_new_data = self._data_to_pretty_callable( new_data )
            
            new_item = QW.QListWidgetItem()
            new_item.setText( pretty_new_data )
            new_item.setData( QC.Qt.UserRole, new_data )
            
            self._listbox.insertItem( i, new_item )
            
        self.listBoxChanged.emit()
        
    def _SwapRows( self, index_a, index_b ):
        """Swap the rows at the two indices, preserving their selection state."""
        
        a_was_selected = QP.ListWidgetIsSelected( self._listbox, index_a )
        b_was_selected = QP.ListWidgetIsSelected( self._listbox, index_b )
        
        data_a = QP.GetClientData( self._listbox, index_a )
        data_b = QP.GetClientData( self._listbox, index_b )
        
        pretty_data_a = self._data_to_pretty_callable( data_a )
        pretty_data_b = self._data_to_pretty_callable( data_b )
        
        QP.ListWidgetDelete( self._listbox, index_a )
        
        item_b = QW.QListWidgetItem()
        item_b.setText( pretty_data_b )
        item_b.setData( QC.Qt.UserRole, data_b )
        
        self._listbox.insertItem( index_a, item_b )
        
        QP.ListWidgetDelete( self._listbox, index_b )
        
        item_a = QW.QListWidgetItem()
        item_a.setText( pretty_data_a )
        item_a.setData( QC.Qt.UserRole, data_a )
        
        self._listbox.insertItem( index_b, item_a )
        
        if b_was_selected:
            QP.ListWidgetSetSelection( self._listbox, index_a )
        
        if a_was_selected:
            QP.ListWidgetSetSelection( self._listbox, index_b )
        
    def _Up( self ):
        """Move every selected row up one place, preserving relative order."""
        
        indices = list( map( lambda idx: idx.row(), self._listbox.selectedIndexes() ) )
        
        # bug fix: selectedIndexes() order is not guaranteed; sort ascending so a
        # block of selected rows shuffles up cleanly (mirrors _Down's reverse sort)
        indices.sort()
        
        for i in indices:
            
            if i > 0:
                
                if not QP.ListWidgetIsSelected( self._listbox, i-1 ): # is the one above not selected?
                    
                    self._SwapRows( i, i - 1 )
                    
        self.listBoxChanged.emit()
        
    def AddDatas( self, datas ):
        """Append several data objects, then emit the change signal once."""
        
        for data in datas:
            self._AddData( data )
        
        self.listBoxChanged.emit()
        
    def EventSelection( self ):
        """Enable/disable the move/delete/edit buttons to match the selection."""
        
        if len( self._listbox.selectedIndexes() ) == 0:
            
            self._up_button.setEnabled( False )
            self._delete_button.setEnabled( False )
            self._down_button.setEnabled( False )
            
            self._edit_button.setEnabled( False )
            
        else:
            
            self._up_button.setEnabled( True )
            self._delete_button.setEnabled( True )
            self._down_button.setEnabled( True )
            
            self._edit_button.setEnabled( True )
            
    def GetCount( self ):
        """Return the number of rows."""
        
        return self._listbox.count()
        
    def GetData( self, only_selected = False ):
        """Return the stored data objects, in row order; optionally selection only."""
        
        datas = []
        
        for i in range( self._listbox.count() ):
            
            # bug fix: only_selected was previously accepted but ignored; honour it,
            # matching AddEditDeleteListBox.GetData
            if only_selected and not QP.ListWidgetIsSelected( self._listbox, i ):
                continue
            
            data = QP.GetClientData( self._listbox, i )
            
            datas.append( data )
            
        return datas
        
    def Pop( self ):
        """Remove and return the data of the first row, or None if empty."""
        
        if self._listbox.count() == 0:
            return None
        
        data = QP.GetClientData( self._listbox, 0 )
        
        QP.ListWidgetDelete( self._listbox, 0 )
        
        return data
class ListBox( QW.QScrollArea ):
    """A custom-painted scrollable list of 'terms'.
    
    Terms are arbitrary hashable objects; subclasses supply their display text
    via _GetTextFromTerm. The widget handles selection (click, shift/ctrl,
    keyboard), activation, and drawing, one fixed-height text row per term.
    listBoxChanged is emitted when the contents change.
    """
    
    listBoxChanged = QC.Signal()
    
    # horizontal pixel padding before each row's text
    TEXT_X_PADDING = 3
    
    def __init__( self, parent, height_num_chars = 10 ):
        
        QW.QScrollArea.__init__( self, parent )
        self.setFrameStyle( QW.QFrame.Panel | QW.QFrame.Sunken )
        self.setHorizontalScrollBarPolicy( QC.Qt.ScrollBarAlwaysOff )
        self.setVerticalScrollBarPolicy( QC.Qt.ScrollBarAsNeeded )
        # the inner widget is the paint surface; _InnerWidget delegates to _Redraw
        self.setWidget( ListBox._InnerWidget( self ) )
        self.setWidgetResizable( True )
        
        self._background_colour = QG.QColor( 255, 255, 255 )
        
        # _terms: fast membership; _ordered_terms: display order;
        # _terms_to_texts: cached display strings
        self._terms = set()
        self._ordered_terms = []
        self._selected_terms = set()
        self._terms_to_texts = {}
        
        # anchor for shift-select and keyboard navigation
        self._last_hit_index = None
        
        self._last_view_start = None
        
        self._height_num_chars = height_num_chars
        self._minimum_height_num_chars = 8
        
        # updated on resize; used for PageUp/PageDown stepping
        self._num_rows_per_page = 0
        
        self.setFont( QW.QApplication.font() )
        
        self._widget_event_filter = QP.WidgetEventFilter( self.widget() )
        
        self._widget_event_filter.EVT_LEFT_DOWN( self.EventMouseSelect )
        self._widget_event_filter.EVT_RIGHT_DOWN( self.EventMouseSelect )
        self._widget_event_filter.EVT_LEFT_DCLICK( self.EventDClick )
        
    def __len__( self ):
        
        return len( self._ordered_terms )
        
    def __bool__( self ):
        
        # truthiness tracks whether the underlying C++ widget is still alive
        return QP.isValid( self )
        
    def _Activate( self ):
        
        # subclass hook: act on the current selection (e.g. enter/double-click)
        pass
        
    def _ActivateFromKeyboard( self ):
        
        # remember where the selection was so we can re-hit a sensible row if
        # activation consumed (removed) the selected terms
        selected_indices = []
        
        for term in self._selected_terms:
            
            try:
                
                index = self._GetIndexFromTerm( term )
                
                selected_indices.append( index )
                
            except HydrusExceptions.DataMissing:
                
                pass
                
        self._Activate()
        
        if len( self._selected_terms ) == 0 and len( selected_indices ) > 0:
            
            ideal_index = min( selected_indices )
            
            # fall back through nearby candidates until one is hittable
            ideal_indices = [ ideal_index, ideal_index - 1, 0 ]
            
            for ideal_index in ideal_indices:
                
                if self._CanSelectIndex( ideal_index ):
                    
                    self._Hit( False, False, ideal_index )
                    
                    break
                    
    def _DeleteActivate( self ):
        
        # subclass hook: delete-key behaviour
        pass
        
    def _AppendTerm( self, term ):
        
        was_selected_before = term in self._selected_terms
        
        # re-appending moves the term to the end; remove the old copy first
        if term in self._terms:
            
            self._RemoveTerm( term )
            
        self._terms.add( term )
        self._ordered_terms.append( term )
        
        self._terms_to_texts[ term ] = self._GetTextFromTerm( term )
        
        if was_selected_before:
            
            self._selected_terms.add( term )
            
    def _CanHitIndex( self, index ):
        
        # subclass hook: whether keyboard/mouse navigation may land on this row
        return True
        
    def _CanSelectIndex( self, index ):
        
        # subclass hook: whether this row may be selected
        return True
        
    def _Clear( self ):
        
        self._terms = set()
        self._ordered_terms = []
        self._selected_terms = set()
        self._terms_to_texts = {}
        
        self._last_hit_index = None
        
        self._last_view_start = None
        
    def _DataHasChanged( self ):
        
        self._SetVirtualSize()
        
        self.widget().update()
        
        self.listBoxChanged.emit()
        
    def _Deselect( self, index ):
        
        term = self._GetTerm( index )
        
        self._selected_terms.discard( term )
        
    def _DeselectAll( self ):
        
        self._selected_terms = set()
        
    def _GetIndexFromTerm( self, term ):
        
        if term in self._ordered_terms:
            
            return self._ordered_terms.index( term )
            
        raise HydrusExceptions.DataMissing()
        
    def _GetIndexUnderMouse( self, mouse_event ):
        
        y = mouse_event.pos().y()
        
        text_height = self.fontMetrics().height()
        
        # rows are fixed-height, so the row index is just y // row height
        row_index = y // text_height
        
        if row_index >= len( self._ordered_terms ):
            
            return None
            
        return row_index
        
    def _GetSelectedPredicatesAndInverseCopies( self ):
        
        # wrap plain (non-Predicate) terms as tag predicates so callers get a
        # uniform (predicates, inverse_predicates) pair
        predicates = []
        inverse_predicates = []
        
        for term in self._selected_terms:
            
            if isinstance( term, ClientSearch.Predicate ):
                
                predicates.append( term )
                
                possible_inverse = term.GetInverseCopy()
                
                if possible_inverse is not None:
                    
                    inverse_predicates.append( possible_inverse )
                    
            else:
                
                s = term
                
                predicates.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, term ) )
                inverse_predicates.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, term, False ) )
                
        return ( predicates, inverse_predicates )
        
    def _GetSafeHitIndex( self, hit_index, direction = None ):
        
        # clamp hit_index into range and walk in 'direction' until a row that
        # _CanHitIndex accepts is found; returns None if no row qualifies
        if direction is None:
            
            if hit_index == 0:
                
                direction = 1
                
            else:
                
                direction = -1
                
        num_terms = len( self._ordered_terms )
        
        if num_terms == 0:
            
            return None
            
        original_hit_index = hit_index
        
        if hit_index is not None:
            
            # if click/selection is out of bounds, fix it
            if hit_index == -1 or hit_index > num_terms:
                
                hit_index = num_terms - 1
                
            elif hit_index == num_terms or hit_index < -1:
                
                hit_index = 0
                
            # while it is not ok to hit, move index
            while not self._CanHitIndex( hit_index ):
                
                hit_index = ( hit_index + direction ) % num_terms
                
                if hit_index == original_hit_index:
                    
                    # bail out if we circled around not finding an ok one to hit
                    return None
                    
        return hit_index
        
    def _GetSimplifiedTextFromTerm( self, term ):
        
        # subclass hook: a plainer display string (e.g. without counts)
        return self._GetTextFromTerm( term )
        
    def _GetTerm( self, index ):
        
        if index < 0 or index > len( self._ordered_terms ) - 1:
            
            raise HydrusExceptions.DataMissing( 'No term for index ' + str( index ) )
            
        return self._ordered_terms[ index ]
        
    def _GetTextsAndColours( self, term ):
        
        # subclass hook: list of ( text, ( r, g, b ) ) fragments for one row
        text = self._terms_to_texts[ term ]
        
        return [ ( text, ( 0, 111, 250 ) ) ]
        
    def _GetTextFromTerm( self, term ):
        
        # subclasses must supply the display string for a term
        raise NotImplementedError()
        
    def _HandleClick( self, event ):
        
        hit_index = self._GetIndexUnderMouse( event )
        
        shift = event.modifiers() & QC.Qt.ShiftModifier
        ctrl = event.modifiers() & QC.Qt.ControlModifier
        
        self._Hit( shift, ctrl, hit_index )
        
    def _Hit( self, shift, ctrl, hit_index ):
        """Apply a selection 'hit' with standard shift (range) / ctrl (toggle)
        semantics, then scroll the hit row into view and repaint."""
        
        hit_index = self._GetSafeHitIndex( hit_index )
        
        to_select = set()
        to_deselect = set()
        
        deselect_all = False
        
        if shift:
            
            if hit_index is not None:
                
                if self._last_hit_index is not None:
                    
                    # extend a contiguous range from the last hit to here
                    lower = min( hit_index, self._last_hit_index )
                    upper = max( hit_index, self._last_hit_index )
                    
                    to_select = list( range( lower, upper + 1) )
                    
                else:
                    
                    to_select.add( hit_index )
                    
        elif ctrl:
            
            if hit_index is not None:
                
                # toggle just this row
                if self._IsSelected( hit_index ):
                    
                    to_deselect.add( hit_index )
                    
                else:
                    
                    to_select.add( hit_index )
                    
        else:
            
            if hit_index is None:
                
                deselect_all = True
                
            else:
                
                # plain click on an already-selected row keeps the selection
                if not self._IsSelected( hit_index ):
                    
                    deselect_all = True
                    to_select.add( hit_index )
                    
        if deselect_all:
            
            self._DeselectAll()
            
        for index in to_select:
            
            self._Select( index )
            
        for index in to_deselect:
            
            self._Deselect( index )
            
        self._last_hit_index = hit_index
        
        if self._last_hit_index is not None:
            
            # scroll so the hit row is visible
            text_height = self.fontMetrics().height()
            
            y = text_height * self._last_hit_index
            
            visible_rect = QP.ScrollAreaVisibleRect( self )
            
            visible_rect_y = visible_rect.y()
            
            visible_rect_height = visible_rect.height()
            
            if y < visible_rect_y:
                
                self.ensureVisible( 0, y, 0, 0 )
                
            elif y > visible_rect_y + visible_rect_height - text_height:
                
                self.ensureVisible( 0, y + text_height , 0, 0 )
                
        self.widget().update()
        
    def _HitFirstSelectedItem( self ):
        
        selected_indices = []
        
        if len( self._selected_terms ) > 0:
            
            for term in self._selected_terms:
                
                try:
                    
                    index = self._GetIndexFromTerm( term )
                    
                    selected_indices.append( index )
                    
                except HydrusExceptions.DataMissing:
                    
                    pass
                    
            if len( selected_indices ) > 0:
                
                first_index = min( selected_indices )
                
                self._Hit( False, False, first_index )
                
    def _IsSelected( self, index ):
        
        try:
            
            term = self._GetTerm( index )
            
        except HydrusExceptions.DataMissing:
            
            return False
            
        return term in self._selected_terms
        
    def _Redraw( self, painter ):
        """Paint only the rows intersecting the visible rect; selected rows get
        an inverted colour scheme (coloured background, background-colour text)."""
        
        text_height = self.fontMetrics().height()
        
        visible_rect = QP.ScrollAreaVisibleRect( self )
        
        visible_rect_y = visible_rect.y()
        
        visible_rect_width = visible_rect.width()
        visible_rect_height = visible_rect.height()
        
        first_visible_index = visible_rect_y // text_height
        
        last_visible_index = ( visible_rect_y + visible_rect_height ) // text_height
        
        # include a partially-visible bottom row
        if ( visible_rect_y + visible_rect_height ) % text_height != 0:
            
            last_visible_index += 1
            
        last_visible_index = min( last_visible_index, len( self._ordered_terms ) - 1 )
        
        painter.setBackground( QG.QBrush( self._background_colour ) )
        
        painter.eraseRect( painter.viewport() )
        
        for ( i, current_index ) in enumerate( range( first_visible_index, last_visible_index + 1 ) ):
            
            term = self._GetTerm( current_index )
            
            # a row may be several coloured fragments (e.g. namespace + subtag)
            texts_and_colours = self._GetTextsAndColours( term )
            
            there_is_more_than_one_text = len( texts_and_colours ) > 1
            
            x_start = self.TEXT_X_PADDING
            
            for ( text, ( r, g, b ) ) in texts_and_colours:
                
                text_colour = QG.QColor( r, g, b )
                
                if term in self._selected_terms:
                    
                    # selected: draw a filled rect in the text colour, then draw
                    # the text in the background colour on top
                    painter.setBrush( QG.QBrush( text_colour ) )
                    
                    painter.setPen( QC.Qt.NoPen )
                    
                    if x_start == self.TEXT_X_PADDING:
                        
                        background_colour_x = 0
                        
                    else:
                        
                        background_colour_x = x_start
                        
                    painter.drawRect( background_colour_x, current_index * text_height, visible_rect_width, text_height )
                    
                    text_colour = self._background_colour
                    
                painter.setPen( QG.QPen( text_colour ) )
                
                ( x, y ) = ( x_start, current_index * text_height )
                
                this_text_size = painter.fontMetrics().size( QC.Qt.TextSingleLine, text )
                
                this_text_width = this_text_size.width()
                this_text_height = this_text_size.height()
                
                painter.drawText( QC.QRectF( x, y, this_text_width, this_text_height ), text )
                
                if there_is_more_than_one_text:
                    
                    x_start += this_text_width
                    
    def _RefreshTexts( self ):
        
        # recompute every cached display string and repaint
        self._terms_to_texts = { term : self._GetTextFromTerm( term ) for term in self._terms }
        
        self.widget().update()
        
    def _RemoveSelectedTerms( self ):
        
        # iterate a copy: _RemoveTerm mutates _selected_terms
        for term in list( self._selected_terms ):
            
            self._RemoveTerm( term )
            
    def _RemoveTerm( self, term ):
        
        if term in self._terms:
            
            self._terms.discard( term )
            
            self._ordered_terms.remove( term )
            
            self._selected_terms.discard( term )
            
            del self._terms_to_texts[ term ]
            
    def _Select( self, index ):
        
        if not self._CanSelectIndex( index ):
            
            return
            
        try:
            
            term = self._GetTerm( index )
            
            self._selected_terms.add( term )
            
        except HydrusExceptions.DataMissing:
            
            pass
            
    def _SelectAll( self ):
        
        self._selected_terms = set( self._terms )
        
    def _SetVirtualSize( self ):
        
        self.setWidgetResizable( True )
        
        my_size = self.widget().size()
        
        text_height = self.fontMetrics().height()
        
        # virtual height = one text row per term
        ideal_virtual_size = QC.QSize( my_size.width(), text_height * len( self._ordered_terms ) )
        
        if ideal_virtual_size != self.widget().size():
            
            self.widget().setMinimumSize( ideal_virtual_size )
            
    def _SortByText( self ):
        
        def lexicographic_key( term ):
            
            return self._terms_to_texts[ term ]
            
        self._ordered_terms.sort( key = lexicographic_key )
        
    def keyPressEvent( self, event ):
        """Keyboard handling: delete/enter activate, ctrl+a selects all,
        home/end/arrows/page keys move the hit index."""
        
        shift = event.modifiers() & QC.Qt.ShiftModifier
        ctrl = event.modifiers() & QC.Qt.ControlModifier
        
        key_code = event.key()
        
        if self.hasFocus() and key_code in ClientGUIShortcuts.DELETE_KEYS_QT:
            
            self._DeleteActivate()
            
        elif key_code in ( QC.Qt.Key_Enter, QC.Qt.Key_Return ):
            
            self._ActivateFromKeyboard()
            
        else:
            
            if ctrl and key_code in ( ord( 'A' ), ord( 'a' ) ):
                
                self._SelectAll()
                
                self.widget().update()
                
            else:
                
                hit_index = None
                
                if len( self._ordered_terms ) > 1:
                    
                    # roll_up/roll_down pick the search direction if the target
                    # row itself is not hittable
                    roll_up = False
                    roll_down = False
                    
                    if key_code in ( QC.Qt.Key_Home, ):
                        
                        hit_index = 0
                        
                    elif key_code in ( QC.Qt.Key_End, ):
                        
                        hit_index = len( self._ordered_terms ) - 1
                        
                        roll_up = True
                        
                    elif self._last_hit_index is not None:
                        
                        if key_code in ( QC.Qt.Key_Up, ):
                            
                            hit_index = self._last_hit_index - 1
                            
                            roll_up = True
                            
                        elif key_code in ( QC.Qt.Key_Down, ):
                            
                            hit_index = self._last_hit_index + 1
                            
                            roll_down = True
                            
                        elif key_code in ( QC.Qt.Key_PageUp, ):
                            
                            hit_index = max( 0, self._last_hit_index - self._num_rows_per_page )
                            
                            roll_up = True
                            
                        elif key_code in ( QC.Qt.Key_PageDown, ):
                            
                            hit_index = min( len( self._ordered_terms ) - 1, self._last_hit_index + self._num_rows_per_page )
                            
                            roll_down = True
                            
                if hit_index is None:
                    
                    # don't send to parent, which will do silly scroll window business with arrow key presses
                    event.ignore()
                    
                else:
                    
                    if roll_up:
                        
                        hit_index = self._GetSafeHitIndex( hit_index, -1 )
                        
                    if roll_down:
                        
                        hit_index = self._GetSafeHitIndex( hit_index, 1 )
                        
                    self._Hit( shift, ctrl, hit_index )
                    
    def EventDClick( self, event ):
        
        self._Activate()
        
    def EventMouseSelect( self, event ):
        
        self._HandleClick( event )
        
        return True # was: event.ignore()
        
    class _InnerWidget( QW.QWidget ):
        """The scroll area's paint surface; delegates painting to the parent ListBox."""
        
        def __init__( self, parent ):
            
            QW.QWidget.__init__( self, parent )
            
            self._parent = parent
            
        def paintEvent( self, event ):
            
            painter = QG.QPainter( self )
            
            self._parent._Redraw( painter )
            
    def resizeEvent( self, event ):
        
        text_height = self.fontMetrics().height()
        
        visible_rect = QP.ScrollAreaVisibleRect( self )
        
        # scroll one row per wheel/arrow step
        self.verticalScrollBar().setSingleStep( text_height )
        
        visible_rect_height = visible_rect.height()
        
        self._num_rows_per_page = visible_rect_height // text_height
        
        self._SetVirtualSize()
        
        self.widget().update()
        
    def GetIdealHeight( self ):
        
        text_height = self.fontMetrics().height()
        
        return text_height * len( self._ordered_terms ) + 20
        
    def HasValues( self ):
        
        return len( self._ordered_terms ) > 0
        
    def minimumSizeHint( self ):
        
        size_hint = QW.QScrollArea.minimumSizeHint( self )
        
        text_height = self.fontMetrics().height()
        
        minimum_height = self._minimum_height_num_chars * text_height + ( self.frameWidth() * 2 )
        
        size_hint.setHeight( minimum_height )
        
        return size_hint
        
    def MoveSelectionDown( self ):
        
        if len( self._ordered_terms ) > 1 and self._last_hit_index is not None:
            
            # wraps around the bottom back to the top
            hit_index = ( self._last_hit_index + 1 ) % len( self._ordered_terms )
            
            hit_index = self._GetSafeHitIndex( hit_index, 1 )
            
            self._Hit( False, False, hit_index )
            
    def MoveSelectionUp( self ):
        
        if len( self._ordered_terms ) > 1 and self._last_hit_index is not None:
            
            # wraps around the top back to the bottom
            hit_index = ( self._last_hit_index - 1 ) % len( self._ordered_terms )
            
            hit_index = self._GetSafeHitIndex( hit_index, -1 )
            
            self._Hit( False, False, hit_index )
            
    def SelectTopItem( self ):
        
        if len( self._ordered_terms ) > 0:
            
            self._selected_terms = set()
            
            self._Hit( False, False, 0 )
            
    def SetMinimumHeightNumChars( self, minimum_height_num_chars ):
        
        self._minimum_height_num_chars = minimum_height_num_chars
        
    def sizeHint( self ):
        
        size_hint = QW.QScrollArea.sizeHint( self )
        
        text_height = self.fontMetrics().height()
        
        ideal_height = self._height_num_chars * text_height + ( self.frameWidth() * 2 )
        
        size_hint.setHeight( ideal_height )
        
        return size_hint
class ListBoxTags( ListBox ):
ors_are_under_construction = False
has_counts = False
can_spawn_new_windows = True
def __init__( self, *args, **kwargs ):
ListBox.__init__( self, *args, **kwargs )
self._tag_display_type = ClientTags.TAG_DISPLAY_STORAGE
self._page_key = None # placeholder. if a subclass sets this, it changes menu behaviour to allow 'select this tag' menu pubsubs
self._UpdateBackgroundColour()
self._widget_event_filter.EVT_MIDDLE_DOWN( self.EventMouseMiddleClick )
HG.client_controller.sub( self, 'ForceTagRecalc', 'refresh_all_tag_presentation_gui' )
HG.client_controller.sub( self, '_UpdateBackgroundColour', 'notify_new_colourset' )
def _CanProvideCurrentPagePredicates( self ):
return False
def _GetNamespaceColours( self ):
return HC.options[ 'namespace_colours' ]
def _GetCurrentFileServiceKey( self ):
return CC.LOCAL_FILE_SERVICE_KEY
def _GetCurrentPagePredicates( self ) -> typing.Set[ ClientSearch.Predicate ]:
return set()
def _GetNamespaceFromTerm( self, term ):
raise NotImplementedError()
def _GetSelectedActualTags( self ):
selected_actual_tags = set()
for term in self._selected_terms:
if isinstance( term, ClientSearch.Predicate ):
if term.GetType() == ClientSearch.PREDICATE_TYPE_TAG:
tag = term.GetValue()
selected_actual_tags.add( tag )
else:
tag = term
selected_actual_tags.add( tag )
return selected_actual_tags
def _GetCopyableTagStrings( self, only_selected = False, with_counts = False ):
if only_selected:
terms = self._selected_terms
else:
terms = self._ordered_terms
selected_copyable_tag_strings = set()
for term in terms:
if isinstance( term, ClientSearch.Predicate ):
if term.GetType() in ( ClientSearch.PREDICATE_TYPE_TAG, ClientSearch.PREDICATE_TYPE_NAMESPACE, ClientSearch.PREDICATE_TYPE_WILDCARD ):
tag = term.GetValue()
else:
tag = term.ToString( with_count = with_counts )
selected_copyable_tag_strings.add( tag )
else:
tag = str( term )
selected_copyable_tag_strings.add( tag )
return selected_copyable_tag_strings
def _GetTagFromTerm( self, term ):
raise NotImplementedError()
def _GetTextsAndColours( self, term ):
namespace_colours = self._GetNamespaceColours()
if isinstance( term, ClientSearch.Predicate ) and term.GetType() == ClientSearch.PREDICATE_TYPE_OR_CONTAINER:
texts_and_namespaces = term.GetTextsAndNamespaces( or_under_construction = self.ors_are_under_construction )
else:
text = self._terms_to_texts[ term ]
namespace = self._GetNamespaceFromTerm( term )
texts_and_namespaces = [ ( text, namespace ) ]
texts_and_colours = []
for ( text, namespace ) in texts_and_namespaces:
if namespace in namespace_colours:
( r, g, b ) = namespace_colours[ namespace ]
else:
( r, g, b ) = namespace_colours[ None ]
texts_and_colours.append( ( text, ( r, g, b ) ) )
return texts_and_colours
def _NewSearchPage( self ):
predicates = []
for term in self._selected_terms:
if isinstance( term, ClientSearch.Predicate ):
predicates.append( term )
else:
predicates.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, term ) )
predicates = ClientGUISearch.FleshOutPredicates( self, predicates )
if len( predicates ) > 0:
s = sorted( ( predicate.ToString() for predicate in predicates ) )
page_name = ', '.join( s )
activate_window = HG.client_controller.new_options.GetBoolean( 'activate_window_on_tag_search_page_activation' )
file_service_key = self._GetCurrentFileServiceKey()
HG.client_controller.pub( 'new_page_query', file_service_key, initial_predicates = predicates, page_name = page_name, activate_window = activate_window )
def _NewSearchPageForEach( self ):
predicates = []
for term in self._selected_terms:
if isinstance( term, ClientSearch.Predicate ):
predicates.append( term )
else:
predicates.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, term ) )
predicates = ClientGUISearch.FleshOutPredicates( self, predicates )
for predicate in predicates:
page_name = predicate.ToString()
HG.client_controller.pub( 'new_page_query', CC.LOCAL_FILE_SERVICE_KEY, initial_predicates = ( predicate, ), page_name = page_name )
def _ProcessMenuCopyEvent( self, command ):
only_selected = False
with_counts = False
texts = []
if command in ( 'copy_selected_terms', 'copy_selected_sub_terms' ):
only_selected = True
if command == 'copy_all_tags_with_counts':
with_counts = True
texts = self._GetCopyableTagStrings( only_selected = only_selected, with_counts = with_counts )
texts = HydrusTags.SortNumericTags( texts )
if command == 'copy_selected_sub_terms':
texts = [ subtag for ( namespace, subtag ) in [ HydrusTags.SplitTag( text ) for text in texts ] ]
if len( texts ) > 0:
text = os.linesep.join( texts )
HG.client_controller.pub( 'clipboard', 'text', text )
def _ProcessMenuPredicateEvent( self, command ):
pass
def _ProcessMenuTagEvent( self, command ):
    """Handle a tag-related context menu command.

    'hide'/'hide_namespace' (single tag only) ask for confirmation and then add
    a hide rule to the tag display manager; 'parent'/'sibling' open the
    corresponding manage dialog for all selected tags.
    """
    # Resolve selected terms to actual tag strings, dropping non-tag terms.
    tags = [ self._GetTagFromTerm( term ) for term in self._selected_terms ]
    tags = [ tag for tag in tags if tag is not None ]
    if command in ( 'hide', 'hide_namespace' ):
        # Hiding is only offered for a single selected tag.
        if len( tags ) == 1:
            ( tag, ) = tags
            if command == 'hide':
                message = 'Hide "{}" from here?'.format( tag )
                # Imported locally, presumably to avoid a circular import — TODO confirm.
                from hydrus.client.gui import ClientGUIDialogsQuick
                result = ClientGUIDialogsQuick.GetYesNo( self, message )
                if result != QW.QDialog.Accepted:
                    return
                HG.client_controller.tag_display_manager.HideTag( self._tag_display_type, CC.COMBINED_TAG_SERVICE_KEY, tag )
            elif command == 'hide_namespace':
                ( namespace, subtag ) = HydrusTags.SplitTag( tag )
                if namespace == '':
                    insert = 'unnamespaced'
                else:
                    insert = '"{}"'.format( namespace )
                message = 'Hide {} tags from here?'.format( insert )
                from hydrus.client.gui import ClientGUIDialogsQuick
                result = ClientGUIDialogsQuick.GetYesNo( self, message )
                if result != QW.QDialog.Accepted:
                    return
                # A namespace hide rule is stored with a trailing colon.
                if namespace != '':
                    namespace += ':'
                HG.client_controller.tag_display_manager.HideTag( self._tag_display_type, CC.COMBINED_TAG_SERVICE_KEY, namespace )
            HG.client_controller.pub( 'notify_new_tag_display_rules' )
    else:
        # 'parent' / 'sibling': open the relevant manage dialog for all tags.
        from hydrus.client.gui import ClientGUITags
        if command == 'parent':
            title = 'manage tag parents'
        elif command == 'sibling':
            title = 'manage tag siblings'
        from hydrus.client.gui import ClientGUITopLevelWindowsPanels
        with ClientGUITopLevelWindowsPanels.DialogManage( self, title ) as dlg:
            if command == 'parent':
                panel = ClientGUITags.ManageTagParents( dlg, tags )
            elif command == 'sibling':
                panel = ClientGUITags.ManageTagSiblings( dlg, tags )
            dlg.SetPanel( panel )
            dlg.exec()
def _UpdateBackgroundColour(self):
    """Re-read the configured tag-box background colour and repaint the widget."""
    self._background_colour = HG.client_controller.new_options.GetColour(CC.COLOUR_TAGS_BOX)
    self.widget().update()
def EventMouseMiddleClick(self, event):
    """Middle-click: register the click and, when allowed, open a search page for it."""
    self._HandleClick(event)
    if not self.can_spawn_new_windows:
        return
    self._NewSearchPage()
def contextMenuEvent(self, event):
    """Show the context menu only for keyboard requests; the mouse path is
    handled in mouseReleaseEvent."""
    if event.reason() != QG.QContextMenuEvent.Keyboard:
        return
    self.ShowMenu()
def mouseReleaseEvent(self, event):
    """Right-button release opens the tag menu; anything else goes to the base class."""
    if event.button() == QC.Qt.RightButton:
        self.ShowMenu()
    else:
        ListBox.mouseReleaseEvent(self, event)
def ShowMenu( self ):
    """Build and pop up the full context menu for the current selection.

    Sections, in order: copy (terms/subtags/siblings/all), select-files,
    new-search-page, edit-current-search-predicates, hide, favourites, and
    manage parents/siblings. Fixes two user-facing tooltip strings that read
    'from' where 'to' was meant.
    """
    if len( self._ordered_terms ) > 0:
        menu = QW.QMenu()
        # --- copy submenu ---
        copy_menu = QW.QMenu( menu )
        selected_copyable_tag_strings = self._GetCopyableTagStrings( only_selected = True, with_counts = False )
        selected_actual_tags = self._GetSelectedActualTags()
        if len( selected_copyable_tag_strings ) == 1:
            ( selection_string, ) = selected_copyable_tag_strings
        else:
            selection_string = 'selected'
        selected_stuff_to_copy = len( selected_copyable_tag_strings ) > 0
        if selected_stuff_to_copy:
            ClientGUIMenus.AppendMenuItem( copy_menu, selection_string, 'Copy the selected predicates to your clipboard.', self._ProcessMenuCopyEvent, 'copy_selected_terms' )
            if len( selected_copyable_tag_strings ) == 1:
                ( selection_string, ) = selected_copyable_tag_strings
                ( namespace, subtag ) = HydrusTags.SplitTag( selection_string )
                if namespace != '':
                    sub_selection_string = subtag
                    ClientGUIMenus.AppendMenuItem( copy_menu, sub_selection_string, 'Copy the selected sub-predicate to your clipboard.', self._ProcessMenuCopyEvent, 'copy_selected_sub_terms' )
            else:
                ClientGUIMenus.AppendMenuItem( copy_menu, 'selected subtags', 'Copy the selected sub-predicates to your clipboard.', self._ProcessMenuCopyEvent, 'copy_selected_sub_terms' )
        # --- siblings submenu (single actual tag only) ---
        siblings = []
        if len( selected_actual_tags ) == 1:
            ( selected_tag, ) = selected_actual_tags
            ( selected_namespace, selected_subtag ) = HydrusTags.SplitTag( selected_tag )
            # Track what is already offered so siblings/subtags are not listed twice.
            sibling_tags_seen = set()
            sibling_tags_seen.add( selected_tag )
            sibling_tags_seen.add( selected_subtag )
            siblings = set( HG.client_controller.tag_siblings_manager.GetAllSiblings( CC.COMBINED_TAG_SERVICE_KEY, selected_tag ) )
            siblings.difference_update( sibling_tags_seen )
            if len( siblings ) > 0:
                siblings = HydrusTags.SortNumericTags( siblings )
                siblings_menu = QW.QMenu( copy_menu )
                for sibling in siblings:
                    if sibling not in sibling_tags_seen:
                        ClientGUIMenus.AppendMenuItem( siblings_menu, sibling, 'Copy the selected tag sibling to your clipboard.', HG.client_controller.pub, 'clipboard', 'text', sibling )
                        sibling_tags_seen.add( sibling )
                    ( sibling_namespace, sibling_subtag ) = HydrusTags.SplitTag( sibling )
                    if sibling_subtag not in sibling_tags_seen:
                        ClientGUIMenus.AppendMenuItem( siblings_menu, sibling_subtag, 'Copy the selected sibling subtag to your clipboard.', HG.client_controller.pub, 'clipboard', 'text', sibling_subtag )
                        sibling_tags_seen.add( sibling_subtag )
                ClientGUIMenus.AppendMenu( copy_menu, siblings_menu, 'siblings' )
        # Only offer 'all tags' when the selection does not already cover everything.
        copy_all_is_appropriate = len( self._ordered_terms ) > len( self._selected_terms )
        if copy_all_is_appropriate:
            ClientGUIMenus.AppendSeparator( copy_menu )
            ClientGUIMenus.AppendMenuItem( copy_menu, 'all tags', 'Copy all the predicates in this list to your clipboard.', self._ProcessMenuCopyEvent, 'copy_all_tags' )
            if self.has_counts:
                ClientGUIMenus.AppendMenuItem( copy_menu, 'all tags with counts', 'Copy all the predicates in this list, with their counts, to your clipboard.', self._ProcessMenuCopyEvent, 'copy_all_tags_with_counts' )
        ClientGUIMenus.AppendMenu( menu, copy_menu, 'copy' )
        if len( self._selected_terms ) > 0:
            # --- select-files submenu ---
            if len( selected_actual_tags ) > 0 and self._page_key is not None:
                select_menu = QW.QMenu( menu )
                tags_sorted_to_show_on_menu = HydrusTags.SortNumericTags( selected_actual_tags )
                tags_sorted_to_show_on_menu_string = ', '.join( tags_sorted_to_show_on_menu )
                # Trim the label from the end until it fits a 64-char budget.
                while len( tags_sorted_to_show_on_menu_string ) > 64:
                    if len( tags_sorted_to_show_on_menu ) == 1:
                        tags_sorted_to_show_on_menu_string = '(many/long tags)'
                    else:
                        tags_sorted_to_show_on_menu.pop( -1 )
                        tags_sorted_to_show_on_menu_string = ', '.join( tags_sorted_to_show_on_menu + [ '\u2026' ] )
                if len( selected_actual_tags ) == 1:
                    label = 'files with "{}"'.format( tags_sorted_to_show_on_menu_string )
                else:
                    label = 'files with all of "{}"'.format( tags_sorted_to_show_on_menu_string )
                ClientGUIMenus.AppendMenuItem( select_menu, label, 'Select the files with these tags.', HG.client_controller.pub, 'select_files_with_tags', self._page_key, 'AND', set( selected_actual_tags ) )
                if len( selected_actual_tags ) > 1:
                    label = 'files with any of "{}"'.format( tags_sorted_to_show_on_menu_string )
                    ClientGUIMenus.AppendMenuItem( select_menu, label, 'Select the files with any of these tags.', HG.client_controller.pub, 'select_files_with_tags', self._page_key, 'OR', set( selected_actual_tags ) )
                ClientGUIMenus.AppendMenu( menu, select_menu, 'select' )
            # --- open new search page(s) ---
            if self.can_spawn_new_windows:
                ClientGUIMenus.AppendSeparator( menu )
                ClientGUIMenus.AppendMenuItem( menu, 'open a new search page for ' + selection_string, 'Open a new search page starting with the selected predicates.', self._NewSearchPage )
                if len( self._selected_terms ) > 1:
                    ClientGUIMenus.AppendMenuItem( menu, 'open new search pages for each in selection', 'Open one new search page for each selected predicate.', self._NewSearchPageForEach )
            # --- edit the current page's search predicates ---
            if self._CanProvideCurrentPagePredicates():
                current_predicates = self._GetCurrentPagePredicates()
                ClientGUIMenus.AppendSeparator( menu )
                ( predicates, inverse_predicates ) = self._GetSelectedPredicatesAndInverseCopies()
                predicates = set( predicates )
                inverse_predicates = set( inverse_predicates )
                if len( predicates ) == 1:
                    ( pred, ) = predicates
                    predicates_selection_string = pred.ToString( with_count = False )
                else:
                    predicates_selection_string = 'selected'
                some_selected_in_current = len( predicates.intersection( current_predicates ) ) > 0
                if some_selected_in_current:
                    ClientGUIMenus.AppendMenuItem( menu, 'discard {} from current search'.format( predicates_selection_string ), 'Remove the selected predicates from the current search.', self._ProcessMenuPredicateEvent, 'remove_predicates' )
                some_selected_not_in_current = len( predicates.intersection( current_predicates ) ) < len( predicates )
                if some_selected_not_in_current:
                    # BUG FIX: tooltip previously read 'Add ... from the current search.'
                    ClientGUIMenus.AppendMenuItem( menu, 'require {} for current search'.format( predicates_selection_string ), 'Add the selected predicates to the current search.', self._ProcessMenuPredicateEvent, 'add_predicates' )
                some_selected_are_excluded_explicitly = len( inverse_predicates.intersection( current_predicates ) ) > 0
                if some_selected_are_excluded_explicitly:
                    ClientGUIMenus.AppendMenuItem( menu, 'permit {} for current search'.format( predicates_selection_string ), 'Stop disallowing the selected predicates from the current search.', self._ProcessMenuPredicateEvent, 'remove_inverse_predicates' )
                some_selected_are_not_excluded_explicitly = len( inverse_predicates.intersection( current_predicates ) ) < len( inverse_predicates )
                if some_selected_are_not_excluded_explicitly:
                    ClientGUIMenus.AppendMenuItem( menu, 'exclude {} from current search'.format( predicates_selection_string ), 'Disallow the selected predicates for the current search.', self._ProcessMenuPredicateEvent, 'add_inverse_predicates' )
        # --- hide rules and favourites (single actual tag only) ---
        if len( selected_actual_tags ) == 1:
            ( selected_tag, ) = selected_actual_tags
            if self._tag_display_type in ( ClientTags.TAG_DISPLAY_SINGLE_MEDIA, ClientTags.TAG_DISPLAY_SELECTION_LIST ):
                ClientGUIMenus.AppendSeparator( menu )
                ( namespace, subtag ) = HydrusTags.SplitTag( selected_tag )
                hide_menu = QW.QMenu( menu )
                ClientGUIMenus.AppendMenuItem( hide_menu, '"{}" tags from here'.format( ClientTags.RenderNamespaceForUser( namespace ) ), 'Hide this namespace from view in future.', self._ProcessMenuTagEvent, 'hide_namespace' )
                ClientGUIMenus.AppendMenuItem( hide_menu, '"{}" from here'.format( selected_tag ), 'Hide this tag from view in future.', self._ProcessMenuTagEvent, 'hide' )
                ClientGUIMenus.AppendMenu( menu, hide_menu, 'hide' )
                ClientGUIMenus.AppendSeparator( menu )
            ClientGUIMenus.AppendSeparator( menu )
            favourite_tags = list( HG.client_controller.new_options.GetStringList( 'favourite_tags' ) )
            def set_favourite_tags( favourite_tags ):
                HG.client_controller.new_options.SetStringList( 'favourite_tags', favourite_tags )
                HG.client_controller.pub( 'notify_new_favourite_tags' )
            # The toggled list is built now and handed to the callback, so the
            # menu item applies the change only when clicked.
            if selected_tag in favourite_tags:
                favourite_tags.remove( selected_tag )
                label = 'remove "{}" from favourites'.format( selected_tag )
                description = 'Remove this tag from your favourites'
            else:
                favourite_tags.append( selected_tag )
                label = 'add "{}" to favourites'.format( selected_tag )
                # BUG FIX: description previously read 'Add this tag from your favourites'.
                description = 'Add this tag to your favourites'
            ClientGUIMenus.AppendMenuItem( menu, label, description, set_favourite_tags, favourite_tags )
        # --- manage parents/siblings ---
        if len( selected_actual_tags ) > 0 and self.can_spawn_new_windows:
            ClientGUIMenus.AppendSeparator( menu )
            if len( selected_actual_tags ) == 1:
                ( tag, ) = selected_actual_tags
                text = tag
            else:
                text = 'selection'
            ClientGUIMenus.AppendMenuItem( menu, 'add parents to ' + text, 'Add a parent to this tag.', self._ProcessMenuTagEvent, 'parent' )
            ClientGUIMenus.AppendMenuItem( menu, 'add siblings to ' + text, 'Add a sibling to this tag.', self._ProcessMenuTagEvent, 'sibling' )
        CGC.core().PopupMenu( self, menu )
def GetSelectedTags(self):
    """Return a fresh set holding the currently selected terms."""
    return {term for term in self._selected_terms}
def ForceTagRecalc( self ):
    """Recalculate displayed tag strings.

    No-op in this base class; subclasses that cache rendered tag text
    override this.
    """
    pass
class ListBoxTagsPredicates( ListBoxTags ):
    """Tag listbox whose terms are ClientSearch.Predicate objects.

    'Parent' rows (PREDICATE_TYPE_PARENT) directly follow their child predicate
    and are selected/deselected together with it; label rows are inert.
    """

    has_counts = True

    def _CanHitIndex( self, index ):
        """Disallow hits (activation) on label and parent rows."""
        result = ListBoxTags._CanHitIndex( self, index )
        if not result:
            return False
        try:
            term = self._GetTerm( index )
        except HydrusExceptions.DataMissing:
            return False
        if term.GetType() in ( ClientSearch.PREDICATE_TYPE_LABEL, ClientSearch.PREDICATE_TYPE_PARENT ):
            return False
        return True

    def _CanSelectIndex( self, index ):
        """Disallow selection of label rows (parent rows select with their child)."""
        result = ListBoxTags._CanSelectIndex( self, index )
        if not result:
            return False
        try:
            term = self._GetTerm( index )
        except HydrusExceptions.DataMissing:
            return False
        if term.GetType() == ClientSearch.PREDICATE_TYPE_LABEL:
            return False
        return True

    def _Deselect( self, index ):
        # Deselect the row and any parent rows attached beneath it.
        # FIX: the loop variable previously shadowed the 'index' parameter.
        for attached_index in self._GetWithParentIndices( index ):
            ListBoxTags._Deselect( self, attached_index )

    def _GetMutuallyExclusivePredicates( self, predicate ):
        """Return the existing predicates that cannot coexist with *predicate*."""
        return { existing_predicate for existing_predicate in self._terms if existing_predicate.IsMutuallyExclusive( predicate ) }

    def _GetNamespaceFromTerm( self, term ):
        return term.GetNamespace()

    def _GetTagFromTerm( self, term ):
        # Only plain tag predicates map back to a tag string.
        if term.GetType() == ClientSearch.PREDICATE_TYPE_TAG:
            return term.GetValue()
        else:
            return None

    def _GetSimplifiedTextFromTerm( self, term ):
        return term.ToString( with_count = False )

    def _GetTextFromTerm( self, term ):
        return term.ToString()

    def _GetWithParentIndices( self, index ):
        """Return [ index ] plus the indices of any parent rows that follow it."""
        indices = [ index ]
        index += 1
        while index < len( self._ordered_terms ):
            term = self._GetTerm( index )
            if term.GetType() == ClientSearch.PREDICATE_TYPE_PARENT:
                indices.append( index )
            else:
                break
            index += 1
        return indices

    def _HasPredicate( self, predicate ):
        return predicate in self._terms

    def _Hit( self, shift, ctrl, hit_index ):
        # Redirect hits on un-hittable rows (labels/parents) to a safe neighbour.
        hit_index = self._GetSafeHitIndex( hit_index )
        ListBoxTags._Hit( self, shift, ctrl, hit_index )

    def _Select( self, index ):
        # Select the row and any parent rows attached beneath it.
        # FIX: the loop variable previously shadowed the 'index' parameter.
        for attached_index in self._GetWithParentIndices( index ):
            ListBoxTags._Select( self, attached_index )

    def GetPredicates( self ) -> typing.Set[ ClientSearch.Predicate ]:
        """Return a set copy of every predicate in the box."""
        return set( self._terms )

    def SetPredicates( self, predicates ):
        """Replace the contents, preserving selection for predicates that survive."""
        selected_terms = set( self._selected_terms )
        self._Clear()
        for predicate in predicates:
            self._AppendTerm( predicate )
        for term in selected_terms:
            if term in self._ordered_terms:
                self._selected_terms.add( term )
        self._HitFirstSelectedItem()
        self._DataHasChanged()
class ListBoxTagsCensorship( ListBoxTags ):
    """Listbox of tag 'slices' (tags, 'namespace:', ':', '') used for tag filter lists.

    Activating (double-click/enter) removes the selected entries, optionally
    notifying a callback.
    """

    def __init__( self, parent, removed_callable = None ):
        ListBoxTags.__init__( self, parent )
        # Optional callback invoked with the set of slices removed via activation.
        self._removed_callable = removed_callable

    def _SortAndRefresh( self ):
        # Common tail of every mutator: keep the list sorted and repaint.
        self._ordered_terms.sort()
        self._DataHasChanged()

    def _Activate( self ):
        if not self._selected_terms:
            return
        removees = set( self._selected_terms )
        for tag_slice in removees:
            self._RemoveTerm( tag_slice )
        if self._removed_callable is not None:
            self._removed_callable( removees )
        self._SortAndRefresh()

    def _GetNamespaceFromTerm( self, term ):
        # ':' means 'all namespaced tags' and has no single namespace.
        if term == ':':
            return None
        namespace, _ = HydrusTags.SplitTag( term )
        return namespace

    def _GetTagFromTerm( self, term ):
        # The special slices ':' and '' are not actual tags.
        return None if term in ( ':', '' ) else term

    def _GetTextFromTerm( self, term ):
        return ClientTags.ConvertTagSliceToString( term )

    def AddTags( self, tags ):
        for tag_slice in tags:
            self._AppendTerm( tag_slice )
        self._SortAndRefresh()

    def EnterTags( self, tags ):
        # Toggle: present slices are removed, absent ones added.
        for tag_slice in tags:
            if tag_slice in self._terms:
                self._RemoveTerm( tag_slice )
            else:
                self._AppendTerm( tag_slice )
        self._SortAndRefresh()

    def GetTags( self ):
        return list( self._ordered_terms )

    def RemoveTags( self, tags ):
        for tag_slice in tags:
            self._RemoveTerm( tag_slice )
        self._SortAndRefresh()

    def SetTags( self, tags ):
        self._Clear()
        self.AddTags( tags )
class ListBoxTagsColourOptions( ListBoxTags ):
    """Listbox of ( namespace, ( r, g, b ) ) pairs for namespace colour options.

    The None and '' namespaces are protected defaults and cannot be removed.
    """

    # Namespaces that must always keep an entry: the default and the unnamespaced one.
    PROTECTED_TERMS = ( None, '' )
    can_spawn_new_windows = False

    def __init__( self, parent, initial_namespace_colours ):
        ListBoxTags.__init__( self, parent )
        for ( namespace, colour ) in initial_namespace_colours.items():
            colour = tuple( colour ) # tuple to convert from list, for oooold users who have list colours
            self._AppendTerm( ( namespace, colour ) )
        self._SortByText()
        self._DataHasChanged()

    def _Activate( self ):
        """Ask for confirmation and delete the selected namespace colours."""
        namespaces = [ namespace for ( namespace, colour ) in self._selected_terms ]
        if len( namespaces ) > 0:
            # Imported locally, presumably to avoid a circular import — TODO confirm.
            from hydrus.client.gui import ClientGUIDialogsQuick
            result = ClientGUIDialogsQuick.GetYesNo( self, 'Delete all selected colours?' )
            if result == QW.QDialog.Accepted:
                self._RemoveNamespaces( namespaces )

    def _DeleteActivate( self ):
        self._Activate()

    def _GetNamespaceColours( self ):
        # Terms are ( namespace, colour ) pairs, so dict() maps namespace -> colour.
        return dict( self._terms )

    def _GetNamespaceFromTerm( self, term ):
        ( namespace, colour ) = term
        return namespace

    def _GetTagFromTerm( self, term ):
        # Terms are namespace colours, not tags.
        return None

    def _GetTextFromTerm( self, term ):
        """Render an example 'namespace:tag' label for the row."""
        ( namespace, colour ) = term
        if namespace is None:
            namespace_string = 'default namespace:tag'
        elif namespace == '':
            namespace_string = 'unnamespaced tag'
        else:
            namespace_string = namespace + ':tag'
        return namespace_string

    def _RemoveNamespaces( self, namespaces ):
        """Remove the entries for the given namespaces, skipping protected ones."""
        namespaces = [ namespace for namespace in namespaces if namespace not in self.PROTECTED_TERMS ]
        # Collect first, then remove, so _terms is not mutated while scanning it.
        removees = [ ( existing_namespace, existing_colour ) for ( existing_namespace, existing_colour ) in self._terms if existing_namespace in namespaces ]
        for removee in removees:
            self._RemoveTerm( removee )
        self._DataHasChanged()

    def SetNamespaceColour( self, namespace, colour: QG.QColor ):
        """Set (or replace) the colour for a namespace from a QColor."""
        colour_tuple = ( colour.red(), colour.green(), colour.blue() )
        for ( existing_namespace, existing_colour ) in self._terms:
            if existing_namespace == namespace:
                # break immediately: removing while iterating further would be unsafe.
                self._RemoveTerm( ( existing_namespace, existing_colour ) )
                break
        self._AppendTerm( ( namespace, colour_tuple ) )
        self._SortByText()
        self._DataHasChanged()

    def GetNamespaceColours( self ):
        return self._GetNamespaceColours()

    def GetSelectedNamespaceColours( self ):
        namespace_colours = dict( self._selected_terms )
        return namespace_colours
class ListBoxTagsStrings( ListBoxTags ):
    """Listbox whose terms are plain tag strings, optionally sorted and with
    'will display as' sibling text appended."""

    def __init__( self, parent, service_key = None, show_sibling_text = True, sort_tags = True ):
        ListBoxTags.__init__( self, parent )
        if service_key is None:
            # BUG FIX: this condition was 'is not None', which clobbered any
            # caller-supplied service key with the combined service and left the
            # default (None) unset, breaking sibling lookups.
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        self._service_key = service_key
        self._show_sibling_text = show_sibling_text
        self._sort_tags = sort_tags

    def _GetNamespaceFromTerm( self, term ):
        ( namespace, subtag ) = HydrusTags.SplitTag( term )
        return namespace

    def _GetSimplifiedTextFromTerm( self, term ):
        return str( term )

    def _GetTagFromTerm( self, term ):
        return term

    def _GetTextFromTerm( self, term ):
        """Render the tag, appending sibling display info when enabled."""
        tag = term
        tag_string = ClientTags.RenderTag( tag, True )
        if self._show_sibling_text:
            sibling = HG.client_controller.tag_siblings_manager.GetSibling( self._service_key, tag )
            if sibling is not None:
                tag_string += ' (will display as ' + ClientTags.RenderTag( sibling, True ) + ')'
        return tag_string

    def _RecalcTags( self ):
        """Re-render all tag texts, re-sort if configured, and repaint."""
        self._RefreshTexts()
        if self._sort_tags:
            self._SortByText()
        self._DataHasChanged()

    def GetTags( self ):
        return set( self._terms )

    def SetTagServiceKey( self, service_key ):
        self._service_key = service_key
        self._RecalcTags()

    def SetTags( self, tags ):
        """Replace contents, preserving selection for tags that survive."""
        selected_terms = set( self._selected_terms )
        self._Clear()
        for tag in tags:
            self._AppendTerm( tag )
        for term in selected_terms:
            if term in self._ordered_terms:
                self._selected_terms.add( term )
        self._HitFirstSelectedItem()
        self._RecalcTags()

    def ForceTagRecalc( self ):
        # Skip the recalc while minimised; it will happen again on restore.
        if self.window().isMinimized():
            return
        self._RecalcTags()
class ListBoxTagsStringsAddRemove( ListBoxTagsStrings ):
    """Editable tag-string listbox: activation or the delete key removes the
    selection, optionally notifying a callback with what was removed."""

    def __init__( self, parent, service_key = None, removed_callable = None, show_sibling_text = True ):
        ListBoxTagsStrings.__init__( self, parent, service_key = service_key, show_sibling_text = show_sibling_text )
        self._removed_callable = removed_callable

    def _Activate( self ):
        if not self._selected_terms:
            return
        self._RemoveTags( set( self._selected_terms ) )

    def _RemoveTags( self, tags ):
        for tag in tags:
            self._RemoveTerm( tag )
        self._RecalcTags()
        if self._removed_callable is not None:
            self._removed_callable( tags )

    def AddTags( self, tags ):
        for tag in tags:
            self._AppendTerm( tag )
        self._RecalcTags()

    def Clear( self ):
        self._Clear()
        self._RecalcTags()

    def EnterTags( self, tags ):
        # Toggle each tag: present ones are removed (and reported), absent ones added.
        removed = set()
        for tag in tags:
            if tag in self._terms:
                self._RemoveTerm( tag )
                removed.add( tag )
            else:
                self._AppendTerm( tag )
        self._RecalcTags()
        if removed and self._removed_callable is not None:
            self._removed_callable( removed )

    def keyPressEvent( self, event ):
        ( modifier, key ) = ClientGUIShortcuts.ConvertKeyEventToSimpleTuple( event )
        if key in ClientGUIShortcuts.DELETE_KEYS_QT:
            self._Activate()
        else:
            ListBoxTagsStrings.keyPressEvent( self, event )

    def RemoveTags( self, tags ):
        self._RemoveTags( tags )
class ListBoxTagsMedia( ListBoxTags ):
    """Listbox showing the aggregated tags of a set of media, with per-status
    counts (current/deleted/pending/petitioned) kept in Counters so selection
    changes can be applied incrementally."""

    render_for_user = True
    has_counts = True

    def __init__( self, parent, tag_display_type, include_counts = True, show_sibling_description = False ):
        ListBoxTags.__init__( self, parent, height_num_chars = 24 )
        self._sort = HC.options[ 'default_tag_sort' ]
        # The media set the current counts were computed from.
        self._last_media = set()
        self._tag_service_key = CC.COMBINED_TAG_SERVICE_KEY
        self._tag_display_type = tag_display_type
        self._include_counts = include_counts
        self._show_sibling_description = show_sibling_description
        # One Counter per tag status; tags with zero count are pruned on update.
        self._current_tags_to_count = collections.Counter()
        self._deleted_tags_to_count = collections.Counter()
        self._pending_tags_to_count = collections.Counter()
        self._petitioned_tags_to_count = collections.Counter()
        # Which statuses are rendered; deleted is hidden by default.
        self._show_current = True
        self._show_deleted = False
        self._show_pending = True
        self._show_petitioned = True

    def _GetNamespaceFromTerm( self, term ):
        tag = term
        ( namespace, subtag ) = HydrusTags.SplitTag( tag )
        return namespace

    def _GetSimplifiedTextFromTerm( self, term ):
        tag = term
        return str( tag )

    def _GetTagFromTerm( self, term ):
        tag = term
        return tag

    def _GetTextFromTerm( self, term ):
        """Render a tag with its per-status count suffixes, e.g. 'tag (3) (+1)'."""
        tag = term
        tag_string = ClientTags.RenderTag( tag, self.render_for_user )
        if self._include_counts:
            if self._show_current and tag in self._current_tags_to_count: tag_string += ' (' + HydrusData.ToHumanInt( self._current_tags_to_count[ tag ] ) + ')'
            if self._show_pending and tag in self._pending_tags_to_count: tag_string += ' (+' + HydrusData.ToHumanInt( self._pending_tags_to_count[ tag ] ) + ')'
            if self._show_petitioned and tag in self._petitioned_tags_to_count: tag_string += ' (-' + HydrusData.ToHumanInt( self._petitioned_tags_to_count[ tag ] ) + ')'
            if self._show_deleted and tag in self._deleted_tags_to_count: tag_string += ' (X' + HydrusData.ToHumanInt( self._deleted_tags_to_count[ tag ] ) + ')'
        else:
            if self._show_pending and tag in self._pending_tags_to_count: tag_string += ' (+)'
            if self._show_petitioned and tag in self._petitioned_tags_to_count: tag_string += ' (-)'
            if self._show_deleted and tag in self._deleted_tags_to_count: tag_string += ' (X)'
        if self._show_sibling_description:
            sibling = HG.client_controller.tag_siblings_manager.GetSibling( self._tag_service_key, tag )
            if sibling is not None:
                sibling = ClientTags.RenderTag( sibling, self.render_for_user )
                tag_string += ' (will display as ' + sibling + ')'
        return tag_string

    def _RecalcStrings( self, limit_to_these_tags = None ):
        """Rebuild the displayed terms from the count Counters.

        When limit_to_these_tags is given, only those tags are removed and
        re-added; otherwise everything is rebuilt. Selection is preserved for
        terms that survive.
        """
        previous_selected_terms = set( self._selected_terms )
        if limit_to_these_tags is None:
            self._Clear()
            nonzero_tags = set()
            if self._show_current: nonzero_tags.update( ( tag for ( tag, count ) in list(self._current_tags_to_count.items()) if count > 0 ) )
            if self._show_deleted: nonzero_tags.update( ( tag for ( tag, count ) in list(self._deleted_tags_to_count.items()) if count > 0 ) )
            if self._show_pending: nonzero_tags.update( ( tag for ( tag, count ) in list(self._pending_tags_to_count.items()) if count > 0 ) )
            if self._show_petitioned: nonzero_tags.update( ( tag for ( tag, count ) in list(self._petitioned_tags_to_count.items()) if count > 0 ) )
            for tag in nonzero_tags:
                self._AppendTerm( tag )
        else:
            if not isinstance( limit_to_these_tags, set ):
                limit_to_these_tags = set( limit_to_these_tags )
            for tag in limit_to_these_tags:
                self._RemoveTerm( tag )
            nonzero_tags = set()
            if self._show_current: nonzero_tags.update( ( tag for ( tag, count ) in list(self._current_tags_to_count.items()) if count > 0 and tag in limit_to_these_tags ) )
            if self._show_deleted: nonzero_tags.update( ( tag for ( tag, count ) in list(self._deleted_tags_to_count.items()) if count > 0 and tag in limit_to_these_tags ) )
            if self._show_pending: nonzero_tags.update( ( tag for ( tag, count ) in list(self._pending_tags_to_count.items()) if count > 0 and tag in limit_to_these_tags ) )
            if self._show_petitioned: nonzero_tags.update( ( tag for ( tag, count ) in list(self._petitioned_tags_to_count.items()) if count > 0 and tag in limit_to_these_tags ) )
            for tag in nonzero_tags:
                self._AppendTerm( tag )
        for term in previous_selected_terms:
            if term in self._terms:
                self._selected_terms.add( term )
        self._SortTags()

    def _SortTags( self ):
        """Sort the displayed terms by the configured sort, weighted by the
        combined counts of all visible statuses, then repaint."""
        tags_to_count = collections.Counter()
        if self._show_current: tags_to_count.update( self._current_tags_to_count )
        if self._show_deleted: tags_to_count.update( self._deleted_tags_to_count )
        if self._show_pending: tags_to_count.update( self._pending_tags_to_count )
        if self._show_petitioned: tags_to_count.update( self._petitioned_tags_to_count )
        ClientTags.SortTags( self._sort, self._ordered_terms, tags_to_count )
        self._DataHasChanged()

    def ChangeTagService( self, service_key ):
        self._tag_service_key = service_key
        # Recount everything against the new service.
        self.SetTagsByMedia( self._last_media )

    def SetSort( self, sort ):
        self._sort = sort
        self._SortTags()

    def SetShow( self, show_type, value ):
        """Toggle visibility of one status ('current'/'deleted'/'pending'/'petitioned')."""
        if show_type == 'current': self._show_current = value
        elif show_type == 'deleted': self._show_deleted = value
        elif show_type == 'pending': self._show_pending = value
        elif show_type == 'petitioned': self._show_petitioned = value
        self._RecalcStrings()

    def IncrementTagsByMedia( self, media ):
        """Add the tag counts of media not yet included in _last_media."""
        media = set( media )
        media = media.difference( self._last_media )
        ( current_tags_to_count, deleted_tags_to_count, pending_tags_to_count, petitioned_tags_to_count ) = ClientMedia.GetMediasTagCount( media, self._tag_service_key, self._tag_display_type )
        self._current_tags_to_count.update( current_tags_to_count )
        self._deleted_tags_to_count.update( deleted_tags_to_count )
        self._pending_tags_to_count.update( pending_tags_to_count )
        self._petitioned_tags_to_count.update( petitioned_tags_to_count )
        tags_changed = set()
        if self._show_current: tags_changed.update( list(current_tags_to_count.keys()) )
        if self._show_deleted: tags_changed.update( list(deleted_tags_to_count.keys()) )
        if self._show_pending: tags_changed.update( list(pending_tags_to_count.keys()) )
        if self._show_petitioned: tags_changed.update( list(petitioned_tags_to_count.keys()) )
        if len( tags_changed ) > 0:
            self._RecalcStrings( tags_changed )
        self._last_media.update( media )

    def SetTagsByMedia( self, media ):
        """Recount all tags from scratch for the given media."""
        media = set( media )
        ( current_tags_to_count, deleted_tags_to_count, pending_tags_to_count, petitioned_tags_to_count ) = ClientMedia.GetMediasTagCount( media, self._tag_service_key, self._tag_display_type )
        self._current_tags_to_count = current_tags_to_count
        self._deleted_tags_to_count = deleted_tags_to_count
        self._pending_tags_to_count = pending_tags_to_count
        self._petitioned_tags_to_count = petitioned_tags_to_count
        self._RecalcStrings()
        self._last_media = media
        self._DataHasChanged()

    def SetTagsByMediaFromMediaPanel( self, media, tags_changed ):
        """Update counts for a selection change by diffing against _last_media."""
        # this uses the last-set media and count cache to generate new numbers and is faster than re-counting from scratch when the tags have not changed
        selection_shrank = len( media ) < len( self._last_media ) // 10 # if we are dropping to a much smaller selection (e.g. 5000 -> 1), we should just recalculate from scratch
        if tags_changed or selection_shrank:
            self.SetTagsByMedia( media )
            return
        media = set( media )
        removees = self._last_media.difference( media )
        adds = media.difference( self._last_media )
        # Subtract the counts of media leaving the selection...
        ( current_tags_to_count, deleted_tags_to_count, pending_tags_to_count, petitioned_tags_to_count ) = ClientMedia.GetMediasTagCount( removees, self._tag_service_key, self._tag_display_type )
        self._current_tags_to_count.subtract( current_tags_to_count )
        self._deleted_tags_to_count.subtract( deleted_tags_to_count )
        self._pending_tags_to_count.subtract( pending_tags_to_count )
        self._petitioned_tags_to_count.subtract( petitioned_tags_to_count )
        # ...and add the counts of media entering it.
        ( current_tags_to_count, deleted_tags_to_count, pending_tags_to_count, petitioned_tags_to_count ) = ClientMedia.GetMediasTagCount( adds, self._tag_service_key, self._tag_display_type )
        self._current_tags_to_count.update( current_tags_to_count )
        self._deleted_tags_to_count.update( deleted_tags_to_count )
        self._pending_tags_to_count.update( pending_tags_to_count )
        self._petitioned_tags_to_count.update( petitioned_tags_to_count )
        # Prune zero entries so Counter membership tests stay meaningful.
        for counter in ( self._current_tags_to_count, self._deleted_tags_to_count, self._pending_tags_to_count, self._petitioned_tags_to_count ):
            tags = list( counter.keys() )
            for tag in tags:
                if counter[ tag ] == 0:
                    del counter[ tag ]
        if len( removees ) == 0:
            # Pure addition: only the added tags can have changed.
            tags_changed = set()
            if self._show_current: tags_changed.update( list(current_tags_to_count.keys()) )
            if self._show_deleted: tags_changed.update( list(deleted_tags_to_count.keys()) )
            if self._show_pending: tags_changed.update( list(pending_tags_to_count.keys()) )
            if self._show_petitioned: tags_changed.update( list(petitioned_tags_to_count.keys()) )
            if len( tags_changed ) > 0:
                self._RecalcStrings( tags_changed )
        else:
            self._RecalcStrings()
        self._last_media = media
        self._DataHasChanged()

    def ForceTagRecalc( self ):
        # Skip the recount while minimised; it will happen again on restore.
        if self.window().isMinimized():
            return
        self.SetTagsByMedia( self._last_media )
class StaticBoxSorterForListBoxTags( ClientGUICommon.StaticBox ):
    """A StaticBox holding a tag-sort dropdown that drives an attached
    ListBoxTagsMedia (set later via SetTagsBox)."""

    def __init__( self, parent, title ):
        ClientGUICommon.StaticBox.__init__( self, parent, title )
        # FIX: previously only created in SetTagsBox, so a sort/service event
        # arriving first raised AttributeError. Initialise and guard instead.
        self._tags_box = None
        self._sorter = ClientGUICommon.BetterChoice( self )
        self._sorter.addItem( 'lexicographic (a-z)', CC.SORT_BY_LEXICOGRAPHIC_ASC )
        self._sorter.addItem( 'lexicographic (z-a)', CC.SORT_BY_LEXICOGRAPHIC_DESC )
        self._sorter.addItem( 'lexicographic (a-z) (group unnamespaced)', CC.SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC )
        self._sorter.addItem( 'lexicographic (z-a) (group unnamespaced)', CC.SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC )
        self._sorter.addItem( 'lexicographic (a-z) (ignore namespace)', CC.SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_ASC )
        self._sorter.addItem( 'lexicographic (z-a) (ignore namespace)', CC.SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_DESC )
        self._sorter.addItem( 'incidence (desc)', CC.SORT_BY_INCIDENCE_DESC )
        self._sorter.addItem( 'incidence (asc)', CC.SORT_BY_INCIDENCE_ASC )
        self._sorter.addItem( 'incidence (desc) (grouped by namespace)', CC.SORT_BY_INCIDENCE_NAMESPACE_DESC )
        self._sorter.addItem( 'incidence (asc) (grouped by namespace)', CC.SORT_BY_INCIDENCE_NAMESPACE_ASC )
        self._sorter.SetValue( HC.options[ 'default_tag_sort' ] )
        self._sorter.currentIndexChanged.connect( self.EventSort )
        self.Add( self._sorter, CC.FLAGS_EXPAND_PERPENDICULAR )

    def ChangeTagService( self, service_key ):
        if self._tags_box is None:
            return
        self._tags_box.ChangeTagService( service_key )

    def EventSort( self, index ):
        """Push the newly chosen sort into the attached tags box."""
        selection = self._sorter.currentIndex()
        if selection != -1 and self._tags_box is not None:
            sort = self._sorter.GetValue()
            self._tags_box.SetSort( sort )

    def SetTagsBox( self, tags_box: ListBoxTagsMedia ):
        self._tags_box = tags_box
        self.Add( self._tags_box, CC.FLAGS_EXPAND_BOTH_WAYS )

    def SetTagsByMedia( self, media ):
        self._tags_box.SetTagsByMedia( media )
class ListBoxTagsMediaHoverFrame( ListBoxTagsMedia ):
    """Tag list shown in the media-viewer hover window; activation asks the
    owning canvas to open its 'manage tags' dialog."""

    def __init__( self, parent, canvas_key ):
        ListBoxTagsMedia.__init__( self, parent, ClientTags.TAG_DISPLAY_SINGLE_MEDIA, include_counts = False )
        # Identifies the canvas this hover frame belongs to for pub/sub routing.
        self._canvas_key = canvas_key

    def _Activate( self ):
        HG.client_controller.pub( 'canvas_manage_tags', self._canvas_key )
class ListBoxTagsMediaTagsDialog( ListBoxTagsMedia ):
    """Tag list used inside the manage-tags dialog; enter and delete route the
    current selection to caller-supplied callbacks."""

    render_for_user = False

    def __init__( self, parent, enter_func, delete_func ):
        ListBoxTagsMedia.__init__( self, parent, ClientTags.TAG_DISPLAY_STORAGE, include_counts = True, show_sibling_description = True )
        self._enter_func = enter_func
        self._delete_func = delete_func

    def _Activate( self ):
        if not self._selected_terms:
            return
        self._enter_func( set( self._selected_terms ) )

    def _DeleteActivate( self ):
        if not self._selected_terms:
            return
        self._delete_func( set( self._selected_terms ) )
| [
"hydrus.admin@gmail.com"
] | hydrus.admin@gmail.com |
a4de822d452f9b30465f5be889f1f3b10fb5bd39 | 6c10c6e229014dc3bf14efaec2ea8bf07c406752 | /AILearning/ComputerVision/ImageClassification.py | 5ddaebaa3e190d8957c87cd97df64729df342429 | [] | no_license | GuyRobot/AIPythonExamples | e59c6edb355d9cadee2b3f19a087b1b656956262 | 4acdd0d4966e31a616910554bc075b641aa152df | refs/heads/master | 2021-05-21T13:05:49.615593 | 2021-02-28T06:41:04 | 2021-02-28T06:41:04 | 252,662,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,674 | py | import collections
from d2l import AllDeepLearning as d2l
from mxnet import gluon, init, nd, autograd
from mxnet.gluon import nn
import os
import pandas as pd
import shutil
import time
import math
import tarfile
from pathlib import Path
# --- Dataset location & run configuration --------------------------------
# NOTE(review): hard-coded Windows path — adjust per machine.
data_dir = "E:/Python_Data/cifar-10/"
# tiny_data_dir = "E:/Python_Data/kaggle_cifar10_tiny/"
# data_dir = tiny_data_dir
# data_dir = "E:/Python_Data/cifar-10/"
tiny_data_dir = data_dir  # the full set is used even where "tiny" is referenced below
a = "http://d2l-data.s3-accelerate.amazonaws.com/kaggle_cifar10_tiny.zip"  # download URL, kept for reference (unused)
demo = False  # demo mode shrinks the batch size to 1 (see batch_size below)
# (Kept for reference: d2l-style download helper for the tiny dataset.)
# def download_voc_pascal(data_dir='../data'):
#     """Download the VOC2012 segmentation dataset."""
#     voc_dir = os.path.join(data_dir, 'Cifar_Tiny')
#     url = "http://d2l-data.s3-accelerate.amazonaws.com/kaggle_cifar10_tiny.zip"
#     sha1 = '2068874e4b9a9f0fb07ebe0ad2b29754449ccacd'
#     fname = gluon.utils.download(url, data_dir, sha1_hash=sha1)
#     with tarfile.open(fname, 'r') as f:
#         f.extractall(data_dir)
#     return voc_dir
def read_csv_labels(fname):
    """Read a Kaggle-style ``trainLabels.csv`` and return {image_name: label}.

    The first line (header) is skipped; every remaining line must be of
    the form ``name,label``.
    """
    with open(fname) as handle:
        rows = handle.readlines()[1:]
    labels = {}
    for row in rows:
        name, label = row.rstrip().split(',')
        labels[name] = label
    return labels
def copyfile(filename, target_dir):
    """Copy *filename* into *target_dir*, creating the directory tree first."""
    Path(target_dir).mkdir(parents=True, exist_ok=True)
    shutil.copy(filename, target_dir)
def reorg_train_valid(data_dir, labels, valid_ratio):
    """Split ``train/`` into train/valid/train_valid folder trees, one
    sub-folder per class label.

    *labels* maps image stem -> class name.  The per-class validation
    count is ``valid_ratio`` times the size of the rarest class (at
    least 1), so the validation set is class-balanced.  Returns that
    per-class validation count.

    NOTE(review): assumes *data_dir* ends with '/' (plain string
    concatenation throughout).
    """
    # Size of the least common class.
    n = collections.Counter(labels.values()).most_common()[-1][1]
    n_valid_per_label = max(1, math.floor(n * valid_ratio))
    label_count = {}
    for train_file in os.listdir(data_dir + 'train'):
        label = labels[train_file.split('.')[0]]
        fname = data_dir + 'train/' + train_file
        # Every image goes into train_valid (the train + valid union).
        copyfile(fname, data_dir + 'train_valid_test/train_valid/' + label)
        if label not in label_count or label_count[label] < n_valid_per_label:
            # The first n_valid_per_label images of each class go to valid/.
            copyfile(fname, data_dir + 'train_valid_test/valid/' + label)
            label_count[label] = label_count.get(label, 0) + 1
        else:
            copyfile(fname, data_dir + 'train_valid_test/train/' + label)
    return n_valid_per_label
def reorg_test(data_dir):
    # Test images are unlabeled: put them all under a single 'unknown'
    # class folder so ImageFolderDataset can load them.
    # NOTE(review): assumes *data_dir* ends with '/' (string concatenation).
    for test_file in os.listdir(data_dir + 'test'):
        copyfile(data_dir + 'test/' + test_file,
                 data_dir + 'train_valid_test/test/unknown/')
def reorg_cifar10_data(data_dir, valid_ratio):
    """Rebuild the on-disk layout expected by ImageFolderDataset below."""
    labels = read_csv_labels(data_dir + 'trainLabels.csv')
    reorg_train_valid(data_dir, labels, valid_ratio)
    reorg_test(data_dir)
# --- Data pipeline -------------------------------------------------------
batch_size = 1 if demo else 128
valid_ratio = 0.1  # fraction of the rarest class held out for validation
reorg_cifar10_data(tiny_data_dir, valid_ratio)

# Train-time augmentation: upscale to 40px, random crop back to 32px,
# random horizontal flip, then normalise with CIFAR-10 channel statistics.
# NOTE(review): scale=(0.64, 0.1) looks like a typo for (0.64, 1.0) —
# confirm against the original d2l recipe.
transform_train = gluon.data.vision.transforms.Compose([
    gluon.data.vision.transforms.Resize(40),
    gluon.data.vision.transforms.RandomResizedCrop(32, scale=(0.64, 0.1), ratio=(1.0, 1.0)),
    gluon.data.vision.transforms.RandomFlipLeftRight(),
    gluon.data.vision.transforms.ToTensor(),
    gluon.data.vision.transforms.Normalize((0.4914, 0.4822, 0.4465),
                                           (0.2023, 0.1994, 0.2010))])

# Test-time: tensor conversion + the same normalisation only.
transform_test = gluon.data.vision.transforms.Compose([
    gluon.data.vision.transforms.ToTensor(),
    gluon.data.vision.transforms.Normalize((0.4914, 0.4822, 0.4465),
                                           (0.2023, 0.1994, 0.2010))
])

# Folder-backed datasets produced by reorg_cifar10_data above.
train_ds, valid_ds, train_valid_ds, test_ds = [gluon.data.vision.ImageFolderDataset(
    data_dir+'train_valid_test/'+folder) for folder in ['train', 'valid', 'train_valid', 'test']]

# Shuffled loaders with augmentation for training; deterministic loaders
# with test transforms for validation/test.
train_iter, train_valid_iter = [gluon.data.DataLoader(
    dataset.transform_first(transform_train), batch_size, shuffle=True,
    last_batch='keep') for dataset in (train_ds, train_valid_ds)]
valid_iter, test_iter = [gluon.data.DataLoader(
    dataset.transform_first(transform_test), batch_size, shuffle=False,
    last_batch='keep') for dataset in [valid_ds, test_ds]]
class Residual(nn.HybridBlock):
    """ResNet basic block: two 3x3 convs with batch norm plus a skip path.

    When the block changes resolution/channels (``strides`` > 1 or a
    channel change), pass ``use_1x1_conv=True`` so a strided 1x1 conv
    projects the input to match the main path's output shape.
    """

    def __init__(self, num_channels, use_1x1_conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        if use_1x1_conv:
            # Projection on the skip path so shapes match for the addition.
            self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def hybrid_forward(self, F, X, *args, **kwargs):
        # conv -> BN -> relu -> conv -> BN, then add the (possibly
        # projected) input and apply the final relu.
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return F.relu(Y + X)
def resnet18(num_classes):
    """Build a ResNet-18 (4 stages of 2 residual blocks) for small inputs.

    The stem is a single 3x3 conv — no 7x7 conv / max-pool as in the
    ImageNet variant — which suits CIFAR-sized (32x32) images.
    """
    net = nn.HybridSequential()
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(),
            nn.Activation('relu'))

    def resnet_block(num_channels, num_residuals, first_block=False):
        # One stage of residual blocks; the first block of every stage
        # except the first halves the spatial size (stride 2) and needs
        # the 1x1 projection on the skip path.
        blk = nn.HybridSequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(Residual(num_channels, use_1x1_conv=True, strides=2))
            else:
                blk.add(Residual(num_channels))
        return blk

    net.add(resnet_block(64, 2, first_block=True),
            resnet_block(128, 2),
            resnet_block(256, 2),
            resnet_block(512, 2))
    # Global average pooling + linear classifier head.
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net
def get_net(ctx):
    """Return a freshly Xavier-initialised 10-class ResNet-18 on *ctx*."""
    model = resnet18(10)
    model.initialize(init.Xavier(), ctx=ctx)
    return model
def train(net, train_iter, valid_iter, num_epochs, lr,
          wd, ctx, lr_period, lr_decay):
    """Train *net* with SGD + momentum and a step-decayed learning rate.

    The rate is multiplied by *lr_decay* every *lr_period* epochs.  Per
    epoch it prints loss, train accuracy and — when *valid_iter* is not
    None — validation accuracy.  Uses the module-level ``batch_size``
    for the trainer step normalisation.
    """
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr,
                                                          "momentum": 0.9, 'wd': wd})
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        # Step-decay the learning rate.
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        for X, y in train_iter:
            y = y.astype('float32').as_in_context(ctx)
            with autograd.record():
                y_hat = net(X.as_in_context(ctx))
                l = loss(y_hat, y)
            l.backward()
            trainer.step(batch_size)
            train_l_sum += float(l.sum().asscalar())
            train_acc_sum += float((y_hat.argmax(axis=1) == y).sum().asscalar())
            n += y.size
        time_s = "time %.2f sec" % (time.time() - start)
        if valid_iter is not None:
            valid_acc = d2l.evaluate_accuracy_gpu(net, valid_iter, ctx=ctx)
            epoch_s = ("epoch %d, loss %.2f, train acc %f, valid acc %f, " %
                       (epoch + 1, train_l_sum / n, train_acc_sum / n, valid_acc))
        else:
            epoch_s = ("epoch %d, loss %f, train acc %f" %
                       (epoch + 1, train_l_sum / n, train_acc_sum / n))
        print(epoch_s + time_s + ', lr ' + str(trainer.learning_rate))
# --- Training runs -------------------------------------------------------
# Hyper-parameters: 100 epochs, lr 0.1, weight decay 5e-4, lr decayed
# by 0.1 after epoch 80.
ctx, num_epochs, lr, wd = d2l.try_gpu(), 100, 0.1, 5e-4
lr_period, lr_decay, net = 80, 0.1, get_net(ctx)
net.hybridize()
# First run: train on the train split, monitor on the valid split.
train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period, lr_decay)

# Second run: retrain from scratch on train+valid for the final model.
net, preds = get_net(ctx), []
net.hybridize()
train(net, train_valid_iter, None, num_epochs, lr, wd, ctx, lr_period,
      lr_decay)

# (Kaggle submission generation, kept for reference.)
# for X, _ in test_iter:
#     y_hat = net(X.as_in_context(ctx))
#     preds.extend(y_hat.argmax(axis=1).astype(int).asnumpy())
# sorted_ids = list(range(1, len(test_ds) + 1))
# sorted_ids.sort(key=lambda x: str(x))
# df = pd.DataFrame({'id': sorted_ids, 'label': preds})
# df['label'] = df['label'].apply(lambda x: train_valid_ds.synsets[x])
# df.to_csv('submission.csv', index=False)
"bluexker@gmail.com"
] | bluexker@gmail.com |
0ff320ce0727df5f0904b4620d4c6d0545a34571 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_mirroring.py | a70d8db0f6d067749196aa854f55063eb436422f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _MIRRORING():
def __init__(self,):
self.name = "MIRRORING"
self.definitions = mirror
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['mirror']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
b4c1978dceeb02f1e9f67909e5ca91c0b929cef9 | b007d88e6726452ffa8fe80300614f311ae5b318 | /educative.io/coding_patterns/two_pointers/triplet_sum_to_zero.py | cf9fb664ace0958b7075ee5471e82813d78f042d | [] | no_license | jinurajan/Datastructures | ec332b12b8395f42cb769e771da3642f25ba7e7f | 647fea5d2c8122468a1c018c6829b1c08717d86a | refs/heads/master | 2023-07-06T14:42:55.168795 | 2023-07-04T13:23:22 | 2023-07-04T13:23:22 | 76,943,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | """
Given an array of unsorted numbers, find all unique triplets in it that add up to zero.
Example 1:
Input: [-3, 0, 1, 2, -1, 1, -2]
Output: [-3, 1, 2], [-2, 0, 2], [-2, 1, 1], [-1, 0, 1]
Explanation: There are four unique triplets whose sum is equal to zero.
Example 2:
Input: [-5, 2, -1, -2, 3]
Output: [[-5, 2, 3], [-2, -1, 3]]
Explanation: There are two unique triplets whose sum is equal to zero.
"""
def search_triplets(arr):
    """Return all unique triplets in *arr* that sum to zero.

    Sorts the array in place, then anchors each index and runs a
    two-pointer scan over the remainder for the complementary pair.
    Duplicate anchors and duplicate pairs are skipped so every triplet
    appears exactly once.  O(n^2) time.
    """
    triplets = []
    arr.sort()
    n = len(arr)

    def two_sum(target, left, triplets):
        # Find every pair arr[left..] + arr[right..] == target, appending
        # [-target, a, b] and skipping duplicate values on both sides.
        right = len(arr) - 1
        while left < right:
            curr = arr[left] + arr[right]
            if curr == target:
                triplets.append([-target, arr[left], arr[right]])
                left += 1
                right -= 1
                while left < right and arr[left] == arr[left - 1]:
                    left += 1  # skip duplicate left values
                while right >= 0 and arr[right] == arr[right + 1]:
                    right -= 1  # skip duplicate right values
            elif target > curr:
                left += 1  # we need a pair with a bigger sum
            else:
                right -= 1

    # BUG FIX: this was range(n - 3), which skips the i == n-3 anchor and
    # e.g. misses the only triplet of a 3-element input.  An anchor only
    # needs two elements after it, so iterate i in [0, n-2).
    for i in range(n - 2):
        if i > 0 and arr[i] == arr[i - 1]:
            continue  # skip duplicate anchors
        two_sum(-arr[i], i + 1, triplets)
    return triplets
| [
"jinu.p.r@gmail.com"
] | jinu.p.r@gmail.com |
8f3d58a6187c5c0b08864b3b0efa195d47915b34 | dabf4121ac793c2cfe87ff525a8a0f7305ea2c59 | /plugins/maya/publish/extract_pointcache_abc.py | a086790b49e5224c1d72527853aaeadeeda9c52b | [
"MIT"
] | permissive | Lynn5160/reveries-config | 9d91210ebde47a69bb00614f95341a7ce313118f | 1928e4d41acc4861ffa3260fa855ca77561285b0 | refs/heads/master | 2022-11-13T21:13:47.665939 | 2020-07-02T08:24:21 | 2020-07-02T08:24:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,148 | py |
import contextlib
import pyblish.api
class ExtractPointCacheABC(pyblish.api.InstancePlugin):
    """Extract an animated point cache from Maya as an Alembic (.abc) file.

    ``process`` only stages paths and the frame range on the instance and
    registers ``export_alembic`` under ``repr.Alembic._delayRun``; the
    actual export is executed later by the delay-run machinery.
    """

    order = pyblish.api.ExtractorOrder
    hosts = ["maya"]
    label = "Extract PointCache (abc)"
    families = [
        "reveries.pointcache.abc",
    ]

    def process(self, instance):
        from maya import cmds
        from reveries import utils

        # Stage into the shared staging dir so sibling extractors reuse it.
        staging_dir = utils.stage_dir(dir=instance.data["_sharedStage"])
        filename = "%s.abc" % instance.data["subset"]
        outpath = "%s/%s" % (staging_dir, filename)

        instance.data["repr.Alembic._stage"] = staging_dir
        instance.data["repr.Alembic._hardlinks"] = [filename]
        instance.data["repr.Alembic.entryFileName"] = filename

        if instance.data.get("staticCache"):
            # Static cache: bake only the current frame.
            start = cmds.currentTime(query=True)
            end = cmds.currentTime(query=True)
        else:
            context_data = instance.context.data
            start = context_data["startFrame"]
            end = context_data["endFrame"]

        instance.data["startFrame"] = start
        instance.data["endFrame"] = end

        euler_filter = instance.data.get("eulerFilter", False)
        root = instance.data["outCache"]

        # Deferred execution payload; picked up by the delay-run runner.
        instance.data["repr.Alembic._delayRun"] = {
            "func": self.export_alembic,
            "args": [
                root, outpath, start, end, euler_filter
            ],
        }

    def export_alembic(self, root, outpath, start, end, euler_filter):
        """Export *root* (and all descendants) to *outpath* for [start, end].

        Duplicate-named nodes are temporarily instanced to unique names
        (Alembic export errors on duplicates) and cleaned up afterwards;
        the export is retried once on RuntimeError.
        """
        from reveries.maya import io, lib, capsule
        from maya import cmds

        # NOTE(review): contextlib.nested exists only on Python 2 (Maya's
        # interpreter at the time this was written).
        with contextlib.nested(
            capsule.no_undo(),
            capsule.no_refresh(),
            capsule.evaluation("off"),
            capsule.maintained_selection(),
        ):
            # Selection may change if there are duplicate named nodes and
            # require instancing them to resolve
            with capsule.delete_after() as delete_bin:

                # (NOTE) We need to check any duplicate named nodes, or
                #        error will raised during Alembic export.
                result = lib.ls_duplicated_name(root)
                duplicated = [n for m in result.values() for n in m]
                if duplicated:
                    self.log.info("Duplicate named nodes found, resolving...")
                    # Duplicate it so we could have a unique named new node
                    unique_named = list()
                    for node in duplicated:
                        new_nodes = cmds.duplicate(node,
                                                   inputConnections=True,
                                                   renameChildren=True)
                        new_nodes = cmds.ls(new_nodes, long=True)
                        unique_named.append(new_nodes[0])
                        # New nodes will be deleted after the export
                        delete_bin.extend(new_nodes)

                    # Replace duplicate named nodes with unique named
                    root = list(set(root) - set(duplicated)) + unique_named

                # Expand the root set to every descendant so the whole
                # hierarchy gets exported.
                for node in set(root):
                    # (NOTE) If a descendent is instanced, it will appear only
                    #        once on the list returned.
                    root += cmds.listRelatives(node,
                                               allDescendents=True,
                                               fullPath=True,
                                               noIntermediate=True) or []
                root = list(set(root))
                cmds.select(root, replace=True, noExpand=True)

                def _export_alembic():
                    io.export_alembic(
                        outpath,
                        start,
                        end,
                        selection=True,
                        renderableOnly=True,
                        writeVisibility=True,
                        writeCreases=True,
                        worldSpace=True,
                        eulerFilter=euler_filter,
                        attr=[
                            lib.AVALON_ID_ATTR_LONG,
                        ],
                        attrPrefix=[
                            "ai",  # Write out Arnold attributes
                            "avnlook_",  # Write out lookDev controls
                        ],
                    )

                auto_retry = 1
                while auto_retry:
                    try:
                        _export_alembic()
                    except RuntimeError as err:
                        if auto_retry:
                            # (NOTE) Auto re-try export
                            # For unknown reason, some artist may encounter
                            # runtime error when exporting but re-run the
                            # publish without any change will resolve.
                            auto_retry -= 1
                            self.log.warning(err)
                            self.log.warning("Retrying...")
                        else:
                            raise err
                    else:
                        break
| [
"davidlatwe@gmail.com"
] | davidlatwe@gmail.com |
499c1470a1433ed4086dcaf206216e5fda9b4ec6 | a839135eae95f745f1d9edb370ac459854042cce | /tests/test_feed.py | 4b9c19439fe949c370936b6b86e25b4acde5b1bb | [
"MIT"
] | permissive | d21d3q/thermalprinter | 8afae538fa81055bf47710390af1c213b86455fc | a502fe8a7b7ab5a0773e92a37e6539f73b34b950 | refs/heads/master | 2023-03-16T01:49:52.478726 | 2022-06-02T10:52:23 | 2022-06-02T10:52:23 | 164,908,229 | 0 | 0 | MIT | 2019-01-09T17:31:32 | 2019-01-09T17:31:25 | Python | UTF-8 | Python | false | false | 771 | py | # coding: utf-8
import pytest
from thermalprinter.exceptions import ThermalPrinterValueError
def test_changing_no_value(printer):
    # feed() with no argument advances exactly one line.
    # NOTE(review): later tests assert cumulative counts (42 + 1), which
    # implies the `printer` fixture is shared across tests — confirm scope
    # in conftest.
    printer.feed()
    assert printer.feeds == 1
def test_changing_good_value(printer):
    # A valid int feeds that many lines; the +1 accounts for the single
    # feed done earlier on the shared `printer` fixture.
    printer.feed(42)
    assert printer.feeds == 42 + 1
def test_bad_value__not_int(printer):
    # A string argument must raise and leave the feed counter untouched.
    with pytest.raises(ThermalPrinterValueError):
        printer.feed('42')
    assert printer.feeds == 42 + 1
def test_changing_bad_value__not_in_range_low(printer):
    # A negative feed count must raise and leave the counter unchanged.
    with pytest.raises(ThermalPrinterValueError):
        printer.feed(-42)
    assert printer.feeds == 42 + 1
def test_changing_bad_value__not_in_range_high(printer):
    # 512 is above the accepted range and must raise without feeding.
    with pytest.raises(ThermalPrinterValueError):
        printer.feed(512)
    assert printer.feeds == 42 + 1
| [
"contact@tiger-222.fr"
] | contact@tiger-222.fr |
756caf90edd534e5f336c64ff1742c1aa610a6d9 | 945f9c5c34b42fd7863c525f7e54d2c88a5950e6 | /pyppl_strict.py | c29178d57ce1b3515b5241aa48a033f133e73c5d | [
"MIT"
] | permissive | stjordanis/pyppl_strict | 0c517a4e803e039b09602c385a75cbcd773514fc | efd4d361ddda3f95b1249cee612ef9f5d7b46123 | refs/heads/master | 2022-09-28T02:09:19.641392 | 2020-06-06T05:14:33 | 2020-06-06T05:14:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,010 | py | """More strict check of job success for PyPPL
Features:
1. make sure all outputs have been generated
2. allow custom returncode settings
3. allow a custom script to check the output file
"""
from os import utime
import random
import cmdy
from pyppl.plugin import hookimpl
from pyppl.config import config
from pyppl.jobmgr import STATES
from pyppl.utils import always_list, fs, filesig
from pyppl._proc import OUT_VARTYPE
from pyppl._job import RC_NO_RCFILE
__version__ = "0.0.7"

# Offsets layered on top of a job's own return code to encode *why* it
# failed: +5000 -> an output file was not generated, +10000 -> the custom
# `expect` check failed.  show_error() decodes these again.
RC_NO_OUTFILE = 5000
RC_EXPECT_FAIL = 10000

# Plugin defaults registered on PyPPL's global config.
config.config.strict_rc = [0]
config.config.strict_expect = ""
def strict_rc_converter(rc):
    """Normalise a `strict_rc` setting into a list of ints containing 0.

    Accepts a falsy value (-> [0]), a comma-separated string, a single
    int, or any iterable of ints.  0 is always prepended when missing so
    a zero return code always counts as success.
    """
    if not rc:
        return [0]
    if isinstance(rc, str):
        rc = always_list(rc)
    elif isinstance(rc, int):
        # BUG FIX: a bare non-zero int (e.g. strict_rc=1) used to fall
        # through to list(rc) and raise TypeError; wrap it instead.
        rc = [rc]
    rc = list(rc)
    if 0 not in rc:
        rc.insert(0, 0)
    return rc
def show_error(job, total):
    """Log a detailed failure report for *job*.

    Decodes the composite return code (see the RC_* offsets), points at
    the script/stdout/stderr files, and echoes the tail of stderr when
    the job was not already configured to echo it.  When the process'
    error policy is 'ignore', only a warning line is emitted.
    """
    # Decode the composite rc back into a human-readable reason.
    if job.rc >= RC_EXPECT_FAIL:
        msg = '%s (Expectation failed)' % (job.rc - RC_EXPECT_FAIL)
    elif job.rc >= RC_NO_OUTFILE:
        msg = '%s (Output file/dir not generated)' % (job.rc - RC_NO_OUTFILE)
    elif job.rc == RC_NO_RCFILE:
        msg = '- (No RC file generated)'
    else:
        msg = '%s (Script failed)' % job.rc

    if job.proc.errhow == 'ignore':
        job.logger(
            f'Failed but ignored (totally {total}). Return code: {msg}.',
            level='warning',
            plugin='strict'
        )
        return

    job.logger(f'Failed (totally {total}). Return code: {msg}.',
               level='failed',
               plugin='strict')
    job.logger(f'Script: {job.dir / "job.script"}',
               level='failed', plugin='strict')
    job.logger(f'Stdout: {job.dir / "job.stdout"}',
               level='failed', plugin='strict')
    job.logger(f'Stderr: {job.dir / "job.stderr"}',
               level='failed', plugin='strict')

    # errors are not echoed, echo them out
    if (job.index not in job.proc.config.get('echo_jobs', [])
            or 'stderr' not in job.proc.config.get('echo_types', {})):
        job.logger('Check STDERR below:', level='failed', plugin='strict')
        errmsgs = []
        if job.dir.joinpath('job.stderr').exists():
            errmsgs = job.dir.joinpath('job.stderr').read_text().splitlines()
        if not errmsgs:
            errmsgs = ['<EMPTY STDERR>']
        # Only the last 20 lines of a long stderr are shown.
        for errmsg in errmsgs[-20:] if len(errmsgs) > 20 else errmsgs:
            job.logger(errmsg, level='failed', plugin='strict')
        if len(errmsgs) > 20:
            job.logger(
                '[ Top {top} line(s) ignored, see all in stderr file. ]'.
                format(top=len(errmsgs) - 20),
                level='failed', plugin='strict'
            )
@hookimpl
def logger_init(logger):
    """Register the extra log level and sublevels this plugin emits."""
    logger.add_level('FAILED', 'ERROR')
    logger.add_sublevel('OUTFILE_NOT_EXISTS', -1)
    logger.add_sublevel('EXPECTATION_FAILED', -1)
@hookimpl
def proc_init(proc):
    """Attach the plugin's per-process configs (strict_rc / strict_expect)."""

    def strict_expect_converter(expect):
        # Compile the expectation into the process' template engine unless
        # it is already a template instance.
        if isinstance(expect, proc.template):
            return expect
        return proc.template(expect, **proc.envs)

    proc.add_config('strict_rc', default=0, converter=strict_rc_converter)
    proc.add_config('strict_expect',
                    default='',
                    converter=strict_expect_converter)
@hookimpl
def job_succeeded(job):
    """Check rc, expect and outfiles to tell if a job really succeeded.

    Side effect: on failure the reason is encoded into ``job.rc`` by
    adding RC_NO_OUTFILE or RC_EXPECT_FAIL (decoded again by show_error).
    """
    if job.rc not in job.proc.config.strict_rc:
        return False

    # check if all outputs are generated
    # refresh stat
    outdir, mtime = filesig(job.dir.joinpath('output'), job.proc.dirsig)
    utime(outdir, (mtime, mtime))
    for outtype, outdata in job.output.values():
        # Variable-type outputs have no file on disk to verify.
        if outtype not in OUT_VARTYPE and not fs.exists(outdata):
            job.rc += RC_NO_OUTFILE
            job.logger('Outfile not generated: {}'.format(outdata),
                       slevel="OUTFILE_NOT_EXISTS",
                       level='debug',
                       plugin='strict')
            return False

    # Run the user-supplied expectation command, if any; a non-zero exit
    # marks the job failed.
    expect_cmd = job.proc.config.strict_expect.render(job.data)
    if expect_cmd:
        cmd = cmdy.bash(c=expect_cmd, _raise=False)  # pylint: disable=no-member
        if cmd.rc != 0:
            job.rc += RC_EXPECT_FAIL
            job.logger(expect_cmd,
                       slevel="EXPECTATION_FAILED",
                       level='error',
                       plugin='strict')
            return False
    return True
@hookimpl
def proc_postrun(proc, status):
    """After a process finishes, report details for one random failed job."""
    if status != 'failed':
        return
    bad_states = (STATES.ENDFAILED, STATES.DONEFAILED,
                  STATES.SUBMITFAILED, STATES.BUILTFAILED,
                  STATES.KILLED, STATES.KILLFAILED)
    failed_jobs = [job for job in proc.jobs if job.state in bad_states]
    # Fall back to the first job when no job carries a failed state.
    failed_jobs = failed_jobs or [proc.jobs[0]]
    show_error(random.choice(failed_jobs), total=len(failed_jobs))
| [
"pwwang@pwwang.com"
] | pwwang@pwwang.com |
a5fb8ff0526b379fe6f367eb993bd6f0943b7aac | f8b4461f66801fa624ec1798c4547b6f5c9bdf51 | /SpaceInvaders/train.py | e950d4b70255c9c88363bf6d9d3eb624142b5460 | [] | no_license | jcw024/NEAT-retro | 5c713aea81efecc108c88e7dde434586c66e35a1 | fdf9a6044e934fb658ad86cad2730690b7af8975 | refs/heads/master | 2021-10-24T13:57:41.696033 | 2021-10-23T02:42:49 | 2021-10-23T02:42:49 | 165,330,293 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | #! /usr/bin/env python
from __future__ import division
import retro
import numpy as np
import cv2
import neat
import pickle
import cProfile
import argparse
# CLI: optionally resume NEAT evolution from a saved checkpoint file.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', '-c', type=str, help='checkpoint file to continue previous run')
args = parser.parse_args()
# trains single genome within generation
def eval_genomes(genome, config):
    """Play one SpaceInvaders episode with *genome* and return its fitness.

    The screen is downscaled 5x, converted to grayscale, flattened and
    fed to a recurrent network; each network output is applied for 3
    emulator steps.  Fitness is the accumulated game reward; the episode
    is aborted after 400 steps without improvement.
    """
    ob = env.reset()
    ac = env.action_space.sample()  # NOTE(review): sampled but never used

    # Network input resolution: observation downscaled by 5 in each axis.
    inx, iny, inc = env.observation_space.shape
    inx = int(inx/5)
    iny = int(iny/5)

    net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)

    current_max_fitness = 0
    fitness_current = 0
    counter = 0  # steps since the fitness last improved
    lives_tot = 3

    #cv2.namedWindow("network_input", cv2.WINDOW_NORMAL) #show input pixels to neat
    done = False

    while not done:
        env.render()
        #shrink screen for fewer pixel observations per loop
        #scaledimg = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
        #scaledimg = cv2.resize(scaledimg, (iny, inx))
        ob = cv2.resize(ob, (inx, iny))
        ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
        ob = np.reshape(ob, (inx,iny))
        #cv2.imshow('network_input', scaledimg)
        #cv2.waitKey(1)

        imgarray = np.ndarray.flatten(ob)
        nnOutput = net.activate(imgarray)

        # Apply the same action for 3 frames to speed up evaluation.
        ob, rew1, done1, info = env.step(nnOutput) #3 steps to skip some frames
        ob, rew2, done2, info = env.step(nnOutput)
        ob, rew3, done3, info = env.step(nnOutput)
        rew = (rew1 + rew2 + rew3)

        # Track remaining lives (the penalty is currently disabled).
        lives = info['lives']
        if lives < lives_tot:
            #fitness_current -= 100
            lives_tot = lives

        fitness_current += rew
        if fitness_current > current_max_fitness:
            current_max_fitness = fitness_current
            counter = 0
        else:
            counter += 1

        # Stop at episode end or after 400 stagnant steps.
        if any([done1, done2, done3]) or counter == 400:
            done = True
            genome.fitness = fitness_current
            print(genome.fitness)

    return genome.fitness
#setup training population parameters
def main(checkpoint=None):
    """Run NEAT evolution, optionally resuming from *checkpoint*.

    Evolves for 40 generations with 5 parallel evaluators, checkpointing
    every 10 generations (or hourly), and pickles the winning genome.
    """
    if checkpoint is not None:
        p = neat.checkpoint.Checkpointer.restore_checkpoint(checkpoint)
    else:
        p = neat.Population(config)
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    p.add_reporter(neat.Checkpointer(generation_interval=10,time_interval_seconds=3600))

    pe = neat.ParallelEvaluator(5, eval_genomes) #run on multiple cores
    winner = p.run(pe.evaluate, 40) #do training for 40 generations

    with open('winner_pop50_gen40.pkl', 'wb') as output:
        print('writing winner gen to ', output)
        pickle.dump(winner, output)
if __name__ == '__main__':
    # Globals used by eval_genomes: the retro environment and NEAT config.
    env = retro.make('SpaceInvaders-Snes', '1Player.ClassicMode.UprightCabinet.state')
    imgarray = []  # NOTE(review): unused — eval_genomes builds its own
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         'config')
    main(args.checkpoint)
| [
"="
] | = |
ebe5fe916109b377ee1b3e2552daf2bebbd35c57 | 2989c0d93d88da28a17bc37acca5f0086baab580 | /thesis_scripts/yumi_main/source/camera_robotCAL_astra.py | 04a555379cceaa993da2f930ebe019bdae0a531a | [] | no_license | catree/Ms-Thesis-CVUT | 5307f8c77517de3a39579591206021f4300a8455 | 558122adf3a57ef627087ac84e9452aedb80f6cb | refs/heads/master | 2020-07-11T11:37:36.469246 | 2019-06-23T21:21:34 | 2019-06-23T21:21:34 | 204,523,422 | 1 | 0 | null | 2019-08-26T19:08:51 | 2019-08-26T17:08:51 | null | UTF-8 | Python | false | false | 9,830 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Cesar Sinchiguano <cesarsinchiguano@hotmail.es>
#
# Distributed under terms of the MIT License license.
"""
Finally I got in xyz coordinates according to ROS
"""
import sys
sys.path.insert(0, '/home/casch/yumi_depends_ws/src/thesis_pkg/yumi_main/scripts/project')
from thesis_library import *
# Latest camera pose received on /pose_yumi_tcp_camera: written by
# pose_camera_callback, read by publish_transforms.
euler_angles_=[0,0,0]
position_=[0.0,0.0,0.0]
def data_to_transform(r_matrix,t_position):
    """Assemble a 4x4 homogeneous transform from a 3x3 rotation matrix
    and a 3x1 translation column."""
    upper = np.hstack((r_matrix, t_position))
    return np.vstack((upper, [0.0, 0.0, 0.0, 1.0]))
# convert a 4x4 matrix to a Pose message
def transform_to_pose(mat):
    """Convert a 4x4 homogeneous transform into a geometry_msgs Pose."""
    pose = Pose()
    # Translation is the last column of the transform.
    pose.position.x = mat[0,3]
    pose.position.y = mat[1,3]
    pose.position.z = mat[2,3]
    # Orientation: quaternion extracted from the rotation part.
    quat = tf.transformations.quaternion_from_matrix(mat)
    pose.orientation.x = quat[0]
    pose.orientation.y = quat[1]
    pose.orientation.z = quat[2]
    pose.orientation.w = quat[3]
    return pose
def pose_camera_callback(msg):
    """ROS subscriber callback: cache the received Pose in module globals.

    Updates ``position_`` and ``euler_angles_`` (roll/pitch/yaw converted
    from the quaternion) for ``publish_transforms`` to consume.
    """
    global euler_angles_
    global position_
    position_=[msg.position.x,msg.position.y,msg.position.z]
    quaternion_=msg.orientation
    quaternion_tmp=[quaternion_.x,quaternion_.y,quaternion_.z,quaternion_.w]
    roll_, pitch_, yaw_=tf.transformations.euler_from_quaternion(quaternion_tmp)
    euler_angles_=[roll_,pitch_,yaw_]
def publish_transforms(br):
    """Broadcast the yumi_tcp -> camera_link transform over TF.

    Uses the pose cached by ``pose_camera_callback`` and composes the
    OpenCV-convention orientation with a fixed rotation so camera_link
    follows the ROS convention before broadcasting.
    """
    global euler_angles_
    global position_
    # (Kept for reference: world -> robot base / calibration target frames.)
    # t0 = geometry_msgs.msg.TransformStamped()
    # t0.header.stamp = rospy.Time.now()
    # t0.header.frame_id = "world"
    # t0.child_frame_id = "panda_link0"
    # #t0.child_frame_id = "yumi_base_link"
    # #t0.child_frame_id = "base_link"
    # t0.transform.translation.x = 0.0
    # t0.transform.translation.y = 0.0
    # t0.transform.translation.z = 0.0
    # tmp_rot=np.array([[1,0, 0], [0, 1, 0],[0, 0, 1]])
    # tmp_trans=np.array([[0.30],[0],[0] ])
    # myrot =np.hstack((tmp_rot,tmp_trans))
    # myrot=np.vstack((myrot,[0.0,0.0,0.0,1.0]))
    # q0 = tf.transformations.quaternion_from_matrix(myrot)
    # t0.transform.rotation.x = q0[0]
    # t0.transform.rotation.y = q0[1]
    # t0.transform.rotation.z = q0[2]
    # t0.transform.rotation.w = q0[3]
    # br.sendTransform(t0)

    # t1 = geometry_msgs.msg.TransformStamped()
    # t1.header.stamp = rospy.Time.now()
    # t1.header.frame_id = "world"
    # t1.child_frame_id = "target"
    # t1.transform.translation.x = 0.30
    # t1.transform.translation.y = 0.0
    # t1.transform.translation.z = 0.0
    # q1 = tf.transformations.quaternion_from_euler(0, 0, 0)
    # t1.transform.rotation.x = q1[0]
    # t1.transform.rotation.y = q1[1]
    # t1.transform.rotation.z = q1[2]
    # t1.transform.rotation.w = q1[3]
    # br.sendTransform(t1)

    # Active transform: camera pose relative to the robot's TCP.
    t2 = geometry_msgs.msg.TransformStamped()
    t2.header.stamp = rospy.Time.now()
    t2.header.frame_id = "yumi_tcp"
    t2.child_frame_id = "camera_link"
    t2.transform.translation.x = 1.0*position_[0]
    t2.transform.translation.y = 1.0*position_[1]
    t2.transform.translation.z = 1.0*position_[2]
    #orientation according to openCV
    q3 = tf.transformations.quaternion_from_euler(euler_angles_[0],euler_angles_[1],euler_angles_[2])
    #orientation of camera link. which is parallel to world frame
    q2 = tf.transformations.quaternion_from_euler(math.pi/2,-math.pi/2,0)
    #correction of camera frame according to openCV orientation
    q4=quaternion_multiply(q3,q2)#rotation,origin
    t2.transform.rotation.x = q4[0]
    t2.transform.rotation.y = q4[1]
    t2.transform.rotation.z = q4[2]
    t2.transform.rotation.w = q4[3]
    br.sendTransform(t2)
def print_information(rotation_vector,translation_vector,rvec_matrix):
    """Print the solvePnP pose both ways.

    First the world (chessboard) origin expressed in the camera frame
    (R, t), then the inverse — the camera origin expressed in the
    board/TCP frame (R.T, -R.T @ t).
    """
    global euler_angles  # NOTE(review): no trailing underscore and unused here — confirm intent
    print("\n\nThe world coordinate systems origin in-->> camera's coordinate system:")
    print("===rotation_vector:")
    print(rotation_vector)
    print("===rotation_matrix:")
    print(rvec_matrix)
    print("===translation_vector:")
    print(translation_vector)
    print("\n\nThe camera origin in -->>Yumi TCP coordinates system:")
    print("===camera rvec_matrix:")
    print(rvec_matrix.T)
    print("===camera translation_vector:")
    # Inverse translation: -R.T @ t.
    print(-np.dot(rvec_matrix.T, translation_vector))
    print('\n\n-----------------------------------------------------')
def draw_show_on_image(frame,axi_imgpts,corners,ret,line_width=2):
    """Overlay the projected X/Y/Z axes on *frame*, display it, and save a copy.

    *axi_imgpts* holds the projected image points of the three axis tips
    plus the origin (index 3), as produced by cv2.projectPoints.
    """
    # We can now plot limes on the 3D image using the cv2.line function,numpy.ravel-->Return a contiguous flattened array.
    #cv2.drawChessboardCorners(frame, (8,9), corners, ret)#column and rows 7x9 after the calibration i do not need anymore
    cv2.line(frame, tuple(axi_imgpts[3].ravel()), tuple(axi_imgpts[1].ravel()), (0,255,0), line_width) #GREEN Y
    cv2.line(frame, tuple(axi_imgpts[3][0]), tuple(axi_imgpts[2].ravel()), (255,0,0), line_width) #BLUE Z
    cv2.line(frame, tuple(axi_imgpts[3,0]), tuple(axi_imgpts[0].ravel()), (0,0,255), line_width) #RED x
    # Axis labels, offset a little from each axis tip.
    text_pos = (axi_imgpts[0].ravel() + np.array([3.5,-7])).astype(int)
    cv2.putText(frame,'X', tuple(text_pos),cv2.FONT_HERSHEY_PLAIN, 1, (0, 0,255))
    text_pos = (axi_imgpts[1].ravel() + np.array([3.5,-7])).astype(int)
    cv2.putText(frame,'Y', tuple(text_pos),cv2.FONT_HERSHEY_PLAIN, 1, (0, 0,255))
    text_pos = (axi_imgpts[2].ravel() + np.array([3.5,-7])).astype(int)
    cv2.putText(frame,'Z', tuple(text_pos),cv2.FONT_HERSHEY_PLAIN, 1, (0, 0,255))
    text_pos = (axi_imgpts[3].ravel() + np.array([200,50])).astype(int)
    cv2.putText(frame,'1unit=1cm', tuple(text_pos),cv2.FONT_HERSHEY_PLAIN, 1, (0, 0,255))
    # Display the resulting frame
    cv2.imshow('Target locator',frame)
    cv2.imwrite('test.jpg', frame)
def locate_target_orientation(frame,ret, corners):
    """Solve the chessboard pose relative to the camera with solvePnP.

    Builds the 8x9 board's 3-D model points (1 cm squares, z = 0 plane),
    solves PnP against the detected *corners*, and projects a small XYZ
    axis triad for drawing.  Returns (axis image points, corners, ret,
    rotation matrix, translation vector, rotation vector).
    """
    #Vertical configuration
    x,y=np.meshgrid(range(8),range(9))#col row vertical
    world_points_3d=np.hstack((y.reshape(72,1)*0.01,x.reshape(72,1)*0.01,np.zeros((72,1)))).astype(np.float32)
    # #astra camera
    # #Intrinsic parameters===>>> from the intrinsic calibration!!!!
    # list_matrix=[506.1338725148187, 0, 313.7031356480479, 0, 506.4623458309018, 246.4363947238303, 0, 0, 1]
    # cameraMatrix_ar=np.asarray(list_matrix).reshape(3,3)
    # distCoef=[-0.004755818169472225, -0.04879035388633979, -0.002404621345494799, 0.001159841420888698, 0]
    # distCoef_ar=np.asarray(distCoef).reshape(len(distCoef),1)
    #May_2_2019
    # Hard-coded intrinsics from the May 2 2019 calibration of the Astra camera.
    list_matrix=[513.916180, 0.000000, 308.570130, 0.000000, 514.377333, 240.628363, 0.000000, 0.000000, 1.000000]
    cameraMatrix_ar=np.asarray(list_matrix).reshape(3,3)
    distCoef=[0.071388, -0.188724, -0.002271, 0.002146, 0.000000]
    distCoef_ar=np.asarray(distCoef).reshape(len(distCoef),1)
    #Rotation vector (radians)
    (success, rotation_vector, translation_vector) = cv2.solvePnP(world_points_3d, corners, cameraMatrix_ar, distCoef_ar, flags=cv2.SOLVEPNP_ITERATIVE)
    # World coordinates system
    # Axis triad tip points in metres (9/8/6 cm) plus the origin.
    axis = np.float32([[0.09,0,0],[0,0.08,0],[0,0,0.06],[0,0,0]])
    axis_imgpts, jacobian = cv2.projectPoints(axis, rotation_vector, translation_vector,cameraMatrix_ar, distCoef_ar)
    # Rotation_vector into rotation_matrix
    rvec_matrix = cv2.Rodrigues(rotation_vector)[0]
    return axis_imgpts,corners,ret,rvec_matrix,translation_vector,rotation_vector
def main():
    """Main loop: detect the chessboard, solve its pose, and broadcast the
    resulting yumi_tcp -> camera_link transform at 10 Hz until 'q' is
    pressed.

    NOTE(review): this file is Python 2 (print statements below).
    """
    counter=0
    tmpNamec='temp2.jpg'  # NOTE(review): unused
    pub_pose = rospy.Publisher('pose_yumi_tcp_camera', Pose, queue_size=10)
    sub_pose = rospy.Subscriber('/pose_yumi_tcp_camera', Pose, pose_camera_callback)
    br = tf2_ros.TransformBroadcaster()
    rate = rospy.Rate(10) # 10hz
    import sys
    print "This is the name of the script: ", sys.argv[0]
    #flag=sys.argv[1]
    while not rospy.is_shutdown():
        counter+=1
        # Capture frame-by-frame
        #frame=cv2.imread('temp3.jpg')
        frame=camObj.get_image()
        #print(type(frame))
        if frame is None:
            print('no image!!!')
            continue
        # 'q' saves the current frame and quits.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.imwrite('temp3.jpg', frame)
            break
        try:
            # 2D image points
            ret, corners = cv2.findChessboardCorners(frame, (8,9))
            corners=corners.reshape(-1,2)#undefied number of rows
            if not ret:
                print('\nPlease, locate well the calibration target!!!')
                continue
        except Exception as ex:
            # reshape raises when no corners were found (corners is None).
            print('\nStatus of findChessboardCorners: {}'.format(ret))
            print('Please, locate well the calibration target!!!')
            print(ex)
            print('-------------------------------------------------')
            continue
        # Extrinsic calibration!!!
        axis_imgpts,corners,ret,rvec_matrix,translation_vector,rotation_vector= locate_target_orientation(frame,ret, corners)
        # print information about rotation and translation for the camera and world frame
        print_information(rotation_vector,translation_vector,rvec_matrix)
        #draw and display lines and text on the image
        draw_show_on_image(frame,axis_imgpts,corners,ret)
        # get transform matrix from rotation and translation of the camera frame relative to the world frame
        mat=data_to_transform(rvec_matrix.T,-np.dot(rvec_matrix.T, translation_vector))
        # get the pose of the camera frame relative to the world frame
        pose=transform_to_pose(mat)
        # publish pose of the camera frame
        pub_pose.publish(pose)
        # publish transform for the following coordinate frames: target, camera and world
        publish_transforms(br)
        # we should expect to go through the loop 10 times per second
        rate.sleep()
        print('\ncounter:',counter,'\n')
    # When everything done, release the capture
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # `camera` comes from thesis_library (star import above).
    camObj=camera()
    main()
| [
"cesarsinchiguano@hotmail.es"
] | cesarsinchiguano@hotmail.es |
0e0a01939fd310d7aafb5cccf0c79513c9cf03ab | a7bc6a7c45c58ac08e295b77d4a19769bfd97463 | /NMT/Transformers_NMT/process_data.py | 98081e75fabdfaea0b0488852ea04fb70cffe3ca | [] | no_license | pnageshkar/NLP_pytorch_project | cc2a3fb8a41e7d2d8a794561e1b9a971b36f2cfa | 57d2ac65a0cf21dca43a9329f6c25e6a23854e15 | refs/heads/master | 2022-11-14T14:59:03.099067 | 2020-07-09T09:05:10 | 2020-07-09T09:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,056 | py | """
@file : process_data.py
@author: xiaolu
@time : 2019-12-26
"""
'''
主要功能:
1. 对中文文本和英文文本建立词典 然后存到vocab.pkl
2. 将训练集和验证集转化为id序列 然后存到data.pkl
'''
import os
import pickle
from collections import Counter
import jieba
import nltk
from tqdm import tqdm
from config import Config
from utils import normalizeString, encode_text
def build_vocab(token, word2idx, idx2char):
    """Add *token* to both lookup tables if it is not already present.

    Ids are assigned sequentially in insertion order; the two mappings
    are mutated in place.
    """
    if token in word2idx:
        return
    idx = len(word2idx)
    word2idx[token] = idx
    idx2char[idx] = token
def process(file, lang='zh'):
    '''
    Build a vocabulary from a text corpus.

    :param file: path to a UTF-8 text file, one sentence per line
    :param lang: 'en' tokenizes with nltk; anything else segments with jieba
    :return: (word2idx, idx2char) lookup tables; ids 0-3 are reserved for
             the <pad>/<sos>/<eos>/<unk> specials
    '''
    print('processing {}...'.format(file))
    with open(file, 'r', encoding='utf-8') as f:
        data = f.readlines()

    word_freq = Counter()
    lengths = []

    for line in tqdm(data):
        sentence = line.strip()
        if lang == 'en':
            # English: lowercase first, then tokenize
            sentence_en = sentence.lower()
            tokens = [normalizeString(s) for s in nltk.word_tokenize(sentence_en)]  # tokenize, then clean each token
            word_freq.update(list(tokens))
            vocab_size = Config.n_src_vocab  # given by the hyper-parameters
            # NOTE(review): the 'en' branch uses n_src_vocab while __main__
            # treats the Chinese corpus as the source side -- confirm these
            # two constants are not swapped.
        else:
            # Chinese: segment with jieba
            seg_list = jieba.cut(sentence.strip())
            tokens = list(seg_list)
            word_freq.update(list(tokens))
            vocab_size = Config.n_tgt_vocab

        lengths.append(len(tokens))  # true token length of each sentence

    words = word_freq.most_common(vocab_size - 4)  # keep the (vocab_size - 4) most frequent tokens
    word_map = {k[0]: v + 4 for v, k in enumerate(words)}  # token -> id, shifted past the 4 specials
    word_map['<pad>'] = 0
    word_map['<sos>'] = 1
    word_map['<eos>'] = 2
    word_map['<unk>'] = 3
    print(len(word_map))
    print(words[:100])

    word2idx = word_map
    idx2char = {v: k for k, v in word2idx.items()}

    return word2idx, idx2char
def get_data(in_file, out_file):
    '''
    Load a parallel corpus and turn it into id sequences.

    Relies on the module-level lookup tables ``src_char2idx`` and
    ``tgt_char2idx`` built in ``__main__``.

    :param in_file: path to the Chinese (source) corpus
    :param out_file: path to the English (target) corpus
    :return: list of {'in': src_ids, 'out': tgt_ids} samples
    '''
    print('getting data {}->{}...'.format(in_file, out_file))
    with open(in_file, 'r', encoding='utf-8') as file:
        in_lines = file.readlines()
    with open(out_file, 'r', encoding='utf-8') as file:
        out_lines = file.readlines()

    samples = []

    for i in tqdm(range(len(in_lines))):
        sentence_zh = in_lines[i].strip()
        tokens = jieba.cut(sentence_zh.strip())
        in_data = encode_text(src_char2idx, tokens)  # encode_text(src_char2idx, tokens) maps tokens to an id sequence

        sentence_en = out_lines[i].strip().lower()
        tokens = [normalizeString(s.strip()) for s in nltk.word_tokenize(sentence_en)]  # pre-process the English tokens
        out_data = [Config.sos_id] + encode_text(tgt_char2idx, tokens) + [Config.eos_id]  # to ids, wrapped in <sos>/<eos>

        # maxlen_in=50 and maxlen_out=100 come from the hyper-parameters;
        # over-long samples and samples containing <unk> are dropped.
        if len(in_data) < Config.maxlen_in and len(out_data) < Config.maxlen_out and Config.unk_id not in in_data and Config.unk_id not in out_data:
            samples.append({'in': in_data, 'out': out_data})

    return samples
if __name__ == '__main__':
    # Load the vocabulary; build it (and save it) if it does not exist yet.
    if os.path.isfile(Config.vocab_file):
        with open(Config.vocab_file, 'rb') as file:
            data = pickle.load(file)
        src_char2idx = data['dict']['src_char2idx']
        src_idx2char = data['dict']['src_idx2char']
        tgt_char2idx = data['dict']['tgt_char2idx']
        tgt_idx2char = data['dict']['tgt_idx2char']
    else:
        src_char2idx, src_idx2char = process(Config.train_translation_zh_filename, lang='zh')
        tgt_char2idx, tgt_idx2char = process(Config.train_translation_en_filename, lang='en')
        print("输入文本字典的大小:", len(src_char2idx))
        print("输出文本字典的大小:", len(tgt_char2idx))
        data = {
            'dict': {
                'src_char2idx': src_char2idx,
                'src_idx2char': src_idx2char,
                'tgt_char2idx': tgt_char2idx,
                'tgt_idx2char': tgt_idx2char
            }
        }
        # Persist the freshly built vocabulary for later runs.
        with open(Config.vocab_file, 'wb') as file:
            pickle.dump(data, file)

    # Load the training and validation sets.
    train = get_data(Config.train_translation_zh_filename, Config.train_translation_en_filename)
    valid = get_data(Config.valid_translation_zh_filename, Config.valid_translation_en_filename)
    data = {
        'train': train,
        'valid': valid
    }
    # The stored data are id sequences: Chinese mapped via src_char2idx and
    # English via tgt_char2idx with <sos>/<eos> added. Nothing is padded
    # here; only the maximum lengths are enforced.
    print('num_train: ' + str(len(train)))
    print('num_valid: ' + str(len(valid)))
    with open(Config.data_file, 'wb') as file:
        pickle.dump(data, file)
| [
"lu.xiao@tcl.com"
] | lu.xiao@tcl.com |
ea1091477d699b137e6ab824d5bb4743d7cd9fe0 | 18f672d3239d199770756ebb8000f6544b5093fb | /stock/migrations/0002_product_name_short.py | ea5dd79e743fe0fc8e7ce026a967bf1d7ae275ab | [
"Apache-2.0"
] | permissive | pkimber/old-stock-migrated-to-gitlab | a8cc4adca8c90fe9fff134ff5fd31b37e914d3db | e712dd19684cdc2028bfea42c373c19ab3aab152 | refs/heads/master | 2021-06-15T10:58:03.884840 | 2017-04-13T15:23:00 | 2017-04-13T15:23:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-30 12:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the optional short-name
    # column to the stock.Product model.

    dependencies = [
        ('stock', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='name_short',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
| [
"code@pkimber.net"
] | code@pkimber.net |
d8e6164c4728fb4b27177e08f4714d8ae4094824 | 10123c03954bfd57e6b9ee0acbe93e61c11dc6d0 | /Permutations.py | 01368d7730607adce93caeb2b458812778a12f72 | [] | no_license | ramchinta/python | 4a720d27fd137d32d83326338ad1748c17cd5998 | e82305a822ea200086a0978a29c18ab65a3b18fb | refs/heads/master | 2020-09-02T06:49:30.365372 | 2020-05-09T08:22:57 | 2020-05-09T08:22:57 | 219,160,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | '''Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]'''
class Solution:
    def permute(self, nums):
        """Return every permutation of the distinct integers in ``nums``.

        :type nums: List[int]
        :rtype: List[List[int]]

        Uses in-place swapping with backtracking; ``nums`` is restored to
        its original order before returning, and the permutations come out
        in swap order (not lexicographic order).
        """
        size = len(nums)
        results = []

        def explore(pos):
            # Every slot is fixed: snapshot the current arrangement.
            if pos == size:
                results.append(nums[:])
            for j in range(pos, size):
                # Place the j-th candidate in slot `pos` ...
                nums[pos], nums[j] = nums[j], nums[pos]
                # ... complete the remaining slots ...
                explore(pos + 1)
                # ... then undo the swap (backtrack).
                nums[pos], nums[j] = nums[j], nums[pos]

        explore(0)
        return results
return output
print(Solution().permute([1,2,3]))
#[[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]] | [
"mithrachinta@gmail.com"
] | mithrachinta@gmail.com |
9ea9c27397cba8d779b2f23ebe41b720d863300d | 632efa9e1991bc632aaba4783e05c942afc77712 | /tests/py/test_goal.py | e180004421eec2fb754be4f7d4fe2dadeaa2aba3 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | astiob/liberapay.com | 82e2785b17a104197c483bf5dd1c1cc85cc1cb26 | 29842f7aeaaaca99f4427ff43ebdefd41223ff9c | refs/heads/master | 2020-04-05T23:39:24.843313 | 2016-04-19T13:02:07 | 2016-04-19T13:02:07 | 56,925,959 | 0 | 1 | null | 2016-04-23T15:22:00 | 2016-04-23T15:22:00 | null | UTF-8 | Python | false | false | 2,309 | py | from __future__ import print_function, unicode_literals
from decimal import Decimal
from liberapay.testing import Harness
class Tests(Harness):
    """Tests for the /username/goal endpoint (setting a funding goal)."""

    def setUp(self):
        self.alice = self.make_participant('alice')

    def change_goal(self, goal, goal_custom="", auth_as="alice"):
        # Helper: POST the goal form as alice (or as the given participant /
        # anonymously when auth_as is None) and return the raw response.
        return self.client.PxST(
            "/alice/goal",
            {'goal': goal, 'goal_custom': goal_custom},
            auth_as=self.alice if auth_as == 'alice' else auth_as
        )

    def test_changing_to_minus_1_asks_confirmation(self):
        # -1 means "stop accepting donations", which drops incoming tips.
        r = self.client.PxST('/alice/goal', {'goal': '-1'}, auth_as=self.alice)
        assert "Warning: Doing this will remove all the tips you are currently receiving." in r.text

    def test_wonky_custom_amounts_are_standardized(self):
        # Stray commas and excess decimal places are normalized server-side.
        self.change_goal("custom", ",100,100.00001")
        alice = self.alice.from_id(self.alice.id)
        assert alice.goal == 100100

    def test_anonymous_gets_403(self):
        response = self.change_goal("100.00", auth_as=None)
        assert response.code == 403, response.code

    def test_invalid_is_400(self):
        response = self.change_goal("cheese")
        assert response.code == 400, response.code

    def test_invalid_custom_amount_is_400(self):
        response = self.change_goal("custom", "cheese")
        assert response.code == 400, response.code

    def test_change_goal(self):
        # Each change is recorded as a 'set_goal' event, newest first.
        self.change_goal("custom", "100")
        self.change_goal("0")
        self.change_goal("custom", "1,100.00")
        self.change_goal("null", "")
        self.change_goal("custom", "400")

        actual = self.db.one("SELECT goal FROM participants")
        assert actual == Decimal("400.00")

        actual = self.db.all("""
            SELECT payload
              FROM events
             WHERE type = 'set_goal'
          ORDER BY ts DESC
        """)
        assert actual == ['400', None, '1100.00', '0', '100']

    def test_team_member_can_change_team_goal(self):
        team = self.make_participant('team', kind='group')
        team.add_member(self.alice)
        r = self.client.PxST(
            '/team/goal',
            {'goal': 'custom', 'goal_custom': '99.99'},
            auth_as=self.alice
        )
        assert r.code == 302
        assert team.refetch().goal == Decimal('99.99')
| [
"changaco@changaco.oy.lc"
] | changaco@changaco.oy.lc |
f596c523a1306f6053d5fbe033879f5d0178fc5c | 3c5c4c4fb296d08e9e984c4a60ae4fa147293e9a | /ceres/wallet/wallet_block_store.py | 5893e5e48d9f02ddc7a36cdca3c52cb93e78a186 | [
"Apache-2.0"
] | permissive | signingup/ceres-combineharvester | a8874ab11145e7ba2223b85483b96dea01054ad0 | aad918a03a4a522e0e2f3bac104d19d693d6bf79 | refs/heads/main | 2023-07-25T04:11:13.765471 | 2021-09-09T14:59:48 | 2021-09-09T14:59:48 | 404,918,382 | 1 | 0 | Apache-2.0 | 2021-09-10T01:22:20 | 2021-09-10T01:22:20 | null | UTF-8 | Python | false | false | 12,924 | py | from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import aiosqlite
from ceres.consensus.block_record import BlockRecord
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from ceres.types.coin_spend import CoinSpend
from ceres.types.header_block import HeaderBlock
from ceres.util.db_wrapper import DBWrapper
from ceres.util.ints import uint32, uint64
from ceres.util.lru_cache import LRUCache
from ceres.util.streamable import Streamable, streamable
from ceres.wallet.block_record import HeaderBlockRecord
@dataclass(frozen=True)
@streamable
class AdditionalCoinSpends(Streamable):
    # Wrapper so a list of coin spends can be (de)serialized with the
    # Streamable protocol and stored as a single blob per block.
    coin_spends_list: List[CoinSpend]
class WalletBlockStore:
    """
    This object handles HeaderBlocks and Blocks stored in DB used by wallet.

    Three tables are maintained: ``header_blocks`` (serialized header block
    records), ``block_records`` (chain metadata plus the peak flag) and
    ``additional_coin_spends`` (extra spends attached to a block). A small
    LRU cache fronts header-block reads.
    """

    db: aiosqlite.Connection
    db_wrapper: DBWrapper
    block_cache: LRUCache

    @classmethod
    async def create(cls, db_wrapper: DBWrapper):
        """Create the store, its tables and indices on the wrapped DB."""
        self = cls()

        self.db_wrapper = db_wrapper
        self.db = db_wrapper.db
        await self.db.execute("pragma journal_mode=wal")
        await self.db.execute("pragma synchronous=2")

        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, height int,"
            " timestamp int, block blob)"
        )

        await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")

        await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")

        await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")

        # Block records
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, height bigint, weight bigint, total_iters text,"
            "block blob, sub_epoch_summary blob, is_peak tinyint)"
        )

        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS additional_coin_spends(header_hash text PRIMARY KEY, spends_list_blob blob)"
        )

        # Height index so we can look up in order of height for sync purposes
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")

        await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
        await self.db.commit()
        self.block_cache = LRUCache(1000)

        return self

    async def _clear_database(self):
        """Delete every stored header block (block_records are untouched)."""
        cursor_2 = await self.db.execute("DELETE FROM header_blocks")
        await cursor_2.close()
        await self.db.commit()

    async def add_block_record(
        self,
        header_block_record: HeaderBlockRecord,
        block_record: BlockRecord,
        additional_coin_spends: List[CoinSpend],
    ):
        """
        Adds a block record to the database. This block record is assumed to be connected
        to the chain, but it may or may not be in the LCA path.
        """
        cached = self.block_cache.get(header_block_record.header_hash)
        if cached is not None:
            # Since write to db can fail, we remove from cache here to avoid potential inconsistency
            # Adding to cache only from reading
            self.block_cache.put(header_block_record.header_hash, None)

        if header_block_record.header.foliage_transaction_block is not None:
            timestamp = header_block_record.header.foliage_transaction_block.timestamp
        else:
            timestamp = uint64(0)
        cursor = await self.db.execute(
            "INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?)",
            (
                header_block_record.header_hash.hex(),
                header_block_record.height,
                timestamp,
                bytes(header_block_record),
            ),
        )

        await cursor.close()

        # Weight and total_iters are stored as fixed-width big-endian hex so
        # lexicographic string comparison matches numeric comparison.
        cursor_2 = await self.db.execute(
            "INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?, ?, ?, ?,?)",
            (
                header_block_record.header.header_hash.hex(),
                header_block_record.header.prev_header_hash.hex(),
                header_block_record.header.height,
                header_block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
                header_block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
                bytes(block_record),
                None
                if block_record.sub_epoch_summary_included is None
                else bytes(block_record.sub_epoch_summary_included),
                False,
            ),
        )
        await cursor_2.close()

        if len(additional_coin_spends) > 0:
            blob: bytes = bytes(AdditionalCoinSpends(additional_coin_spends))
            cursor_3 = await self.db.execute(
                "INSERT OR REPLACE INTO additional_coin_spends VALUES(?, ?)",
                (header_block_record.header_hash.hex(), blob),
            )
            await cursor_3.close()

    async def get_header_block_at(self, heights: List[uint32]) -> List[HeaderBlock]:
        """Return the header blocks stored at the given heights."""
        if len(heights) == 0:
            return []

        heights_db = tuple(heights)
        formatted_str = f'SELECT block from header_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
        cursor = await self.db.execute(formatted_str, heights_db)
        rows = await cursor.fetchall()
        await cursor.close()
        return [HeaderBlock.from_bytes(row[0]) for row in rows]

    async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
        """Gets a block record from the database, if present"""
        cached = self.block_cache.get(header_hash)
        if cached is not None:
            return cached
        cursor = await self.db.execute("SELECT block from header_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            hbr: HeaderBlockRecord = HeaderBlockRecord.from_bytes(row[0])
            self.block_cache.put(hbr.header_hash, hbr)
            return hbr
        else:
            return None

    async def get_additional_coin_spends(self, header_hash: bytes32) -> Optional[List[CoinSpend]]:
        """Return the extra coin spends stored for a block, if any."""
        cursor = await self.db.execute(
            "SELECT spends_list_blob from additional_coin_spends WHERE header_hash=?", (header_hash.hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            coin_spends: AdditionalCoinSpends = AdditionalCoinSpends.from_bytes(row[0])
            return coin_spends.coin_spends_list
        else:
            return None

    async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
        """Return the BlockRecord for a header hash, if present."""
        cursor = await self.db.execute(
            "SELECT block from block_records WHERE header_hash=?",
            (header_hash.hex(),),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return BlockRecord.from_bytes(row[0])
        return None

    async def get_block_records(
        self,
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """
        cursor = await self.db.execute("SELECT header_hash, block, is_peak from block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        peak: Optional[bytes32] = None
        for row in rows:
            header_hash_bytes, block_record_bytes, is_peak = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
            if is_peak:
                assert peak is None  # Sanity check, only one peak
                peak = header_hash
        return ret, peak

    def rollback_cache_block(self, header_hash: bytes32):
        """Drop a (possibly rolled-back) block from the in-memory cache."""
        self.block_cache.remove(header_hash)

    async def set_peak(self, header_hash: bytes32) -> None:
        """Mark the given block as the peak, clearing the previous flag."""
        cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
        await cursor_1.close()
        cursor_2 = await self.db.execute(
            "UPDATE block_records SET is_peak=1 WHERE header_hash=?",
            (header_hash.hex(),),
        )
        await cursor_2.close()

    async def get_block_records_close_to_peak(
        self, blocks_n: int
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all blocks that are no more than blocks_n
        below the peak, as well as the header hash of the peak, if present.
        """

        res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
        row = await res.fetchone()
        await res.close()
        if row is None:
            return {}, None
        header_hash_bytes, peak_height = row
        peak: bytes32 = bytes32(bytes.fromhex(header_hash_bytes))

        formatted_str = f"SELECT header_hash, block  from block_records WHERE height >= {peak_height - blocks_n}"
        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
        return ret, peak

    async def get_header_blocks_in_range(
        self,
        start: int,
        stop: int,
    ) -> Dict[bytes32, HeaderBlock]:
        """Return header blocks with start <= height <= stop, keyed by hash."""

        formatted_str = f"SELECT header_hash, block from header_blocks WHERE height >= {start} and height <= {stop}"

        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, HeaderBlock] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = HeaderBlock.from_bytes(block_record_bytes)

        return ret

    async def get_block_records_in_range(
        self,
        start: int,
        stop: int,
    ) -> Dict[bytes32, BlockRecord]:
        """
        Returns a dictionary of the block records with
        start <= height <= stop, keyed by header hash.
        """

        formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"

        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)

        return ret

    async def get_peak_heights_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:
        """
        Returns two dictionaries keyed by height, covering the chain from
        genesis to the current peak: height -> header hash, and
        height -> sub-epoch summary (only for heights that have one).
        Walks backwards from the peak via the prev_hash links.
        """

        res = await self.db.execute("SELECT header_hash from block_records WHERE is_peak = 1")
        row = await res.fetchone()
        await res.close()
        if row is None:
            return {}, {}

        peak: bytes32 = bytes.fromhex(row[0])
        cursor = await self.db.execute("SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        hash_to_prev_hash: Dict[bytes32, bytes32] = {}
        hash_to_height: Dict[bytes32, uint32] = {}
        hash_to_summary: Dict[bytes32, SubEpochSummary] = {}

        for row in rows:
            hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])
            hash_to_height[bytes.fromhex(row[0])] = row[2]
            if row[3] is not None:
                hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])

        height_to_hash: Dict[uint32, bytes32] = {}
        sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}

        curr_header_hash = peak
        curr_height = hash_to_height[curr_header_hash]
        while True:
            height_to_hash[curr_height] = curr_header_hash
            if curr_header_hash in hash_to_summary:
                sub_epoch_summaries[curr_height] = hash_to_summary[curr_header_hash]
            if curr_height == 0:
                break
            curr_header_hash = hash_to_prev_hash[curr_header_hash]
            curr_height = hash_to_height[curr_header_hash]
        return height_to_hash, sub_epoch_summaries
| [
"hulatang_eric@163.com"
] | hulatang_eric@163.com |
086dd62330fcf5b767d3e54b7e8ca44c0db75ec7 | ec1aa6a0217c29301b08c8b9559be1f8a346502b | /correctiv_eurosfueraerzte/admin/__init__.py | e4eabaa7c8211dc838ab3b52a39e8e2ede241a5f | [
"MIT"
] | permissive | correctiv/correctiv-eurosfueraerzte | 091039881130fa6c7a78811cdc9bf00893aa6906 | 291c358d65eccf06034e409d888de56a4545c7b7 | refs/heads/master | 2022-12-16T03:24:41.366597 | 2017-08-16T13:37:33 | 2017-08-16T13:37:33 | 53,336,927 | 5 | 0 | MIT | 2022-11-22T01:53:29 | 2016-03-07T15:39:45 | Python | UTF-8 | Python | false | false | 788 | py | from django.contrib import admin
from ..models import (PharmaCompany, Drug, ObservationalStudy,
PaymentRecipient, PharmaPayment, ZeroDoctor,
ZeroDocSubmission)
from .base import (PharmaCompanyAdmin, DrugAdmin, ObservationalStudyAdmin,
PaymentRecipientAdmin, PharmaPaymentAdmin)
from .zerodocs import ZeroDoctorAdmin, ZeroDocSubmissionAdmin
# Hook each model up to its ModelAdmin (same registration order as before).
_registry = (
    (PharmaCompany, PharmaCompanyAdmin),
    (Drug, DrugAdmin),
    (ObservationalStudy, ObservationalStudyAdmin),
    (PaymentRecipient, PaymentRecipientAdmin),
    (PharmaPayment, PharmaPaymentAdmin),
    (ZeroDoctor, ZeroDoctorAdmin),
    (ZeroDocSubmission, ZeroDocSubmissionAdmin),
)
for _model, _model_admin in _registry:
    admin.site.register(_model, _model_admin)
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
f7f852b199e0f51ab15fa49c3bcdb5463ef18a76 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /NbZ2cMeEfH3KpQRku_24.py | d4f52eab2e0c8c08f9daf43571dcc4231657dce1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | """
You are given a list of `0`s and `1`s, like the one below:
[0, 1, 0, 0, 0, 1, 1, 1, 0, 1]
# The first element, a 0, and the last element, a 1 are both unhappy.
# The second element, a 1 is unhappy.
# The second-to-last element, a 0 is unhappy.
# All other numbers in this list are happy.
A `1` is **unhappy** if the digit to its left and the digit to its right are
both 0s. A `0` is **unhappy** if the digit to its left and the digit to its
right are both 1s. If a number has only one neighbor, it is **unhappy** if its
only neighbor is different. Otherwise, a number is **happy**.
Write a function that takes in a list of `0`s and `1`s and outputs the
**portion of numbers which are happy**. The total portion of numbers which are
happy can be represented as:
	portion of happy numbers = # happy numbers / total # numbers

In the example above, `0.6` is the portion of happy numbers (6 of the 10
digits are happy). This overall fraction is what the fourth example below
(`0.8`) requires; averaging per-digit portions would instead give 5/6.
### Examples
portion_happy([0, 1, 0, 1, 0]) ➞ 0
portion_happy([0, 1, 1, 0]) ➞ 0.5
portion_happy([0, 0, 0, 1, 1]) ➞ 1
portion_happy([1, 0, 0, 1, 1]) ➞ 0.8
### Notes
* Remember: a `0` border number is unhappy if its only neighbor is a `1` and vice versa.
* A list will contain at least two elements.
"""
import re
def portion_happy(n):
    """Return the fraction of "happy" digits in the 0/1 list *n*.

    A digit is unhappy when every neighbour it has differs from it (a
    border digit is unhappy if its single neighbour differs); the result
    is 1 minus the unhappy fraction.
    """
    size = len(n)
    lonely = sum(
        1
        for i, digit in enumerate(n)
        if (i == 0 or n[i - 1] != digit) and (i == size - 1 or n[i + 1] != digit)
    )
    return 1 - (lonely / size)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c14bcc28f82bc83a691ba3359d6e052753d8c34e | 7d852b8d7b8a6ad7fc9c39957e1097509d08e607 | /cf/mixin/propertiesdata.py | ac31edf878b91a44748c62b5dc8097973d19b920 | [
"MIT"
] | permissive | AJamesPhillips/cf-python | ca0a7ca8681fe928f069d5809bf067d064265e38 | 4631bc4ba3c0cb51dcd18905116440007e291e6b | refs/heads/master | 2020-09-20T10:04:38.336267 | 2019-11-27T14:07:53 | 2019-11-27T14:07:53 | 224,445,029 | 0 | 0 | MIT | 2019-11-27T14:08:11 | 2019-11-27T14:08:10 | null | UTF-8 | Python | false | false | 133,553 | py | from functools import partial as functools_partial
from numpy import array as numpy_array
from numpy import result_type as numpy_result_type
from numpy import vectorize as numpy_vectorize
from ..cfdatetime import dt
from ..functions import equivalent as cf_equivalent
from ..functions import inspect as cf_inspect
from ..functions import default_netCDF_fillvals
from ..query import Query
from ..timeduration import TimeDuration
from ..units import Units
from ..data import Data
from . import Properties
from ..functions import (_DEPRECATION_ERROR_KWARGS,
_DEPRECATION_ERROR_METHOD,
)
_units_None = Units()
_month_units = ('month', 'months')
_year_units = ('year', 'years', 'yr')
_relational_methods = ('__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__')
class PropertiesData(Properties):
'''Mixin class for a data array with metadata.
'''
_special_properties = ('units',
'calendar')
def __array__(self, *dtype):
'''Returns a numpy array representation of the data.
'''
data = self.get_data(None)
if data is not None:
return data.__array__(*dtype)
raise ValueError("{} has no data".format(self.__class__.__name__))
def __contains__(self, value):
'''Called to implement membership test operators.
x.__contains__(y) <==> y in x
'''
data = self.get_data(None)
if data is None:
return False
return value in data
    def __data__(self):
        '''Returns a new reference to the data.

    Allows the construct to initialize a `Data` object.

    :Returns:

        `Data`
            The construct's data (not a copy). Raises ValueError if
            there is none.

    **Examples:**

    >>> f.data
    <CF Data(12): [14, ..., 56] km)
    >>> cf.Data(f)
    <CF Data(12): [14, ..., 56] km)
    >>> cf.Data.asdata(f)
    <CF Data(12): [14, ..., 56] km)

        '''
        data = self.get_data(None)
        if data is not None:
            return data

        raise ValueError("{} has no data".format(self.__class__.__name__))
    def __setitem__(self, indices, value):
        '''Called to implement assignment to x[indices]

    x.__setitem__(indices, value) <==> x[indices]

    If *value* is another instance of this class then its data is
    assigned; the actual assignment is delegated to the underlying
    data object.

        '''
        data = self.get_data(None)
        if data is None:
            raise ValueError("Can't set elements when there is no data")

        if isinstance(value, self.__class__):
            value = value.data

        data[indices] = value
    # ----------------------------------------------------------------
    # Binary arithmetic operators. Each simply delegates to
    # `_binary_operation`, passing the dunder name; the '__i*__'
    # (augmented-assignment) variants operate in place. The Python 2
    # era __div__/__idiv__/__rdiv__ methods are retained -- presumably
    # for interoperability with the underlying data object; TODO
    # confirm they are still reachable.
    # ----------------------------------------------------------------
    def __add__(self, y):
        '''The binary arithmetic operation ``+``

        x.__add__(y) <==> x+y

        '''
        return self._binary_operation(y, '__add__')

    def __iadd__(self, y):
        '''The augmented arithmetic assignment ``+=``

        x.__iadd__(y) <==> x+=y

        '''
        return self._binary_operation(y, '__iadd__')

    def __radd__(self, y):
        '''The binary arithmetic operation ``+`` with reflected operands

        x.__radd__(y) <==> y+x

        '''
        return self._binary_operation(y, '__radd__')

    def __sub__(self, y):
        '''The binary arithmetic operation ``-``

        x.__sub__(y) <==> x-y

        '''
        return self._binary_operation(y, '__sub__')

    def __isub__(self, y):
        '''The augmented arithmetic assignment ``-=``

        x.__isub__(y) <==> x-=y

        '''
        return self._binary_operation(y, '__isub__')

    def __rsub__(self, y):
        '''The binary arithmetic operation ``-`` with reflected operands

        x.__rsub__(y) <==> y-x

        '''
        return self._binary_operation(y, '__rsub__')

    def __mul__(self, y):
        '''The binary arithmetic operation ``*``

        x.__mul__(y) <==> x*y

        '''
        return self._binary_operation(y, '__mul__')

    def __imul__(self, y):
        '''The augmented arithmetic assignment ``*=``

        x.__imul__(y) <==> x*=y

        '''
        return self._binary_operation(y, '__imul__')

    def __rmul__(self, y):
        '''The binary arithmetic operation ``*`` with reflected operands

        x.__rmul__(y) <==> y*x

        '''
        return self._binary_operation(y, '__rmul__')

    def __div__(self, y):
        '''The binary arithmetic operation ``/``

        x.__div__(y) <==> x/y

        '''
        return self._binary_operation(y, '__div__')

    def __idiv__(self, y):
        '''The augmented arithmetic assignment ``/=``

        x.__idiv__(y) <==> x/=y

        '''
        return self._binary_operation(y, '__idiv__')

    def __rdiv__(self, y):
        '''The binary arithmetic operation ``/`` with reflected operands

        x.__rdiv__(y) <==> y/x

        '''
        return self._binary_operation(y, '__rdiv__')

    def __floordiv__(self, y):
        '''The binary arithmetic operation ``//``

        x.__floordiv__(y) <==> x//y

        '''
        return self._binary_operation(y, '__floordiv__')

    def __ifloordiv__(self, y):
        '''The augmented arithmetic assignment ``//=``

        x.__ifloordiv__(y) <==> x//=y

        '''
        return self._binary_operation(y, '__ifloordiv__')

    def __rfloordiv__(self, y):
        '''The binary arithmetic operation ``//`` with reflected operands

        x.__rfloordiv__(y) <==> y//x

        '''
        return self._binary_operation(y, '__rfloordiv__')

    def __truediv__(self, y):
        '''The binary arithmetic operation ``/`` (true division)

        x.__truediv__(y) <==> x/y

        '''
        return self._binary_operation(y, '__truediv__')

    def __itruediv__(self, y):
        '''The augmented arithmetic assignment ``/=`` (true division)

        x.__itruediv__(y) <==> x/=y

        '''
        return self._binary_operation(y, '__itruediv__')

    def __rtruediv__(self, y):
        '''The binary arithmetic operation ``/`` (true division) with
        reflected operands

        x.__rtruediv__(y) <==> y/x

        '''
        return self._binary_operation(y, '__rtruediv__')

    def __pow__(self, y, modulo=None):
        '''The binary arithmetic operations ``**`` and ``pow``

        x.__pow__(y) <==> x**y

        The three-argument form of ``pow`` is not supported.

        '''
        if modulo is not None:
            raise NotImplementedError("3-argument power not supported for %r" %
                                      self.__class__.__name__)

        return self._binary_operation(y, '__pow__')

    def __ipow__(self, y, modulo=None):
        '''The augmented arithmetic assignment ``**=``

        x.__ipow__(y) <==> x**=y

        The three-argument form of ``pow`` is not supported.

        '''
        if modulo is not None:
            raise NotImplementedError("3-argument power not supported for %r" %
                                      self.__class__.__name__)

        return self._binary_operation(y, '__ipow__')

    def __rpow__(self, y, modulo=None):
        '''The binary arithmetic operations ``**`` and ``pow`` with reflected
        operands

        x.__rpow__(y) <==> y**x

        The three-argument form of ``pow`` is not supported.

        '''
        if modulo is not None:
            raise NotImplementedError("3-argument power not supported for %r" %
                                      self.__class__.__name__)

        return self._binary_operation(y, '__rpow__')

    def __mod__(self, y):
        '''The binary arithmetic operation ``%``

        x.__mod__(y) <==> x % y

        .. versionadded:: 1.0

        '''
        return self._binary_operation(y, '__mod__')

    def __imod__(self, y):
        '''The binary arithmetic operation ``%=``

        x.__imod__(y) <==> x %= y

        .. versionadded:: 1.0

        '''
        return self._binary_operation(y, '__imod__')

    def __rmod__(self, y):
        '''The binary arithmetic operation ``%`` with reflected operands

        x.__rmod__(y) <==> y % x

        .. versionadded:: 1.0

        '''
        return self._binary_operation(y, '__rmod__')
    # ----------------------------------------------------------------
    # Rich comparison operators -- also routed through
    # `_binary_operation` (these are element-wise comparisons on the
    # data, not object identity/equality tests).
    # ----------------------------------------------------------------
    def __eq__(self, y):
        '''The rich comparison operator ``==``

        x.__eq__(y) <==> x==y

        '''
        return self._binary_operation(y, '__eq__')

    def __ne__(self, y):
        '''The rich comparison operator ``!=``

        x.__ne__(y) <==> x!=y

        '''
        return self._binary_operation(y, '__ne__')

    def __ge__(self, y):
        '''The rich comparison operator ``>=``

        x.__ge__(y) <==> x>=y

        '''
        return self._binary_operation(y, '__ge__')

    def __gt__(self, y):
        '''The rich comparison operator ``>``

        x.__gt__(y) <==> x>y

        '''
        return self._binary_operation(y, '__gt__')

    def __le__(self, y):
        '''The rich comparison operator ``<=``

        x.__le__(y) <==> x<=y

        '''
        return self._binary_operation(y, '__le__')

    def __lt__(self, y):
        '''The rich comparison operator ``<``

        x.__lt__(y) <==> x<y

        '''
        return self._binary_operation(y, '__lt__')
    # ----------------------------------------------------------------
    # Bitwise and shift operators, again delegating to
    # `_binary_operation`.
    # ----------------------------------------------------------------
    def __and__(self, y):
        '''The binary bitwise operation ``&``

        x.__and__(y) <==> x&y

        '''
        return self._binary_operation(y, '__and__')

    def __iand__(self, y):
        '''The augmented bitwise assignment ``&=``

        x.__iand__(y) <==> x&=y

        '''
        return self._binary_operation(y, '__iand__')

    def __rand__(self, y):
        '''The binary bitwise operation ``&`` with reflected operands

        x.__rand__(y) <==> y&x

        '''
        return self._binary_operation(y, '__rand__')

    def __or__(self, y):
        '''The binary bitwise operation ``|``

        x.__or__(y) <==> x|y

        '''
        return self._binary_operation(y, '__or__')

    def __ior__(self, y):
        '''The augmented bitwise assignment ``|=``

        x.__ior__(y) <==> x|=y

        '''
        return self._binary_operation(y, '__ior__')

    def __ror__(self, y):
        '''The binary bitwise operation ``|`` with reflected operands

        x.__ror__(y) <==> y|x

        '''
        return self._binary_operation(y, '__ror__')

    def __xor__(self, y):
        '''The binary bitwise operation ``^``

        x.__xor__(y) <==> x^y

        '''
        return self._binary_operation(y, '__xor__')

    def __ixor__(self, y):
        '''The augmented bitwise assignment ``^=``

        x.__ixor__(y) <==> x^=y

        '''
        return self._binary_operation(y, '__ixor__')

    def __rxor__(self, y):
        '''The binary bitwise operation ``^`` with reflected operands

        x.__rxor__(y) <==> y^x

        '''
        return self._binary_operation(y, '__rxor__')

    def __lshift__(self, y):
        '''The binary bitwise operation ``<<``

        x.__lshift__(y) <==> x<<y

        '''
        return self._binary_operation(y, '__lshift__')

    def __ilshift__(self, y):
        '''The augmented bitwise assignment ``<<=``

        x.__ilshift__(y) <==> x<<=y

        '''
        return self._binary_operation(y, '__ilshift__')

    def __rlshift__(self, y):
        '''The binary bitwise operation ``<<`` with reflected operands

        x.__rlshift__(y) <==> y<<x

        '''
        return self._binary_operation(y, '__rlshift__')

    def __rshift__(self, y):
        '''The binary bitwise operation ``>>``

        x.__rshift__(y) <==> x>>y

        '''
        return self._binary_operation(y, '__rshift__')

    def __irshift__(self, y):
        '''The augmented bitwise assignment ``>>=``

        x.__irshift__(y) <==> x>>=y

        '''
        return self._binary_operation(y, '__irshift__')

    def __rrshift__(self, y):
        '''The binary bitwise operation ``>>`` with reflected operands

        x.__rrshift__(y) <==> y>>x

        '''
        return self._binary_operation(y, '__rrshift__')
def __abs__(self):
'''The unary arithmetic operation ``abs``
x.__abs__() <==> abs(x)
'''
return self._unary_operation('__abs__')
def __neg__(self):
'''The unary arithmetic operation ``-``
x.__neg__() <==> -x
'''
return self._unary_operation('__neg__')
def __invert__(self):
'''The unary bitwise operation ``~``
x.__invert__() <==> ~x
'''
return self._unary_operation('__invert__')
def __pos__(self):
'''The unary arithmetic operation ``+``
x.__pos__() <==> +x
'''
return self._unary_operation('__pos__')
# ----------------------------------------------------------------
# Private methods
# ----------------------------------------------------------------
    def _binary_operation(self, y, method):
        '''Implement binary arithmetic and comparison operations.

        The operations act on the construct's data with the numpy
        broadcasting rules.

        It is intended to be called by the binary arithmetic and
        comparison methods, such as `!__sub__` and `!__lt__`.

        :Parameters:

            y:
                The right-hand operand: another construct of this
                class, a data object, or anything the data's own
                binary operations accept.

            method: `str`
                The binary arithmetic or comparison method name (such
                as ``'__imul__'`` or ``'__ge__'``).

        :Returns:

                A new construct, or the same construct if the
                operation was in-place.

        **Examples:**

        >>> w = u._binary_operation(u, '__add__')
        >>> w = u._binary_operation(v, '__lt__')
        >>> u._binary_operation(2, '__imul__')
        >>> u._binary_operation(v, '__idiv__')

        '''
        data = self.get_data(None)
        if data is None:
            raise ValueError(
                "Can't apply {} to a {} object with no data: {!r}".format(
                    method, self.__class__.__name__, self))

        # In-place methods are named '__i<op>__', so the third
        # character identifies them
        inplace = method[2] == 'i'

        # Remember the identities of the left-hand operand before the
        # operation possibly changes them
        units = self.Units
        sn = self.get_property('standard_name', None)
        ln = self.get_property('long_name', None)

        try:
            other_sn = y.get_property('standard_name', None)
            other_ln = y.get_property('long_name', None)
        except AttributeError:
            # y has no properties (e.g. it is a scalar or a bare data
            # object)
            other_sn = None
            other_ln = None

        if isinstance(y, self.__class__):
            # Operate directly on the other construct's data
            y = y.data

        if not inplace:
            new = self.copy() #data=False) TODO
            new_data = data._binary_operation(y, method)
            new.set_data(new_data, copy=False)
        else:
            new = self
            new.data._binary_operation(y, method)

        if method in _relational_methods:
            # Booleans have no units
            new.override_units(Units(), inplace=True)

        # ------------------------------------------------------------
        # Remove misleading identities
        # ------------------------------------------------------------
        if sn != other_sn:
            if sn is not None and other_sn is not None:
                # Both operands have a standard name, and they differ:
                # neither identity is valid for the result
                new.del_property('standard_name', None)
                new.del_property('long_name', None)
            elif other_sn is not None:
                # Only the right-hand operand is identified: adopt its
                # identity
                new.set_property('standard_name', other_sn)
                if other_ln is None:
                    new.del_property('long_name', None)
                else:
                    new.set_property('long_name', other_ln)
        elif ln is None and other_ln is not None:
            new.set_property('long_name', other_ln)

        # If the operation produced non-equivalent units (and the
        # result is not simply a boolean comparison or a reference
        # time shift), the original identities no longer apply
        new_units = new.Units
        if (method in _relational_methods or
            not units.equivalent(new_units) and
            not (units.isreftime and new_units.isreftime)):
            new.del_property('standard_name', None)
            new.del_property('long_name', None)

        return new
# def _ooo(self):
# '''
# '''
# units = self.Units
# sn = self.get_property('standard_name', None)
# ln = self.get_property('long_name', None)
#
# try:
# other_sn = y.get_property('standard_name', None)
# other_ln = y.get_property('long_name', None)
# except AttributeError:
# other_sn = None
# other_ln = None
#
# if isinstance(y, self.__class__):
# y = y.data
#
# if not inplace:
# new = self.copy() #data=False) TODO
# new_data = data._binary_operation(y, method)
# new.set_data(new_data, copy=False)
# else:
# new = self
# new.data._binary_operation(y, method)
#
#
# if sn != other_sn:
# if sn is not None and other_sn is not None:
# new.del_property('standard_name', None)
# new.del_property('long_name', None)
# elif other_sn is not None:
# new.set_property('standard_name', other_sn)
# if other_ln is None:
# new.del_property('long_name', None)
# else:
# new.set_property('long_name', other_ln)
# elif ln is None and other_ln is not None:
# new.set_property('long_name', other_ln)
#
# new_units = new.Units
# if (not units.equivalent(new_units) and
# not (units.isreftime and new_units.isreftime)):
# new.del_property('standard_name', None)
# new.del_property('long_name', None)
# def _change_axis_names(self, dim_name_map):
# '''Change the axis names of the Data object.
#
#:Parameters:
#
# dim_name_map: `dict`
#
#:Returns:
#
# `None`
#
#**Examples:**
#
#>>> f._change_axis_names({'0': 'dim1', '1': 'dim2'})
#
# '''
# data = self.get_data(None)
# if data is not None:
# data.change_axis_names(dim_name_map)
    def _conform_for_assignment(self, other):
        '''Conform *other* prior to assignment to the data.

        This implementation is a pass-through that returns *other*
        unchanged. NOTE(review): presumably a hook overridden by
        subclasses that need to reshape or re-order *other* first —
        confirm against subclass implementations.

        '''
        return other
def _equivalent_data(self, other, atol=None, rtol=None,
verbose=False):
'''TODO
Two real numbers ``x`` and ``y`` are considered equal if
``|x-y|<=atol+rtol|y|``, where ``atol`` (the tolerance on absolute
differences) and ``rtol`` (the tolerance on relative differences) are
positive, typically very small numbers. See the *atol* and *rtol*
parameters.
:Parameters:
transpose: `dict`, optional
atol: `float`, optional
The tolerance on absolute differences between real
numbers. The default value is set by the `ATOL` function.
rtol: `float`, optional
The tolerance on relative differences between real
numbers. The default value is set by the `RTOL` function.
:Returns:
`bool`
Whether or not the two variables have equivalent data arrays.
'''
if self.has_data() != other.has_data():
if verbose:
print("{}: Only one construct has data: {!r}, {!r}".format(
self.__class__.__name__, self, other))
return False
if not self.has_data():
return True
data0 = self.get_data()
data1 = other.get_data()
if data0.shape != data1.shape:
if verbose:
print("{}: Data have different shapes: {}, {}".format(
self.__class__.__name__, data0.shape, data1.shape))
return False
if not data0.Units.equivalent(data1.Units):
if verbose:
print("{}: Data have non-equivalent units: {!r}, {!r}".format(
self.__class__.__name__, data0.Units, data1.Units))
return False
# if atol is None:
# atol = ATOL()
# if rtol is None:
# rtol = RTOL()
if not data0.allclose(data1, rtol=rtol, atol=atol):
if verbose:
print("{}: Data have non-equivalent values: {!r}, {!r}".format(
self.__class__.__name__, data0, data1))
return False
return True
def _parse_axes(self, axes):
'''TODO
'''
if axes is None:
return axes
if isinstance(axes, int):
axes = (axes,)
ndim = self.ndim
return [(i + ndim if i < 0 else i) for i in axes]
def _parse_match(self, match):
'''Called by `match`
:Parameters:
match:
As for the *match* parameter of `match` method.
:Returns:
`list`
'''
if not match:
return ()
if isinstance(match, (str, dict, Query)):
match = (match,)
matches = []
for m in match:
if isinstance(m, str):
if '=' in m:
# CF property (string-valued)
m = m.split('=')
matches.append({m[0]: '='.join(m[1:])})
else:
# Identity (string-valued) or python attribute
# (string-valued) or axis type
matches.append({None: m})
elif isinstance(m, dict):
# Dictionary
matches.append(m)
else:
# Identity (not string-valued, e.g. cf.Query).
matches.append({None: m})
#--- End: for
return matches
    def __query_set__(self, values):
        '''Return a copy whose data holds the result of the data's
        "member of set" query condition applied with *values*.

        NOTE(review): the exact condition semantics are defined by the
        data's own `__query_set__` — confirm there.

        '''
        new = self.copy()
        new.set_data(self.data.__query_set__(values), copy=False)
        return new

#    def _query_contain(self, value):
#        '''TODO#
#
#        '''
#        new = self.copy()
#        new.set_data(self.data._query_contain(value), copy=False)
#        return new

#    def _query_contains(self, value):
#        '''TODO
#
#        '''
#        new = self.copy()
#        new.set_data(self.data._query_contains(value), copy=False)
#        return new

    def __query_wi__(self, value):
        '''Return a copy whose data holds the result of the data's
        "within an interval" query condition applied with *value*.

        '''
        new = self.copy()
        new.set_data(self.data.__query_wi__(value), copy=False)
        return new

    def __query_wo__(self, value):
        '''Return a copy whose data holds the result of the data's
        "without (outside) an interval" query condition applied with
        *value*.

        '''
        new = self.copy()
        new.set_data(self.data.__query_wo__(value), copy=False)
        return new
    def _unary_operation(self, method):
        '''Implement unary arithmetic operations on the data array.

        :Parameters:

            method: `str`
                The unary arithmetic method name (such as "__abs__").

        :Returns:

                A new construct with the operation applied to its
                data.

        :Raises:

            ValueError:
                If the construct has no data.

        **Examples:**

        >>> print(v.array)
        [1 2 -3 -4 -5]
        >>> w = v._unary_operation('__abs__')
        >>> print(w.array)
        [1 2 3 4 5]
        >>> w = v.__abs__()
        >>> print(w.array)
        [1 2 3 4 5]
        >>> w = abs(v)
        >>> print(w.array)
        [1 2 3 4 5]

        '''
        data = self.get_data(None)
        if data is None:
            raise ValueError("Can't apply {} to a {} with no data".format(
                method, self.__class__.__name__))

        new = self.copy()

        # Apply the named operation to the data and install the result
        # on the copy without a further copy
        new_data = data._unary_operation(method)
        new.set_data(new_data, copy=False)

        return new
    def _YMDhms(self, attr):
        '''Return a construct of a named date-time component of the data.

        Shared implementation of the `year`, `month`, `day`, `hour`,
        `minute` and `second` attributes.

        :Parameters:

            attr: `str`
                The date-time component name: one of ``'year'``,
                ``'month'``, ``'day'``, ``'hour'``, ``'minute'``,
                ``'second'``.

        :Raises:

            ValueError:
                If the construct has no data.

        '''
        data = self.get_data(None)
        if data is None:
            raise ValueError(
                "ERROR: Can't get {}s when there is no data array".format(attr))

        out = self.copy() # data=False)
        out.set_data(getattr(data, attr), copy=False)

        # The component is a dimensionless integer quantity, so it
        # keeps no standard name nor units; the long name records
        # which component it is
        out.del_property('standard_name', None)
        out.set_property('long_name', attr)
        out.override_units(Units(), inplace=True)

        return out
# def _hmmm(self, method):
# data = self.get_data(None)
# if data is not None:
# out = self.copy() #data=False)
# out.set_data(getattr(data, method)(), copy=False)
# out.del_property('standard_name', None)
# out.set_property('long_name', method)
# return out
# #--- End: if
#
# raise ValueError(
# "ERROR: Can't get {0} when there is no data array".format(method))
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
    @property
    def T(self):
        '''Whether the data are coordinates for a CF 'T' axis.

        Always False here. NOTE(review): presumably overridden by
        subclasses whose coordinates span a CF 'T' axis (reference
        time units) — confirm against subclasses.

        .. seealso:: `X`, `Y`, `Z`

        **Examples:**

        >>> c.T
        False

        '''
        return False

    @property
    def X(self):
        '''Always False.

        .. seealso:: `T`, `Y`, `Z`

        **Examples:**

        >>> print(f.X)
        False

        '''
        return False

    @property
    def Y(self):
        '''Always False.

        .. seealso:: `T`, `X`, `Z`

        **Examples:**

        >>> print(f.Y)
        False

        '''
        return False

    @property
    def Z(self):
        '''Always False.

        .. seealso:: `T`, `X`, `Y`

        **Examples:**

        >>> print(f.Z)
        False

        '''
        return False
@property
def binary_mask(self):
'''A binary (0 and 1) missing data mask of the data array.
The binary mask's data array comprises dimensionless 32-bit
integers and has 0 where the data array has missing data and 1
otherwise.
**Examples:**
>>> print(f.mask.array)
[[ True False True False]]
>>> b = f.binary_mask()
>>> print(b.array)
[[0 1 0 1]]
'''
out = type(self)()
out.set_propoerty('long_name', 'binary_mask')
out.set_data(self.data.binary_mask(), copy=False)
return out
    @property
    def data(self):
        '''The `Data` object containing the data array.

        * ``f.data = x`` is equivalent to ``f.set_data(x, copy=False)``

        * ``x = f.data`` is equivalent to ``x = f.get_data()``

        * ``del f.data`` is equivalent to ``f.del_data()``

        * ``hasattr(f, 'data')`` is equivalent to ``f.has_data()``

        .. seealso:: `del_data`, `get_data`, `has_data`, `set_data`

        '''
        return self.get_data()

    @data.setter
    def data(self, value):
        # The assigned data is neither copied nor are the domain axes
        # updated (set_axes=False)
        self.set_data(value, set_axes=False, copy=False)

    @data.deleter
    def data(self):
        return self.del_data()
@property
def reference_datetime(self):
'''The reference date-time of units of elapsed time.
**Examples**
>>> f.units
'days since 2000-1-1'
>>> f.reference_datetime
cftime.DatetimeNoLeap(2000-01-01 00:00:00)
'''
units = self.Units
if not units.isreftime:
raise AttributeError(
"{0} doesn't have attribute 'reference_datetime'".format(
self.__class__.__name__))
return dt(units.reftime, calendar=units._calendar)
@reference_datetime.setter
def reference_datetime(self, value):
units = self.Units
if not units.isreftime:
raise AttributeError(
"Can't set 'reference_datetime' for non reference date-time units".format(
self.__class__.__name__))
units = units.units.split(' since ')
try:
self.units = "{0} since {1}".format(units[0], value)
except (ValueError, TypeError):
raise ValueError(
"Can't override reference date-time {0!r} with {1!r}".format(
units[1], value))
    @property
    def Units(self):
        '''The `cf.Units` object containing the units of the data array.

        Stores the units and calendar CF properties in an internally
        consistent manner. These are mirrored by the `units` and
        `calendar` CF properties respectively.

        **Examples:**

        >>> f.Units
        <Units: K>

        >>> f.Units
        <Units: days since 2014-1-1 calendar=noleap>

        '''
        data = self.get_data(None)
        if data is not None:
            # The data's units are authoritative
            return data.Units

        # No data: fall back to the units cached on the construct
        # itself, creating (and caching) null units on first access
        try:
            return self._custom['Units']
        except KeyError:
            self._custom['Units'] = _units_None
            return _units_None

    @Units.setter
    def Units(self, value):
        data = self.get_data(None)
        if data is not None:
            data.Units = value
        else:
            # No data yet: remember the units on the construct so they
            # can be applied later
            self._custom['Units'] = value

#        units = getattr(value, 'units', None)
#        if units is not None:
#            self.set_property('units', units)
#
#        calendar = getattr(value, 'calendar', None)
#        if calendar is not None:
#            self.set_property('calendar', calendar)

    @Units.deleter
    def Units(self):
        # Deleting units directly is disallowed: units may only be
        # replaced, via the override_units method
        raise AttributeError(
            "Can't delete {} attribute 'Units'. Use the override_units method.".format(
                self.__class__.__name__))
    @property
    def year(self):
        '''The year of each date-time data array element.

        Only applicable to data arrays with reference time units.

        .. seealso:: `month`, `day`, `hour`, `minute`, `second`

        **Examples:**

        >>> print(f.datetime_array)
        [0450-11-15 00:00:00 0450-12-16 12:30:00 0451-01-16 12:00:45]
        >>> print(f.year.array)
        [450 450 451]

        '''
        # Delegate to the shared date-time component helper
        return self._YMDhms('year')

    @property
    def month(self):
        '''The month of each date-time data array element.

        Only applicable to data arrays with reference time units.

        .. seealso:: `year`, `day`, `hour`, `minute`, `second`

        **Examples:**

        >>> print(f.datetime_array)
        [0450-11-15 00:00:00 0450-12-16 12:30:00 0451-01-16 12:00:45]
        >>> print(f.month.array)
        [11 12  1]

        '''
        return self._YMDhms('month')

    @property
    def day(self):
        '''The day of each date-time data array element.

        Only applicable to data arrays with reference time units.

        .. seealso:: `year`, `month`, `hour`, `minute`, `second`

        **Examples:**

        >>> print(f.datetime_array)
        [0450-11-15 00:00:00 0450-12-16 12:30:00 0451-01-16 12:00:45]
        >>> print(f.day.array)
        [15 16 16]

        '''
        return self._YMDhms('day')

    @property
    def hour(self):
        '''The hour of each date-time data array element.

        Only applicable to data arrays with reference time units.

        .. seealso:: `year`, `month`, `day`, `minute`, `second`

        **Examples:**

        >>> print(f.datetime_array)
        [0450-11-15 00:00:00 0450-12-16 12:30:00 0451-01-16 12:00:45]
        >>> print(f.hour.array)
        [ 0 12 12]

        '''
        return self._YMDhms('hour')

    @property
    def minute(self):
        '''The minute of each date-time data array element.

        Only applicable to data arrays with reference time units.

        .. seealso:: `year`, `month`, `day`, `hour`, `second`

        **Examples:**

        >>> print(f.datetime_array)
        [0450-11-15 00:00:00 0450-12-16 12:30:00 0451-01-16 12:00:45]
        >>> print(f.minute.array)
        [ 0 30  0]

        '''
        return self._YMDhms('minute')

    @property
    def second(self):
        '''The second of each date-time data array element.

        Only applicable to data arrays with reference time units.

        .. seealso:: `year`, `month`, `day`, `hour`, `minute`

        **Examples:**

        >>> print(f.datetime_array)
        [0450-11-15 00:00:00 0450-12-16 12:30:00 0451-01-16 12:00:45]
        >>> print(f.second.array)
        [ 0  0 45]

        '''
        return self._YMDhms('second')
    @property
    def mask(self):
        '''The mask of the data array.

        Values of True indicate masked elements.

        .. seealso:: `binary_mask`

        **Examples:**

        >>> f.shape
        (12, 73, 96)
        >>> m = f.mask
        >>> m.long_name
        'mask'
        >>> m.shape
        (12, 73, 96)
        >>> m.dtype
        dtype('bool')
        >>> m.data
        <CF Data(12, 73, 96): [[[True, ..., False]]] >

        '''
        if not self.has_data():
            raise ValueError(
                "ERROR: Can't get mask when there is no data array")

        out = self.copy()
        out.set_data(self.data.mask, copy=False)

        # A mask is boolean, so it carries no units and no properties
        # other than a descriptive long name; nor does it correspond
        # to any netCDF variable
        out.override_units(Units(), inplace=True)
        out.clear_properties()
        out.set_property('long_name', 'mask')
        out.nc_del_variable(default=None)

        return out
# ----------------------------------------------------------------
# CF properties
# ----------------------------------------------------------------
@property
def add_offset(self):
'''The add_offset CF property.
If present then this number is *subtracted* from the data prior to
it being written to a file. If both `scale_factor` and
`add_offset` properties are present, the offset is subtracted
before the data are scaled. See
http://cfconventions.org/latest.html for details.
**Examples:**
>>> f.add_offset = -4.0
>>> f.add_offset
-4.0
>>> del f.add_offset
>>> f.set_property('add_offset', 10.5)
>>> f.get_property('add_offset')
10.5
>>> f.del_property('add_offset')
10.5
>>> f.has_property('add_offset')
False
'''
return self.get_property('add_offset', default=AttributeError())
@add_offset.setter
def add_offset(self, value):
self.set_property('add_offset', value)
self.dtype = numpy_result_type(self.dtype, numpy_array(value).dtype)
@add_offset.deleter
def add_offset(self):
self.delprop('add_offset', default=AttributeError())
if not self.has_property('scale_factor'):
del self.dtype
    @property
    def calendar(self):
        '''The calendar CF property.

        The calendar used for encoding time data. See
        http://cfconventions.org/latest.html for details.

        **Examples:**

        >>> f.calendar = 'noleap'
        >>> f.calendar
        'noleap'
        >>> del f.calendar

        >>> f.set_property('calendar', 'proleptic_gregorian')
        >>> f.get_property('calendar')
        'proleptic_gregorian'
        >>> f.del_property('calendar')
        'proleptic_gregorian'
        >>> f.has_property('calendar')
        False

        '''
        # The calendar is stored on the Units object, not as a plain
        # property
        value = getattr(self.Units, 'calendar', None)
        if value is None:
            raise AttributeError(
                "{} doesn't have CF property 'calendar'".format(
                    self.__class__.__name__))

        return value

    @calendar.setter
    def calendar(self, value):
        # Rebuild the Units with the existing units string and the new
        # calendar
        self.Units = Units(getattr(self, 'units', None), value)

    @calendar.deleter
    def calendar(self):
        if getattr(self, 'calendar', None) is None:
            raise AttributeError(
                "Can't delete non-existent {} CF property 'calendar'".format(
                    self.__class__.__name__))

        # Rebuild the Units without any calendar
        self.Units = Units(getattr(self, 'units', None))
    @property
    def _FillValue(self):
        '''The _FillValue CF property.

        A value used to represent missing or undefined data.

        Note that this property is primarily for writing data to disk
        and is independent of the missing data mask. It may, however,
        get used when unmasking data array elements. See
        http://cfconventions.org/latest.html for details.

        The recommended way of retrieving the missing data value is
        with the `fill_value` method.

        .. seealso:: `fill_value`, `missing_value`,
                     `cf.default_netCDF_fillvals`

        **Examples:**

        >>> f._FillValue = -1.0e30
        >>> f._FillValue
        -1e+30
        >>> del f._FillValue

        >>> f.set_property('_FillValue', -1.0e30)
        >>> f.get_property('_FillValue')
        -1e+30
        >>> f.del_property('_FillValue')
        -1e30
        >>> f.del_property('_FillValue', None)
        None

        '''
        return self.get_property('_FillValue', default=AttributeError())

    @_FillValue.setter
    def _FillValue(self, value):
        self.set_property('_FillValue', value)

    @_FillValue.deleter
    def _FillValue(self):
        self.del_property('_FillValue', default=AttributeError())
    @property
    def missing_value(self):
        '''The missing_value CF property.

        A value used to represent missing or undefined data
        (deprecated by the netCDF user guide). See
        http://cfconventions.org/latest.html for details.

        Note that this attribute is used primarily for writing data to
        disk and is independent of the missing data mask. It may,
        however, be used when unmasking data array elements.

        The recommended way of retrieving the missing data value is
        with the `fill_value` method.

        .. seealso:: `_FillValue`, `fill_value`,
                     `cf.default_netCDF_fillvals`

        **Examples:**

        >>> f.missing_value = 1.0e30
        >>> f.missing_value
        1e+30
        >>> del f.missing_value

        >>> f.set_property('missing_value', -1.0e30)
        >>> f.get_property('missing_value')
        -1e+30
        >>> f.del_property('missing_value')
        -1e30
        >>> f.del_property('missing_value', None)
        None

        '''
        return self.get_property('missing_value', default=AttributeError())

    @missing_value.setter
    def missing_value(self, value):
        self.set_property('missing_value', value)

    @missing_value.deleter
    def missing_value(self):
        self.del_property('missing_value', default=AttributeError())
@property
def scale_factor(self):
'''The scale_factor CF property.
If present then the data are *divided* by this factor prior to it
being written to a file. If both `scale_factor` and `add_offset`
properties are present, the offset is subtracted before the data
are scaled. See http://cfconventions.org/latest.html for details.
**Examples:**
>>> f.scale_factor = 10.0
>>> f.scale_factor
10.0
>>> del f.scale_factor
>>> f.set_property('scale_factor', 10.0)
>>> f.get_property('scale_factor')
10.0
>>> f.del_property('scale_factor')
10
>>> f.has_property('scale_factor')
False
'''
return self.get_property('scale_factor', default=AttributeError())
@scale_factor.setter
def scale_factor(self, value): self.set_property('scale_factor', value)
@scale_factor.deleter
def scale_factor(self): self.del_property('scale_factor', default=AttributeError())
    @property
    def units(self):
        '''The units CF property.

        The units of the data. The value of the `units` property is a
        string that can be recognized by UNIDATA's Udunits package
        (http://www.unidata.ucar.edu/software/udunits). See
        http://cfconventions.org/latest.html for details.

        **Examples:**

        >>> f.units = 'K'
        >>> f.units
        'K'
        >>> del f.units

        >>> f.set_property('units', 'm.s-1')
        >>> f.get_property('units')
        'm.s-1'
        >>> f.has_property('units')
        True

        '''
        # The units string is stored on the Units object, not as a
        # plain property
        value = getattr(self.Units, 'units', None)
        if value is None:
            raise AttributeError("{} doesn't have CF property 'units'".format(
                self.__class__.__name__))

        return value

    @units.setter
    def units(self, value):
        # Rebuild the Units with the new units string and the existing
        # calendar
        self.Units = Units(value, getattr(self, 'calendar', None))

    @units.deleter
    def units(self):
        if getattr(self, 'units', None) is None:
            raise AttributeError(
                "Can't delete non-existent {} CF property 'units'".format(
                    self.__class__.__name__))

        # Rebuild the Units keeping only the calendar
        self.Units = Units(None, getattr(self, 'calendar', None))
# ----------------------------------------------------------------
# Methods
# ----------------------------------------------------------------
def mask_invalid(self, inplace=False, i=False):
'''Mask the array where invalid values occur.
Note that:
* Invalid values are Nan or inf
* Invalid values in the results of arithmetic operations only
occur if the raising of `FloatingPointError` exceptions has been
suppressed by `cf.data.seterr`.
* If the raising of `FloatingPointError` exceptions has been
allowed then invalid values in the results of arithmetic
operations it is possible for them to be automatically converted
to masked values, depending on the setting of
`cf.data.mask_fpe`. In this case, such automatic conversion
might be faster than calling `mask_invalid`.
.. seealso:: `cf.data.mask_fpe`, `cf.data.seterr`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
TODO
**Examples:**
>>> print(f.array)
[ 0. 1.]
>>> print(g.array)
[ 1. 2.]
>>> old = cf.data.seterr('ignore')
>>> h = g/f
>>> print(h.array)
[ inf 2.]
>>> h.mask_invalid(inplace=True)
>>> print(h.array)
[-- 2.]
>>> h = g**12345
>>> print(h.array)
[ 1. inf]
>>> h.mask_invalid(inplace=True)
>>> print(h.array)
[1. --]
>>> old = cf.data.seterr('raise')
>>> old = cf.data.mask_fpe(True)
>>> print((g/f).array)
[ -- 2]
>>> print((g**12345).array)
[1. -- ]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'mask_invalid', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.mask_invalid(inplace=True)
if inplace:
v = None
return v
def max(self):
'''The maximum of the data array.
g
.. seealso:: `mean`, `mid_range`, `min`, `range`, `sample_size`,
`sd`, `sum`, `var`
:Returns:
`Data`
The maximum of the data array.
**Examples:**
>>> f.data
<CF Data(12, 64, 128): [[[236.512756, ..., 256.93371]]] K>
>>> f.max()
<CF Data(): 311.343780 K>
'''
data = self.get_data(None)
if data is not None:
return data.max(squeeze=True)
raise ValueError(
"ERROR: Can't get the maximum when there is no data array")
def mean(self):
'''The unweighted mean the data array.
.. seealso:: `max`, `mid_range`, `min`, `range`, `sample_size`, `sd`,
`sum`, `var`
:Returns:
`Data`
The unweighted mean the data array.
**Examples:**
>>> f.data
<CF Data(12, 73, 96): [[[236.512756348, ..., 256.93371582]]] K>
>>> f.mean()
<CF Data(): 280.192227593 K>
'''
data = self.get_data(None)
if data is not None:
return data.mean(squeeze=True)
raise ValueError(
"ERROR: Can't get the mean when there is no data array")
def mid_range(self):
'''The unweighted average of the maximum and minimum of the data
array.
.. seealso:: `max`, `mean`, `min`, `range`, `sample_size`, `sd`,
`sum`, `var`
:Returns:
`Data`
The unweighted average of the maximum and minimum of the
data array.
**Examples:**
>>> f.data
<CF Data(12, 73, 96): [[[236.512756348, ..., 256.93371582]]] K>
>>> f.mid_range()
<CF Data(): 255.08618927 K>
'''
data = self.get_data(None)
if data is not None:
return data.mid_range(squeeze=True)
raise ValueError(
"ERROR: Can't get the mid-range when there is no data array")
def min(self):
'''The minimum of the data array.
.. seealso:: `max`, `mean`, `mid_range`, `range`, `sample_size`,
`sd`, `sum`, `var`
:Returns:
`Data`
The minimum of the data array.
**Examples:**
>>> f.data
<CF Data(12, 73, 96): [[[236.512756348, ..., 256.93371582]]] K>
>>> f.min()
<CF Data(): 198.828598022 K>
'''
data = self.get_data(None)
if data is not None:
return data.min(squeeze=True)
raise ValueError(
"ERROR: Can't get the minimum when there is no data array")
def range(self):
'''The absolute difference between the maximum and minimum of the data
array.
.. seealso:: `max`, `mean`, `mid_range`, `min`, `sample_size`,
`sd`, `sum`, `var`
:Returns:
`Data`
The absolute difference between the maximum and minimum of
the data array.
**Examples:**
>>> f.data
<CF Data(12, 73, 96): [[[236.512756348, ..., 256.93371582]]] K>
>>> f.range()
<CF Data(): 112.515182495 K>
'''
data = self.get_data(None)
if data is not None:
return data.range(squeeze=True)
raise ValueError(
"ERROR: Can't get the range when there is no data array")
def sample_size(self):
'''The number of non-missing data elements in the data array.
.. seealso:: `count`, `max`, `mean`, `mid_range`, `min`, `range`,
`sd`, `sum`, `var`
:Returns:
`Data`
The number of non-missing data elements in the data array.
**Examples:**
>>> f.data
<CF Data(12, 73, 96): [[[236.512756348, ..., 256.93371582]]] K>
>>> f.sample_size()
<CF Data(): 98304.0>
'''
data = self.get_data(None)
if data is not None:
return data.sample_size(squeeze=True)
raise ValueError(
"ERROR: Can't get the sample size when there is no data array")
def sd(self):
'''The unweighted sample standard deviation of the data array.
.. seealso:: `max`, `mean`, `mid_range`, `min`, `range`,
`sample_size`, `sum`, `var`
:Returns:
`Data`
The unweighted standard deviation of the data array.
**Examples:**
>>> f.data
<CF Data(12, 73, 96): [[[236.512756348, ..., 256.93371582]]] K>
>>> f.sd()
<CF Data(): 22.685052535 K>
'''
data = self.get_data(None)
if data is not None:
return data.sd(squeeze=True, ddof=0)
raise ValueError(
"ERROR: Can't get the standard deviation when there is no data array")
def sum(self):
'''The sum of the data array.
.. seealso:: `max`, `mean`, `mid_range`, `min`, `range`,
`sample_size`, `sd`, `var`
:Returns:
`Data`
The sum of the data array.
**Examples:**
>>> f.data
<CF Data(12, 73, 96): [[[236.512756348, ..., 256.93371582]]] K>
>>> f.sum()
<CF Data(): 27544016.7413 K>
'''
data = self.get_data(None)
if data is not None:
return data.sum(squeeze=True)
raise ValueError(
"ERROR: Can't get the sum when there is no data array")
def swapaxes(self, axis0, axis1, inplace=False):
'''Interchange two axes of an array.
.. seealso:: `flatten`, `flip`, `insert_dimension`, `squeeze`,
`transpose`
:Parameters:
axis0, axis1: `int`, `int`
Select the axes to swap. Each axis is identified by its
original integer position.
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
The construct with data with swapped axis positions. If
the operation was in-place then `None` is returned.
**Examples:**
>>> f.shape
(1, 2, 3)
>>> f.swapaxes(1, 0).shape
(2, 1, 3)
>>> f.swapaxes(0, -1).shape
(3, 2, 1)
>>> f.swapaxes(1, 1).shape
(1, 2, 3)
>>> f.swapaxes(-1, -1).shape
(1, 2, 3)
'''
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.swapaxes(axis0, axis1, inplace=True)
if inplace:
v = None
return v
def var(self):
'''The unweighted sample variance of the data array.
.. seealso:: `max`, `mean`, `mid_range`, `min`, `range`,
`sample_size`, `sd`, `sum`
:Returns:
`Data`
The unweighted variance of the data array.
**Examples:**
>>> f.data
<CF Data(12, 73, 96): [[[236.512756348, ..., 256.93371582]]] K>
>>> f.var()
<CF Data(): 514.611608515 K2>
'''
data = self.get_data(None)
if data is None:
raise ValueError(
"ERROR: Can't get the variance when there is no data array")
return data.var(squeeze=True, ddof=0)
    @property
    def subspace(self):
        '''Return a new variable whose data is subspaced.

        This attribute may be indexed to select a subspace from
        dimension index values.

        **Subspacing by indexing**

        Subspacing by dimension indices uses an extended Python
        slicing syntax, which is similar to numpy array indexing.
        There are two extensions to the numpy indexing functionality:

        * Size 1 dimensions are never removed.

          An integer index i takes the i-th element but does not
          reduce the rank of the output array by one.

        * When advanced indexing is used on more than one dimension,
          the advanced indices work independently.

          When more than one dimension's slice is a 1-d boolean array
          or 1-d sequence of integers, then these indices work
          independently along each dimension (similar to the way
          vector subscripts work in Fortran), rather than by their
          elements.

        '''
        # Subspace wraps this construct and implements __getitem__
        return Subspace(self)
    @property
    def shape(self):
        '''A tuple of the data array's dimension sizes.

        .. seealso:: `data`, `hasdata`, `ndim`, `size`

        **Examples:**

        >>> f.shape
        (73, 96)
        >>> f.ndim
        2

        >>> f.ndim
        0
        >>> f.shape
        ()

        >>> f.hasdata
        True
        >>> len(f.shape) == f.ndim
        True
        >>> reduce(lambda x, y: x*y, f.shape, 1) == f.size
        True

        '''
        return self.data.shape

    @property
    def ndim(self):
        '''The number of dimensions in the data array.

        .. seealso:: `data`, `hasdata`, `isscalar`, `shape`

        **Examples:**

        >>> f.hasdata
        True
        >>> f.shape
        (73, 96)
        >>> f.ndim
        2

        >>> f.shape
        ()
        >>> f.ndim
        0

        '''
        return self.data.ndim

    @property
    def size(self):
        '''The number of elements in the data array.

        .. seealso:: `data`, `hasdata`, `ndim`, `shape`

        **Examples:**

        >>> f.shape
        (73, 96)
        >>> f.size
        7008

        >>> f.shape
        ()
        >>> f.ndim
        0
        >>> f.size
        1

        >>> f.shape
        (1, 1, 1)
        >>> f.ndim
        3
        >>> f.size
        1

        >>> f.hasdata
        True
        >>> f.size == reduce(lambda x, y: x*y, f.shape, 1)
        True

        '''
        return self.data.size
    @property
    def datetime_array(self):
        '''An independent numpy array of date-time objects.

        Only applicable for data with reference time units.

        If the calendar has not been set then the CF default calendar
        will be used and the units will be updated accordingly.

        .. seealso:: `array`, `varray`

        **Examples:**

        >>> f.units
        'days since 2000-01-01'
        >>> print(f.array)
        [ 0 31 60 91]
        >>> print(f.datetime_array)
        [cftime.DatetimeGregorian(2000-01-01 00:00:00)
         cftime.DatetimeGregorian(2000-02-01 00:00:00)
         cftime.DatetimeGregorian(2000-03-01 00:00:00)
         cftime.DatetimeGregorian(2000-04-01 00:00:00)]

        '''
        data = self.get_data(None)
        if data is None:
            raise AttributeError(
                "{} has no data array".format(self.__class__.__name__))

        # The conversion itself is delegated to the data
        return data.datetime_array
    @property
    def dtype(self):
        '''The `numpy` data type of the data array.

        By default this is the data type with the smallest size and
        smallest scalar kind to which all sub-arrays of the master
        data array may be safely cast without loss of information. For
        example, if the sub-arrays have data types 'int64' and
        'float32' then the master data array's data type will be
        'float64'; or if the sub-arrays have data types 'int64' and
        'int32' then the master data array's data type will be
        'int64'.

        Setting the data type to a `numpy.dtype` object, or any object
        convertible to a `numpy.dtype` object, will cause the master
        data array elements to be recast to the specified type at the
        time that they are next accessed, and not before. This does
        not immediately change the master data array elements, so, for
        example, reinstating the original data type prior to data
        access results in no loss of information.

        Deleting the data type forces the default behaviour. Note that
        if the data type of any sub-arrays has changed after `dtype`
        has been set (which could occur if the data array is accessed)
        then the reinstated default data type may be different to the
        data type prior to `dtype` being set.

        **Examples:**

        >>> f.dtype
        dtype('float64')
        >>> type(f.dtype)
        <type 'numpy.dtype'>

        >>> print(f.array)
        [0.5 1.5 2.5]
        >>> import numpy
        >>> f.dtype = numpy.dtype(int)
        >>> print(f.array)
        [0 1 2]
        >>> f.dtype = bool
        >>> print(f.array)
        [False  True  True]
        >>> f.dtype = 'float64'
        >>> print(f.array)
        [ 0.  1.  1.]

        '''
        data = self.get_data(None)
        if data is None:
            raise AttributeError("{} doesn't have attribute 'dtype'".format(
                self.__class__.__name__))

        return data.dtype

    @dtype.setter
    def dtype(self, value):
        # DCH - allow dtype to be set before data c.f. Units
        # NOTE(review): when there is no data the assignment is
        # silently ignored — confirm this is intended
        data = self.get_data(None)
        if data is not None:
            data.dtype = value

    @dtype.deleter
    def dtype(self):
        # Deleting with no data is a no-op
        data = self.get_data(None)
        if data is not None:
            del data.dtype
@property
def hardmask(self):
'''Whether the mask is hard (True) or soft (False).
When the mask is hard, masked elements of the data array can not
be unmasked by assignment, but unmasked elements may be still be
masked.
When the mask is soft, masked entries of the data array may be
unmasked by assignment and unmasked entries may be masked.
By default, the mask is hard.
.. seealso:: `where`, `subspace`, `__setitem__`
**Examples:**
>>> f.hardmask = False
>>> f.hardmask
False
'''
data = self.get_data(None)
if data is None:
raise AttributeError(
"{} doesn't have attribute 'hardmask'".format(self.__class__.__name__))
return data.hardmask
@hardmask.setter
def hardmask(self, value):
data = self.get_data(None)
if data is None:
raise AttributeError(
"{} doesn't have attribute 'hardmask'".format(self.__class__.__name__))
data.hardmask = value
@hardmask.deleter
def hardmask(self):
raise AttributeError(
"Won't delete {} attribute 'hardmask'".format(self.__class__.__name__))
@property
def array(self):
'''A numpy array deep copy of the data array.
Changing the returned numpy array does not change the data array.
.. seealso:: `data`, `datetime_array`, `varray`
**Examples:**
>>> f.data
<CF Data(5): [0, ... 4] kg m-1 s-2>
>>> a = f.array
>>> type(a)
<type 'numpy.ndarray'>
>>> print(a)
[0 1 2 3 4]
>>> a[0] = 999
>>> print(a)
[999 1 2 3 4]
>>> print(f.array)
[0 1 2 3 4]
>>> f.data
<CF Data(5): [0, ... 4] kg m-1 s-2>
'''
data = self.get_data(None)
if data is None:
raise AttributeError("{} has no data array".format(self.__class__.__name__))
return data.array
@property
def varray(self):
'''A numpy array view of the data array.
Changing the elements of the returned view changes the data array.
.. seealso:: `array`, `data`, `datetime_array`
**Examples:**
>>> f.data
<CF Data(5): [0, ... 4] kg m-1 s-2>
>>> a = f.array
>>> type(a)
<type 'numpy.ndarray'>
>>> print(a)
[0 1 2 3 4]
>>> a[0] = 999
>>> print(a)
[999 1 2 3 4]
>>> print(f.array)
[999 1 2 3 4]
>>> f.data
<CF Data(5): [999, ... 4] kg m-1 s-2>
'''
data = self.get_data(None)
if data is None:
raise AttributeError("{} has no data array".format(self.__class__.__name__))
return data.varray
@property
def isauxiliary(self):
'''True if the variable is an auxiliary coordinate object.
.. seealso:: `isdimension`, `isdomainancillary`,
`isfieldancillary`, `ismeasure`
**Examples:**
>>> f.isauxiliary
False
'''
return False
@property
def isdimension(self):
'''True if the variable is a dimension coordinate object.
.. seealso:: `isauxiliary`, `isdomainancillary`,
`isfieldancillary`, `ismeasure`
**Examples:**
>>> f.isdimension
False
'''
return False
@property
def isdomainancillary(self):
'''True if the variable is a domain ancillary object.
.. seealso:: `isauxiliary`, `isdimension`, `isfieldancillary`,
`ismeasure`
**Examples:**
>>> f.isdomainancillary
False
'''
return False
@property
def isfieldancillary(self):
'''True if the variable is a field ancillary object.
.. seealso:: `isauxiliary`, `isdimension`, `isdomainancillary`,
`ismeasure`
**Examples:**
>>> f.isfieldancillary
False
'''
return False
@property
def ismeasure(self):
'''True if the variable is a cell measure object.
.. seealso:: `isauxiliary`, `isdimension`, `isdomainancillary`,
`isfieldancillary`
**Examples:**
>>> f.ismeasure
False
'''
return False
@property
def isscalar(self):
'''True if the data array is scalar.
.. seealso:: `has_data`, `ndim`
**Examples:**
>>> f.ndim
0
>>> f.isscalar
True
>>> f.ndim >= 1
True
>>> f.isscalar
False
>>> f.hasdata
False
>>> f.isscalar
False
'''
data = self.get_data(None)
if data is None:
return False
return data.isscalar
def ceil(self, inplace=False, i=False):
'''The ceiling of the data, element-wise.
The ceiling of ``x`` is the smallest integer ``n``, such that
``n>=x``.
.. versionadded:: 1.0
.. seealso:: `floor`, `rint`, `trunc`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with ceilinged of data. If the operation was
in-place then `None` is returned.
**Examples:**
>>> print(f.array)
[-1.9 -1.5 -1.1 -1. 0. 1. 1.1 1.5 1.9]
>>> print(f.ceil().array)
[-1. -1. -1. -1. 0. 1. 2. 2. 2.]
>>> f.ceil(inplace=True)
>>> print(f.array)
[-1. -1. -1. -1. 0. 1. 2. 2. 2.]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'ceil', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.ceil(inplace=True)
if inplace:
v = None
return v
def chunk(self, chunksize=None):
'''Partition the data array.
:Parameters:
chunksize: `int`
:Returns:
`None`
'''
data = self.get_data(None)
if data is not None:
data.chunk(chunksize)
def clip(self, a_min, a_max, units=None, inplace=False, i=False):
'''Limit the values in the data.
Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of ``[0, 1]`` is
specified, values smaller than 0 become 0, and values larger than
1 become 1.
:Parameters:
a_min:
Minimum value. If `None`, clipping is not performed on
lower interval edge. Not more than one of `a_min` and
`a_max` may be `None`.
a_max:
Maximum value. If `None`, clipping is not performed on
upper interval edge. Not more than one of `a_min` and
`a_max` may be `None`.
units: `str` or `Units`
Specify the units of *a_min* and *a_max*. By default the
same units as the data are assumed.
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with clipped data. If the operation was
in-place then `None` is returned.
**Examples:**
>>> g = f.clip(-90, 90)
>>> g = f.clip(-90, 90, 'degrees_north')
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'clip', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.clip(a_min, a_max, units=units, inplace=True)
if inplace:
v = None
return v
def close(self):
'''Close all files referenced by the construct.
Note that a closed file will be automatically reopened if its
contents are subsequently required.
.. seealso:: `files`
:Returns:
`None`
**Examples:**
>>> f.close()
'''
data = self.get_data(None)
if data is not None:
data.close()
@classmethod
def concatenate(cls, variables, axis=0, _preserve=True):
'''Join a sequence of variables together.
:Parameters:
variables: sequence of constructs.
axis: `int`, optional
:Returns:
TODO
'''
variable0 = variables[0]
if len(variables) == 1:
return variable0.copy()
out = variable0.copy() #data=False)
data = Data.concatenate([v.get_data() for v in variables],
axis=axis,
_preserve=_preserve)
out.set_data(data, copy=False)
return out
def cos(self, bounds=True, inplace=False, i=False):
'''Take the trigonometric cosine of the data, element-wise.
Units are accounted for in the calculation, so that the the cosine
of 90 degrees_east is 0.0, as is the cosine of 1.57079632
radians. If the units are not equivalent to radians (such as
Kelvin) then they are treated as if they were radians.
The output units are '1' (nondimensionsal).
The "standard_name" and "long_name" properties are removed from
the result.
.. seealso:: `sin`, `tan`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with the cosine of data values. If the
operation was in-place then `None` is returned.
**Examples:**
>>> f.Units
<Units: degrees_east>
>>> print(f.array)
[[-90 0 90 --]]
>>> f.cos()
>>> f.Units
<Units: 1>
>>> print(f.array)
[[0.0 1.0 0.0 --]]
>>> f.Units
<Units: m s-1>
>>> print(f.array)
[[1 2 3 --]]
>>> f.cos()
>>> f.Units
<Units: 1>
>>> print(f.array)
[[0.540302305868 -0.416146836547 -0.9899924966 --]]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'cos', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.cos(inplace=True)
# Remove misleading identities
v.del_property('standard_name', None)
v.del_property('long_name', None)
if inplace:
v = None
return v
def count(self):
'''Count the non-masked elements of the data.
:Returns:
`int`
The number of non-masked elements.
**Examples:**
>>> n = f.count()
'''
data = self.get_data(None)
if data is None:
raise AttributeError(
"Can't count when there are data")
return data.count()
def count_masked(self):
'''Count the masked elements of the data.
:Returns:
`int`
The number of masked elements.
**Examples:**
>>> n = f.count_masked()
'''
data = self.get_data(None)
if data is None:
raise AttributeError(
"Can't count masked when there are data")
return data.count_masked()
def cyclic(self, axes=None, iscyclic=True):
'''Set the cyclicity of an axis.
.. seealso:: `iscyclic`
:Parameters:
axes: (sequence of) `int`
The axes to be set. Each axis is identified by its integer
position. By default no axes are set.
iscyclic: `bool`, optional
If False then the axis is set to be non-cyclic. By default
the axis is set to be cyclic.
:Returns:
`!set`
**Examples:**
>>> f.cyclic()
set()
>>> f.cyclic(1)
set()
>>> f.cyclic()
{1} TODO
'''
data = self.get_data(None)
if data is None:
return set()
return data.cyclic(axes, iscyclic)
def datum(self, *index):
'''Return an element of the data array as a standard Python scalar.
The first and last elements are always returned with
``f.datum(0)`` and ``f.datum(-1)`` respectively, even if the data
array is a scalar array or has two or more dimensions.
:Parameters:
index: optional
Specify which element to return. When no positional
arguments are provided, the method only works for data
arrays with one element (but any number of dimensions),
and the single element is returned. If positional
arguments are given then they must be one of the
following:
* An integer. This argument is interpreted as a flat
index into the array, specifying which element to copy
and return.
*Parameter example:*
If the data aray shape is ``(2, 3, 6)`` then:
* ``f.datum(0)`` is equivalent to ``f.datum(0, 0, 0)``.
* ``f.datum(-1)`` is equivalent to ``f.datum(1, 2, 5)``.
* ``f.datum(16)`` is equivalent to ``f.datum(0, 2, 4)``.
If *index* is ``0`` or ``-1`` then the first or last
data array element respecitively will be returned,
even if the data array is a scalar array or has two or
more dimensions. ..
* Two or more integers. These arguments are interpreted
as a multidimensionsal index to the array. There must
be the same number of integers as data array
dimensions. ..
* A tuple of integers. This argument is interpreted as a
multidimensionsal index to the array. There must be
the same number of integers as data array dimensions.
*Example:*
``f.datum((0, 2, 4))`` is equivalent to ``f.datum(0,
2, 4)``; and ``f.datum(())`` is equivalent to
``f.datum()``.
:Returns:
A copy of the specified element of the array as a suitable
Python scalar.
**Examples:**
>>> print(f.array)
2
>>> f.datum()
2
>>> 2 == f.datum(0) == f.datum(-1) == f.datum(())
True
>>> print(f.array)
[[2]]
>>> 2 == f.datum() == f.datum(0) == f.datum(-1)
True
>>> 2 == f.datum(0, 0) == f.datum((-1, -1)) == f.datum(-1, 0)
True
>>> print(f.array)
[[4 -- 6]
[1 2 3]]
>>> f.datum(0)
4
>>> f.datum(-1)
3
>>> f.datum(1)
masked
>>> f.datum(4)
2
>>> f.datum(-2)
2
>>> f.datum(0, 0)
4
>>> f.datum(-2, -1)
6
>>> f.datum(1, 2)
3
>>> f.datum((0, 2))
6
'''
data = self.get_data(None)
if data is None:
raise ValueError(
"ERROR: Can't return an element when there is no data array")
return data.datum(*index)
    def equals(self, other, rtol=None, atol=None, verbose=False,
               ignore_data_type=False, ignore_fill_value=False,
               ignore_properties=(), ignore_compression=False,
               ignore_type=False):
        '''Whether two instances are the same.

        Equality is strict by default: the same descriptive properties
        must be present with the same values and data types, and any
        data arrays must have the same shape, data type, missing data
        mask and be element-wise equal.

        Two real numbers ``x`` and ``y`` are considered equal if
        ``|x-y|<=atol+rtol|y|``. See the *atol* and *rtol* parameters.

        .. versionadded:: 1.7.0

        :Parameters:

            other:
                The object to compare for equality.

            atol: float, optional
                Tolerance on absolute differences between real
                numbers. Defaults to the value of the `cf.ATOL`
                function.

            rtol: float, optional
                Tolerance on relative differences between real
                numbers. Defaults to the value of the `cf.RTOL`
                function.

            ignore_fill_value: `bool`, optional
                If True then omit "_FillValue" and "missing_value"
                properties from the comparison.

            verbose: `bool`, optional
                If True then print information about differences that
                lead to inequality.

            ignore_properties: sequence of `str`, optional
                The names of properties to omit from the comparison.

            ignore_data_type: `bool`, optional
                If True then ignore data types in all numerical
                comparisons.

            ignore_compression: `bool`, optional
                If True then ignore any compression applied to the
                underlying arrays.

            ignore_type: `bool`, optional
                If True then equality is possible for any object with
                a compatible API, not only instances of the same type.

        :Returns:

            `bool`
                Whether the two instances are equal.

        **Examples:**

        >>> f.equals(f.copy())
        True
        >>> f.equals('a string')
        False

        '''
        # Check that each instance has the same Units.  Objects
        # without a Units attribute (AttributeError) skip this check
        # rather than failing it.
        try:
            if not self.Units.equals(other.Units):
                if verbose:
                    print("{0}: Different Units: {1!r} != {2!r}".format(
                        self.__class__.__name__, self.Units, other.Units))
                return False
        except AttributeError:
            pass

        # Special properties (e.g. Units) are compared above/handled
        # separately, so exclude them from the property comparison
        ignore_properties = tuple(ignore_properties) + self._special_properties

        # NOTE(review): ignore_compression is accepted but not
        # forwarded to super().equals() — confirm whether this is
        # intentional or an omission.
        return super().equals(other, rtol=rtol, atol=atol,
                              verbose=verbose, ignore_data_type=ignore_data_type,
                              ignore_fill_value=ignore_fill_value,
                              ignore_properties=ignore_properties,
                              ignore_type=ignore_type)
def equivalent(self, other, rtol=None, atol=None, traceback=False):
'''True if two constructs are equal, False otherwise.
Two real numbers ``x`` and ``y`` are considered equal if
``|x-y|<=atol+rtol|y|``, where ``atol`` (the tolerance on absolute
differences) and ``rtol`` (the tolerance on relative differences)
are positive, typically very small numbers. See the *atol* and
*rtol* parameters.
:Parameters:
other:
The object to compare for equality.
atol: `float`, optional
The tolerance on absolute differences between real
numbers. The default value is set by the `ATOL` function.
rtol: `float`, optional
The tolerance on relative differences between real
numbers. The default value is set by the `RTOL` function.
'''
if self is other:
return True
# Check that each instance is the same type
if type(self) != type(other):
print("{}: Different types: {}, {}".format(
self.__class__.__name__,
self.__class__.__name__,
other.__class__.__name__))
return False
identity0 = self.identity()
identity1 = other.identity()
if identity0 is None or identity1 is None or identity0 != identity1:
# add traceback
return False
# ------------------------------------------------------------
# Check the special attributes
# ------------------------------------------------------------
self_special = self._private['special_attributes']
other_special = other._private['special_attributes']
if set(self_special) != set(other_special):
if traceback:
print("%s: Different attributes: %s" %
(self.__class__.__name__,
set(self_special).symmetric_difference(other_special)))
return False
for attr, x in self_special.iteritems():
y = other_special[attr]
result = cf_equivalent(x, y, rtol=rtol, atol=atol,
traceback=traceback)
if not result:
if traceback:
print("{}: Different {} attributes: {!r}, {!r}".format(
self.__class__.__name__, attr, x, y))
return False
#--- End: for
# ------------------------------------------------------------
# Check the data
# ------------------------------------------------------------
if not self._equivalent_data(other, rtol=rtol, atol=atol,
traceback=traceback):
# add traceback
return False
return True
    def convert_reference_time(self, units=None,
                               calendar_months=False,
                               calendar_years=False, inplace=False,
                               i=False):
        '''Convert reference time data values to have new units.

        Conversion is done by decoding the reference times to
        date-time objects and then re-encoding them for the new units.

        This method is primarily for conversions which require a
        change in the date-times originally encoded — for example,
        reinterpreting data values in units of "months" since a
        reference time as "calendar months" since a reference time.
        For conversions which do not require such a change, simply
        reassigning the units (``c.Units = ...``) gives the same
        result and is considerably faster.

        .. note:: The udunits units "year" (exactly 365.242198781
                  days) and "month" (exactly year/12) are not calendar
                  units and should be used with caution — see the CF
                  conventions.

        :Parameters:

            units: `Units`, optional
                The reference time units to convert to. By default:
                days since the original reference time, in the
                original calendar.

            calendar_months: `bool`, optional
                If True then treat units of ``'months'`` as calendar
                months (in whichever calendar is originally
                specified) rather than 365.242198781/12 days.

            calendar_years: `bool`, optional
                If True then treat units of ``'years'`` as calendar
                years (in whichever calendar is originally specified)
                rather than 365.242198781 days.

            inplace: `bool`, optional
                If True then do the operation in-place and return
                `None`.

            i: deprecated at version 3.0.0
                Use *inplace* parameter instead.

        :Returns:

            The construct with converted reference time data values,
            or `None` if the operation was in-place.

        **Examples:**

        >>> f.Units
        <Units: months since 2000-1-1>
        >>> f.convert_reference_time(calendar_months=True, inplace=True)
        >>> f.Units
        <Units: days since 2000-1-1>

        '''
        def _convert_reftime_units(value, units, reftime):
            '''Convert one reference-time value to a date-time object.

            :Parameters:

                value: number

                units: `Units`

            :Returns:

                `datetime.datetime` or `cf.Datetime`

            '''
            t = TimeDuration(value, units=units)
            if value > 0:
                return t.interval(reftime, end=False)[1]
            else:
                return t.interval(reftime, end=True)[0]
        #--- End: def

        if i:
            _DEPRECATION_ERROR_KWARGS(
                self, 'convert_reference_time', i=True)  # pragma: no cover

        if not self.Units.isreftime:
            raise ValueError(
                "{} must have reference time units, not {!r}".format(
                    self.__class__.__name__, self.Units))

        if inplace:
            v = self
        else:
            v = self.copy()

        units0 = self.Units

        if units is None:
            # By default, set the target units to "days since
            # <reference time of self.Units>,
            # calendar=<self.calendar>"
            units = Units('days since '+units0.units.split(' since ')[1],
                          calendar=units0._calendar)
        elif not getattr(units, 'isreftime', False):
            raise ValueError(
                "New units must be reference time units, not {0!r}".format(units))

        # _month_units / _year_units are module-level sequences of
        # unit strings (e.g. 'month', 'months') — defined elsewhere in
        # this file
        if units0._units_since_reftime in _month_units:
            if calendar_months:
                # Reinterpret "months" as calendar months
                units0 = Units('calendar_'+units0.units, calendar=units0._calendar)
            else:
                # Convert the source units to days so that udunits
                # "month" semantics do not leak into the decoding
                units0 = Units('days since '+units0.units.split(' since ')[1],
                               calendar=units0._calendar)
                v.Units = units0
        elif units0._units_since_reftime in _year_units:
            if calendar_years:
                # Reinterpret "years" as calendar years
                units0 = Units('calendar_'+units0.units, calendar=units0._calendar)
            else:
                units0 = Units('days since '+units0.units.split(' since ')[1],
                               calendar=units0._calendar)
                v.Units = units0

        # Not LAMAed! (presumably: the whole array is realised in
        # memory here rather than processed partition-by-partition —
        # TODO confirm)
        v.set_data(Data(
            numpy_vectorize(
                functools_partial(_convert_reftime_units,
                                  units=units0._units_since_reftime,
                                  reftime=dt(units0.reftime, calendar=units0._calendar),
                                  ),
                otypes=[object])(v),
            units=units))

        if inplace:
            v = None
        return v
def flatten(self, axes=None, inplace=False):
'''Flatten axes of the data
Any subset of the axes may be flattened.
The shape of the data may change, but the size will not.
The flattening is executed in row-major (C-style) order. For
example, the array ``[[1, 2], [3, 4]]`` would be flattened across
both dimensions to ``[1 2 3 4]``.
.. versionaddedd:: 3.0.2
.. seealso:: `insert_dimension`, `flip`, `swapaxes`, `transpose`
:Parameters:
axes: (sequence of) int or str, optional
Select the axes. By default all axes are flattened. The
*axes* argument may be one, or a sequence, of:
* An internal axis identifier. Selects this axis.
..
* An integer. Selects the axis coresponding to the given
position in the list of axes of the data array.
No axes are flattened if *axes* is an empty sequence.
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
The construct with flattened data, or `None` if the
operation was in-place.
**Examples**
>>> f.shape
(1, 2, 3, 4)
>>> f.flatten().shape
(24,)
>>> f.flatten([1, 3]).shape
(1, 8, 3)
>>> f.flatten([0, -1], inplace=True)
>>> f.shape
(4, 2, 3)
'''
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.flatten(axes, inplace=True)
if inplace:
v = None
return v
def floor(self, inplace=False, i=False):
'''Floor the data array, element-wise.
The floor of ``x`` is the largest integer ``n``, such that
``n<=x``.
.. versionadded:: 1.0
.. seealso:: `ceil`, `rint`, `trunc`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with floored data. If the operation was
in-place then `None` is returned.
**Examples:**
>>> print(f.array)
[-1.9 -1.5 -1.1 -1. 0. 1. 1.1 1.5 1.9]
>>> print(f.floor().array)
[-2. -2. -2. -1. 0. 1. 1. 1. 1.]
>>> f.floor(inplace=True)
>>> print(f.array)
[-2. -2. -2. -1. 0. 1. 1. 1. 1.]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'floor', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.floor(inplace=True)
if inplace:
v = None
return v
def match_by_naxes(self, *naxes):
'''Whether or not the data has a given dimensionality.
.. versionadded:: 3.0.0
.. seealso:: `match`, `match_by_identity`, `match_by_property`,
`match_by_units`
:Parameters:
naxes: optional
Dimensionalities to be compared.
A dimensionality given by an `int` or a `Query` object.
If no numbers are provided then there is always a match.
:Returns:
`bool`
Whether or not there is a match.
**Examples:**
>>> f.ndim
3
>>> f.match_by_naxes(3)
True
>>> f.match_by_naxes(cf.ge(1))
True
>>> f.match_by_naxes(1, 2, 3)
True
>>> f.match_by_naxes(2)
False
>>> f.match_by_naxes(cf.gt(3))
False
'''
if not naxes:
return True
data = self.get_data(None)
if data is None:
return False
self_ndim = data.ndim
for ndim in naxes:
ok = (ndim == self_ndim)
if ok:
return True
#--- End: for
return False
def match_by_units(self, *units, exact=True):
'''Whether or not the construct has given units.
.. versionadded:: 3.0.0
.. seealso:: `match`, `match_by_identity`, `match_by_property`,
`match_by_naxes`
:Parameters:
units: optional
Units to be compared.
Units are specified by a string or compiled regular
expression (e.g. ``'km'``, ``'m s-1'``,
``re.compile('^kilo')``, etc.) or a `Units` object
(e.g. ``Units('km')``, ``Units('m s-1')``, etc.).
If no units are provided then there is always a match.
exact: `bool`, optional
If False then a match occurs if the construct's units
are equivalent to any of those given by *units*. For
example, metres and are equivelent to kilometres. By
default, a match only occurs if the construct's units are
exactly one of those given by *units*. Note that the
format of the units is not important, i.e. 'm' is exactly
the same as 'metres' for this purpose.
:Returns:
`bool`
Whether or not there is a match.
**Examples:**
>>> f.units
'metres'
>>> f.match_by_units('metres')
True
>>> f.match_by_units('m')
True
>>> f.match_by_units(Units('m'))
True
>>> f.match_by_units('m', 'kilogram')
True
>>> f.match_by_units('km', exact=False)
True
>>> f.match_by_units(cf.Units('km'), exact=False)
True
>>> f.match_by_units(re.compile('^met'))
True
>>> f.match_by_units(cf.Units('km'))
False
>>> f.match_by_units(cf.Units('kg m-2'))
False
'''
if not units:
return True
self_units = self.Units
ok = False
for value0 in units:
try:
# re.compile object
ok = value0.search(self_units.units)
except (AttributeError, TypeError):
if exact:
ok = Units(value0).equals(self_units)
else:
ok = Units(value0).equivalent(self_units)
#--- End: if
if ok:
break
#--- End: for
return ok
# ----------------------------------------------------------------
# Methods
# ----------------------------------------------------------------
def all(self):
'''Test whether all data elements evaluate to True.
Performs a logical "and" over the data array and returns the
result. Masked values are considered as True during computation.
.. seealso:: `allclose`, `any`
:Returns:
`bool`
Whether ot not all data elements evaluate to True.
**Examples:**
>>> print(f.array)
[[0 3 0]]
>>> f.all()
False
>>> print(f.array)
[[1 3 --]]
>>> f.all()
True
'''
data = self.get_data(None)
if data is not None:
return data.all()
return False
    def allclose(self, y, atol=None, rtol=None):
        '''Test whether all data are element-wise equal to other,
        broadcastable data.

        Two real numbers ``x`` and ``y`` are considered equal if
        ``|x-y|<=atol+rtol|y|``, where ``atol`` (the tolerance on absolute
        differences) and ``rtol`` (the tolerance on relative differences)
        are positive, typically very small numbers. See the *atol* and
        *rtol* parameters.

        .. seealso:: `all`, `any`, `isclose`

        :Parameters:

            y:
                The object to be compared with the data array. *y* must be
                broadcastable to the data array and if *y* has units then
                they must be compatible. May be any object that can be
                converted to a `Data` object (which includes numpy array
                and `Data` objects).

            atol: `float`, optional
                The tolerance on absolute differences between real
                numbers. The default value is set by the `ATOL` function.

            rtol: `float`, optional
                The tolerance on relative differences between real
                numbers. The default value is set by the `RTOL` function.

        :Returns:

            `bool`
                Returns `True` if the data are equal within the given
                tolerance; `False` otherwise. Also `False` when either
                operand has no data array.

        **Examples:**

        >>> x = f.allclose(g)

        '''
        data = self.get_data(None)
        if data is None:
            return False

        if isinstance(y, self.__class__):
            # y is the same kind of construct: align it with self
            # before comparing (presumably units/axis conformance —
            # TODO confirm what _conform_for_assignment does)
            y_data = y.get_data(None)
            if y_data is None:
                return False

            y = self._conform_for_assignment(y)
            y_data = y.get_data()
        else:
            # y may be a construct-like object with get_data(), or a
            # bare array-like; fall back to using y itself when
            # get_data is absent or returns None
            try:
                y_data = y.get_data(None)
            except AttributeError:
                y_data = y
            else:
                if y_data is None:
                    y_data = y
        #--- End: if

        return data.allclose(y_data, rtol=rtol, atol=atol)
def any(self):
'''Test whether any data elements evaluate to True.
Performs a logical "or" over the data array and returns the
result. Masked values are considered as False during computation.
.. seealso:: `all`, `allclose`
:Returns:
`bool`
Whether ot not any data elements evaluate to `True`.
**Examples:**
>>> print(f.array)
[[0 0 0]]
>>> f.any()
False
>>> print(f.array)
[[-- 0 0]]
>>> f.any()
False
>>> print(f.array)
[[-- 3 0]]
>>> f.any()
True
'''
data = self.get_data(None)
if data is not None:
return data.any()
return False
def files(self):
'''Return the names of any files containing parts of the data array.
.. seealso:: `close`
:Returns:
`!set`
The file names in normalized, absolute form.
**Examples:**
>>> f = cf.read_field('../file[123].nc')
>>> f.files()
{'/data/user/file1.nc',
'/data/user/file2.nc',
'/data/user/file3.nc'}
>>> a = f.array
>>> f.files()
set()
'''
data = self.get_data(None)
if data is None:
out = set()
else:
out = data.files()
return out
def fill_value(self, default=None):
'''Return the data array missing data value.
This is the value of the `missing_value` CF property, or if that
is not set, the value of the `_FillValue` CF property, else if
that is not set, ``None``. In the last case the default `numpy`
missing data value for the array's data type is assumed if a
missing data value is required.
.. seealso:: `cf.default_netCDF_fillvals`, `_FillValue`,
`missing_value`
:Parameters:
default: optional
If the missing value is unset then return this value. By
default, *default* is `None`. If *default* is the special
value ``'netCDF'`` then return the netCDF default value
appropriate to the data array's data type is used. These
may be found with the `cf.default_netCDF_fillvals`
function. For example:
>>> cf.default_netCDF_fillvals()
{'S1': '\x00',
'i1': -127,
'u1': 255,
'i2': -32767,
'u2': 65535,
'i4': -2147483647,
'u4': 4294967295,
'i8': -9223372036854775806,
'u8': 18446744073709551614,
'f4': 9.969209968386869e+36,
'f8': 9.969209968386869e+36}
:Returns:
The missing deata value or, if one has not been set, the
value specified by *default*
**Examples:**
>>> f.fill_value()
None
>>> f._FillValue = -1e30
>>> f.fill_value()
-1e30
>>> f.missing_value = 1073741824
>>> f.fill_value()
1073741824
>>> del f.missing_value
>>> f.fill_value()
-1e30
>>> del f._FillValue
>>> f.fill_value()
None
>>> f.dtype
dtype('float64')
>>> f.fill_value(default='netCDF')
9.969209968386869e+36
>>> f._FillValue = -999
>>> f.fill_value(default='netCDF')
-999
'''
fillval = self.get_property('missing_value', None)
if fillval is None:
fillval = self.get_property('_FillValue', None)
if fillval is None:
if default == 'netCDF':
d = self.dtype
fillval = default_netCDF_fillvals()[d.kind + str(d.itemsize)]
else:
fillval = default
#--- End: if
return fillval
def flip(self, axes=None, inplace=False, i=False):
'''Flip (reverse the direction of) data dimensions.
.. seealso:: `flatten`, `insert_dimension`, `squeeze`,
`transpose`, `unsqueeze`
:Parameters:
axes: optional
Select the domain axes to flip. One, or a sequence, of:
* The position of the dimension in the data.
If no axes are specified then all axes are flipped.
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use the *inplace* parameter instead.
:Returns:
The construct with flipped axes, or `None` if the
operation was in-place.
**Examples:**
>>> f.flip()
>>> f.flip(1)
>>> f.flip([0, 1])
>>> g = f[::-1, :, ::-1]
>>> f.flip([2, 0]).equals(g)
True
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'flip', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.flip(axes, inplace=True)
if inplace:
v = None
return v
def exp(self, inplace=False, i=False):
'''The exponential of the data, element-wise.
The "standard_name" and "long_name" properties are removed from
the result.
.. seealso:: `log`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with the exponential of data values. If the
operation was in-place then `None` is returned.
**Examples:**
>>> f.data
<CF Data(1, 2): [[1, 2]]>
>>> f.exp().data
<CF Data(1, 2): [[2.71828182846, 7.38905609893]]>
>>> f.data
<CF Data(1, 2): [[1, 2]] 2>
>>> f.exp().data
<CF Data(1, 2): [[7.38905609893, 54.5981500331]]>
>>> f.data
<CF Data(1, 2): [[1, 2]] kg m-1 s-2>
>>> f.exp()
ValueError: Can't take exponential of dimensional quantities: <Units: kg m-1 s-2>
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'exp', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.exp(inplace=True)
# Remove misleading identities
v.del_property('standard_name', None)
v.del_property('long_name', None)
if inplace:
v = None
return v
def sin(self, inplace=False, i=False):
'''The trigonometric sine of the data, element-wise.
Units are accounted for in the calculation. For example, the the
sine of 90 degrees_east is 1.0, as is the sine of 1.57079632
radians. If the units are not equivalent to radians (such as
Kelvin) then they are treated as if they were radians.
The Units are changed to '1' (nondimensionsal).
The "standard_name" and "long_name" properties are removed from
the result.
.. seealso:: `cos`, `tan`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with the sine of data values. If the
operation was in-place then `None` is returned.
**Examples:**
>>> f.Units
<Units: degrees_north>
>>> print(f.array)
[[-90 0 90 --]]
>>> f.sin()
>>> f.Units
<Units: 1>
>>> print(f.array)
[[-1.0 0.0 1.0 --]]
>>> f.Units
<Units: m s-1>
>>> print(f.array)
[[1 2 3 --]]
>>> f.sin()
>>> f.Units
<Units: 1>
>>> print(f.array)
[[0.841470984808 0.909297426826 0.14112000806 --]]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'sin', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.sin(inplace=True)
# Remove misleading identities
v.del_property('standard_name', None)
v.del_property('long_name', None)
if inplace:
v = None
return v
def tan(self, inplace=False, i=False):
'''The trigonometric tangent of the data, element-wise.
Units are accounted for in the calculation, so that the the
tangent of 180 degrees_east is 0.0, as is the sine of
3.141592653589793 radians. If the units are not equivalent to
radians (such as Kelvin) then they are treated as if they were
radians.
The Units are changed to '1' (nondimensionsal).
The "standard_name" and "long_name" properties are removed from
the result.
.. seealso:: `cos`, `sin`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with the tangent of data values. If the
operation was in-place then `None` is returned.
**Examples:**
>>> f.Units
<Units: degrees_north>
>>> print(f.array)
[[-45 0 45 --]]
>>> f.tan()
>>> f.Units
<Units: 1>
>>> print(f.array)
[[-1.0 0.0 1.0 --]]
>>> f.Units
<Units: m s-1>
>>> print(f.array)
[[1 2 3 --]]
>>> f.tan()
>>> f.Units
<Units: 1>
>>> print(f.array)
[[1.55740772465 -2.18503986326 -0.142546543074 --]]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'tan', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.tan(inplace=True)
# Remove misleading identities
v.del_property('standard_name', None)
v.del_property('long_name', None)
if inplace:
v = None
return v
def log(self, base=None, inplace=False, i=False):
'''The logarithm of the data array.
By default the natural logarithm is taken, but any base may be
specified.
The "standard_name" and "long_name" properties are removed from
the result.
.. seealso:: `exp`
:Parameters:
base: number, optional
The base of the logiarthm. By default a natural logiarithm
is taken.
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with the logarithm of data values.
**Examples:**
>>> f.data
<CF Data(1, 2): [[1, 2]]>
>>> f.log().data
<CF Data(1, 2): [[0.0, 0.69314718056]] ln(re 1)>
>>> f.data
<CF Data(1, 2): [[1, 2]] 2>
>>> f.log().data
<CF Data(1, 2): [[0.0, 0.69314718056]] ln(re 2 1)>
>>> f.data
<CF Data(1, 2): [[1, 2]] kg s-1 m-2>
>>> f.log().data
<CF Data(1, 2): [[0.0, 0.69314718056]] ln(re 1 m-2.kg.s-1)>
>>> f.log(inplace=True)
>>> f.Units
<Units: >
>>> f.log()
ValueError: Can't take the logarithm to the base 2.718281828459045 of <Units: >
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'log', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.log(base, inplace=True)
# Remove misleading identities
v.del_property('standard_name', None)
v.del_property('long_name', None)
if inplace:
v = None
return v
def trunc(self, inplace=False, i=False):
'''Truncate the data, element-wise.
The truncated value of the scalar ``x``, is the nearest integer
``i`` which is closer to zero than ``x`` is. I.e. the fractional
part of the signed number ``x`` is discarded.
.. versionadded:: 1.0
.. seealso:: `ceil`, `floor`, `rint`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with truncated data. If the operation was
in-place then `None` is returned.
**Examples:**
>>> print(f.array)
[-1.9 -1.5 -1.1 -1. 0. 1. 1.1 1.5 1.9]
>>> print(f.trunc().array)
[-1. -1. -1. -1. 0. 1. 1. 1. 1.]
>>> f.trunc(inplace=True)
>>> print(f.array)
[-1. -1. -1. -1. 0. 1. 1. 1. 1.]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'trunc', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.trunc(inplace=True)
if inplace:
v = None
return v
# def uncompress(self, inplace=False):
# '''Uncompress the construct.
#
# Compression saves space by identifying and removing unwanted
# missing data. Such compression techniques store the data more
# efficiently and result in no precision loss.
#
# Whether or not the construct is compressed does not alter its
# functionality nor external appearance.
#
# The following type of compression are available:
#
# * Ragged arrays for discrete sampling geometries (DSG). Three
# different types of ragged array representation are
# supported.
#
# ..
#
# * Compression by gathering.
#
# .. versionadded:: 3.0.6
#
# .. seealso:: `cf.write`, `flatten`
#
# :Parameters:
#
# inplace: `bool`, optional
# If True then do the operation in-place and return `None`.
#
# :Returns:
#
# The uncompressed construct, or `None` if the operation was
# in-place.
#
# **Examples:**
#
# TODO
#
# '''
# if inplace:
# f = self
# else:
# f = self.copy()
#
# data = f.get_data(None)
# if data is not None:
# data.uncompress(inplace=True)
#
# if inplace:
# f = None
# return f
def unique(self):
'''The unique elements of the data.
:Returns:
`Data`
The unique data array values in a one dimensional `Data`
object.
**Examples:**
>>> print(f.array)
[[4 2 1]
[1 2 3]]
>>> print(f.unique().array)
[1 2 3 4]
>>> f[1, -1] = cf.masked
>>> print(f.array)
[[4 2 1]
[1 2 --]]
>>> print(f.unique().array)
[1 2 4]
'''
data = self.get_data(None)
if data is not None:
return data.unique()
raise ValueError(
"ERROR: Can't get unique values when there is no data array")
def identity(self, default='', strict=False, relaxed=False,
nc_only=False, relaxed_identity=None):
'''Return the canonical identity.
By default the identity is the first found of the following:
* The "standard_name" property.
* The "id" attribute, preceeded by ``'id%'``.
* The "cf_role" property, preceeded by ``'cf_role='``.
* The "axis" property, preceeded by ``'axis='``.
* The "long_name" property, preceeded by ``'long_name='``.
* The netCDF variable name, preceeded by ``'ncvar%'``.
* The coordinate type (``'X'``, ``'Y'``, ``'Z'`` or ``'T'``).
* The value of the *default* parameter.
.. versionadded:: 3.0.0
.. seealso:: `id`, `identities`
:Parameters:
default: optional
If no identity can be found then return the value of the
default parameter.
strict: `bool`, optional
If True then only take the identity from the
"standard_name" property or the "id" attribute, in that
order.
relaxed: `bool`, optional
If True then only take the identity from the
"standard_name" property, the "id" attribute, the
"long_name" property or netCDF variable name, in that
order.
nc_only: `bool`, optional
If True then only take the identity from the netCDF
variable name.
:Returns:
The identity.
**Examples:**
>>> f.properties()
{'foo': 'bar',
'long_name': 'Air Temperature',
'standard_name': 'air_temperature'}
>>> f.nc_get_variable()
'tas'
>>> f.identity()
'air_temperature'
>>> f.del_property('standard_name')
'air_temperature'
>>> f.identity(default='no identity')
'air_temperature'
>>> f.identity()
'long_name=Air Temperature'
>>> f.del_property('long_name')
>>> f.identity()
'ncvar%tas'
>>> f.nc_del_variable()
'tas'
>>> f.identity()
'ncvar%tas'
>>> f.identity()
''
>>> f.identity(default='no identity')
'no identity'
'''
if relaxed_identity:
_DEPRECATION_ERROR_KWARGS(self, 'identity',
relaxed_identity=True) # pragma: no cover
if nc_only:
if strict:
raise ValueError("'strict' and 'nc_only' parameters cannot both be True")
if relaxed:
raise ValueError("'relaxed' and 'nc_only' parameters cannot both be True")
n = self.nc_get_variable(None)
if n is not None:
return 'ncvar%{0}'.format(n)
return default
n = self.get_property('standard_name', None)
if n is not None:
return '{0}'.format(n)
n = getattr(self, 'id', None)
if n is not None:
return 'id%{0}'.format(n)
if relaxed:
n = self.get_property('long_name', None)
if n is not None:
return 'long_name={0}'.format(n)
n = self.nc_get_variable(None)
if n is not None:
return 'ncvar%{0}'.format(n)
return default
if strict:
return default
for prop in ('cf_role', 'axis', 'long_name'):
n = self.get_property(prop, None)
if n is not None:
return '{0}={1}'.format(prop, n)
#--- End: for
n = self.nc_get_variable(None)
if n is not None:
return 'ncvar%{0}'.format(n)
for ctype in ('X', 'Y', 'Z', 'T'):
if getattr(self, ctype, False):
return ctype
#--- End: for
return default
def identities(self):
'''Return all possible identities.
The identities comprise:
* The "standard_name" property.
* The "id" attribute, preceeded by ``'id%'``.
* The "cf_role" property, preceeded by ``'cf_role='``.
* The "axis" property, preceeded by ``'axis='``.
* The "long_name" property, preceeded by ``'long_name='``.
* All other properties (including "standard_name"), preceeded by
the property name and an ``'='``.
* The coordinate type (``'X'``, ``'Y'``, ``'Z'`` or ``'T'``).
* The netCDF variable name, preceeded by ``'ncvar%'``.
.. versionadded:: 3.0.0
.. seealso:: `id`, `identity`
:Returns:
`list`
The identities.
**Examples:**
>>> f.properties()
{'foo': 'bar',
'long_name': 'Air Temperature',
'standard_name': 'air_temperature'}
>>> f.nc_get_variable()
'tas'
>>> f.identities()
['air_temperature',
'long_name=Air Temperature',
'foo=bar',
'standard_name=air_temperature',
'ncvar%tas']
'''
out = super().identities()
i = getattr(self, 'id', None)
if i is not None:
# Insert id attribute
i = 'id%{0}'.format(i)
if not out:
out = [i]
else:
out0 = out[0]
if '=' in out0 or '%' in out0 or True in [a == out0 for a in 'XYZT']:
out.insert(0, i)
else:
out.insert(1, i)
#--- End: if
for ctype in ('X', 'Y', 'Z', 'T'):
if getattr(self, ctype, False):
out.append(ctype)
#--- End: for
return out
    def inspect(self):
        '''Inspect the object for debugging.

        Prints a detailed description of the object's internal state
        to stdout.

        .. seealso:: `cf.inspect`

        :Returns:

            `None`

        '''
        print(cf_inspect(self))  # pragma: no cover
    def get_data(self, default=ValueError()):
        '''Return the data.

        Note that a `Data` instance is returned. Use its `array`
        attribute to return the data as an independent `numpy` array.

        The units, calendar and fill value properties are, if set,
        inserted into the data.

        .. versionadded:: 1.7.0

        .. seealso:: `array`, `data`, `del_data`, `has_data`,
                     `set_data`

        :Parameters:

            default: optional
                Return the value of the *default* parameter if data
                have not been set. If set to an `Exception` instance
                then it will be raised instead.

        :Returns:

                The data.

        **Examples:**

        >>> d = cf.Data(range(10))
        >>> f.set_data(d)
        >>> f.has_data()
        True
        >>> f.get_data()
        <CF Data(10): [0, ..., 9]>
        >>> print(f.get_data(None))
        None

        '''
        # _units=False suppresses the superclass's own unit handling;
        # presumably the units are managed by this class's Units
        # machinery instead -- TODO confirm against the superclass
        return super().get_data(default=default, _units=False)
def override_calendar(self, calendar, inplace=False, i=False):
'''Override the calendar of date-time units.
The new calendar **need not** be equivalent to the original one
and the data array elements will not be changed to reflect the new
units. Therefore, this method should only be used when it is known
that the data array values are correct but the calendar has been
incorrectly encoded.
Not to be confused with setting the `calendar` or `Units`
attributes to a calendar which is equivalent to the original
calendar
.. seealso:: `calendar`, `override_units`, `units`, `Units`
:Parameters:
calendar: `str`
The new calendar.
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
TODO
**Examples:**
TODO
>>> g = f.override_calendar('noleap')
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'override_calendar', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.override_calendar(calendar, inplace=True)
v._custom['Units'] = data.Units
else:
if not v.Units.isreftime:
raise ValueError(
"Can't override the calender of non-reference-time units: {0!r}".format(
self.Units))
v.Units = Units(getattr(v.Units, 'units', None), calendar=calendar)
if inplace:
v = None
return v
    def override_units(self, units, inplace=False, i=False):
        '''Override the units.

        The new units **need not** be equivalent to the original ones
        and the data array elements will not be changed to reflect
        the new units. Therefore, this method should only be used
        when it is known that the data array values are correct but
        the units have been incorrectly encoded.

        Not to be confused with setting the `units` or `Units`
        attributes to units which are equivalent to the original
        units.

        .. seealso:: `calendar`, `override_calendar`, `units`,
                     `Units`

        :Parameters:

            units: `str` or `Units`
                The new units for the data array.

            inplace: `bool`, optional
                If True then do the operation in-place and return
                `None`.

            i: deprecated at version 3.0.0
                Use the *inplace* parameter instead.

        :Returns:

                The construct with overridden units, or `None` if the
                operation was in-place.

        **Examples:**

        >>> f.Units
        <Units: hPa>
        >>> f.datum(0)
        100000.0
        >>> f.override_units('km')
        >>> f.Units
        <Units: km>
        >>> f.datum(0)
        100000.0

        '''
        if i:
            _DEPRECATION_ERROR_KWARGS(
                self, 'override_units', i=True)  # pragma: no cover

        if inplace:
            v = self
        else:
            v = self.copy()

        # Normalize the argument to a Units instance
        units = Units(units)

        data = v.get_data(None)
        if data is not None:
            # Override on the data, then keep both the cached Units
            # and the Units attribute in step with it
            data.override_units(units, inplace=True)
            v._custom['Units'] = units
            v.Units = units
        else:
            v.Units = units

        if inplace:
            v = None
        return v
def rint(self, inplace=False, i=False):
'''Round the data to the nearest integer, element-wise.
.. versionadded:: 1.0
.. seealso:: `ceil`, `floor`, `trunc`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with rounded data. If the operation was
in-place then `None` is returned.
**Examples:**
>>> print(f.array)
[-1.9 -1.5 -1.1 -1. 0. 1. 1.1 1.5 1.9]
>>> print(f.rint().array)
[-2. -2. -1. -1. 0. 1. 1. 2. 2.]
>>> f.rint(inplace=True)
>>> print(f.array)
[-2. -2. -1. -1. 0. 1. 1. 2. 2.]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'rint', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.rint(inplace=True)
if inplace:
v = None
return v
def round(self, decimals=0, inplace=False, i=False):
'''Round the data to the given number of decimals.
Values exactly halfway between rounded decimal values are rounded
to the nearest even value. Thus 1.5 and 2.5 round to 2.0, -0.5 and
0.5 round to 0.0, etc. Results may also be surprising due to the
inexact representation of decimal fractions in the IEEE floating
point standard and errors introduced when scaling by powers of
ten.
.. versionadded:: 1.1.4
.. seealso:: `ceil`, `floor`, `rint`, `trunc`
:Parameters:
decimals: `int`, optional
Number of decimal places to round to (0 by default). If
decimals is negative, it specifies the number of positions
to the left of the decimal point.
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
The construct with rounded data. If the operation was
in-place then `None` is returned.
**Examples:**
>>> print(f.array)
[-1.81, -1.41, -1.01, -0.91, 0.09, 1.09, 1.19, 1.59, 1.99])
>>> print(f.round().array)
[-2., -1., -1., -1., 0., 1., 1., 2., 2.]
>>> print(f.round(1).array)
[-1.8, -1.4, -1. , -0.9, 0.1, 1.1, 1.2, 1.6, 2. ]
>>> print(f.round(-1).array)
[-0., -0., -0., -0., 0., 0., 0., 0., 0.]
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'round', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.round(decimals=decimals, inplace=True)
if inplace:
v = self
return v
def roll(self, iaxis, shift, inplace=False, i=False):
'''Roll the data along an axis.
.. seealso:: `flatten`, `insert_dimension`, `flip`, `squeeze`,
`transpose`
:Parameters:
iaxis: `int`
TODO
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
i: deprecated at version 3.0.0
Use *inplace* parameter instead.
:Returns:
TODO
**Examples:**
TODO
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'roll', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is not None:
data.roll(iaxis, shift, inplace=True)
if inplace:
v = None
return v
    def set_data(self, data, copy=True):
        '''Set the data.

        The units, calendar and fill value of the incoming `Data`
        instance are removed prior to insertion.

        .. versionadded:: 3.0.0

        .. seealso:: `data`, `del_data`, `get_data`, `has_data`

        :Parameters:

            data: `Data`
                The data to be inserted.

            copy: `bool`, optional
                If False then do not copy the data prior to
                insertion. By default the data are copied.

        :Returns:

            `None`

        **Examples:**

        >>> d = Data(range(10))
        >>> f.set_data(d)
        >>> f.has_data()
        True
        >>> f.get_data()
        <Data(10): [0, ..., 9]>
        >>> f.del_data()
        <Data(10): [0, ..., 9]>
        >>> f.has_data()
        False
        >>> print(f.get_data(None))
        None

        '''
        if not data.Units:
            # The incoming data have no units, so impose any units
            # already held by this construct
            units = getattr(self, 'Units', None)
            if units is not None:
                if copy:
                    # override_units with inplace=False already
                    # returns a copy, so a second copy below is
                    # unnecessary
                    copy = False
                    data = data.override_units(units, inplace=False)
                else:
                    data.override_units(units, inplace=True)
        #--- End: if

        if copy:
            data = data.copy()

        self._set_component('data', data, copy=False)
def where(self, condition, x=None, y=None, inplace=False, i=False,
_debug=False):
'''Set data array elements depending on a condition.
.. seealso:: `cf.masked`, `hardmask`, `subspace`
:Parameters:
TODO
:Returns:
TODO
**Examples:**
TODO
'''
if i:
_DEPRECATION_ERROR_KWARGS(self, 'where', i=True) # pragma: no cover
if inplace:
v = self
else:
v = self.copy()
data = v.get_data(None)
if data is None:
raise ValueError(
"ERROR: Can't set data in nonexistent data array")
try:
condition_data = condition.get_data(None)
except AttributeError:
pass
else:
if condition_data is None:
raise ValueError(
"ERROR: Can't set data from {} with no data array".format(
condition.__class__.__name__))
condition = condition_data
try:
x_data = x.get_data(None)
except AttributeError:
pass
else:
if x_data is None:
raise ValueError(
"ERROR: Can't set data from {} with no data array".format(
x.__class__.__name__))
x = x_data
try:
y_data = y.get_data(None)
except AttributeError:
pass
else:
if y_data is None:
raise ValueError(
"ERROR: Can't set data from {} with no data array".format(
y.__class__.__name__))
y = y_data
data.where(condition, x, y, inplace=True, _debug=_debug)
if inplace:
v = None
return v
# ----------------------------------------------------------------
# Aliases
# ----------------------------------------------------------------
    @property
    def dtarray(self):
        '''Alias for the `datetime_array` attribute.

        '''
        return self.datetime_array
# ----------------------------------------------------------------
# Deprecated attributes and methods
# ----------------------------------------------------------------
    @property
    def attributes(self):
        '''A dictionary of the attributes which are not CF properties.

        Deprecated at version 3.0.0. Accessing this attribute raises
        a deprecation error.

        '''
        _DEPRECATION_ERROR_ATTRIBUTE(self, 'attributes')
@property
def Data(self):
'''The `Data` object containing the data array.
Deprecated at version 3.0.0. Use 'data' attribute or
'get_data' method instead.
'''
_DEPRECATATION_ERROR_ATTRIBUTE(
self, 'Data',
"Use 'data' attribute or 'get_data' method instead.") # pragma: no cover
@data.setter
def Data(self, value):
_DEPRECATATION_ERROR_ATTRIBUTE(
self, 'Data',
"Use 'data' attribute or 'set_data' method instead.") # pragma: no cover
@data.deleter
def Data(self):
_DEPRECATATION_ERROR_ATTRIBUTE(
self, 'Data',
"Use 'data' attribute or 'del_data' method instead.") # pragma: no cover
    @property
    def dtvarray(self):
        '''A numpy array view of the data array converted to date-time
        objects.

        Deprecated at version 3.0.0.

        '''
        _DEPRECATION_ERROR_ATTRIBUTE(self, 'dtvarray')  # pragma: no cover

    @property
    def hasbounds(self):
        '''`True` if there are cell bounds.

        Deprecated at version 3.0.0. Use the `has_bounds` method
        instead.

        If present, cell bounds are stored in the `!bounds`
        attribute.

        **Examples:**

        >>> if c.hasbounds:
        ...     b = c.bounds

        '''
        _DEPRECATION_ERROR_ATTRIBUTE(self, 'hasbounds', "Use 'has_bounds' method instead")

    @property
    def hasdata(self):
        '''True if there is a data array.

        Deprecated at version 3.0.0. Use the `has_data` method
        instead.

        If present, the data array is stored in the `data` attribute.

        .. seealso:: `data`, `hasbounds`

        **Examples:**

        >>> if f.hasdata:
        ...     print(f.data)

        '''
        _DEPRECATION_ERROR_ATTRIBUTE(self, 'hasdata', "Use 'has_data' method instead")

    @property
    def unsafe_array(self):
        '''A numpy array of the data.

        Deprecated at version 3.0.0. Use the `array` attribute
        instead.

        '''
        _DEPRECATION_ERROR_ATTRIBUTE(
            self, 'unsafe_array',
            "Use 'array' attribute instead.")  # pragma: no cover
    def asdatetime(self, i=False):
        '''Convert the internal representation of data array elements
        to date-time objects.

        Deprecated at version 3.0.0.

        Only applicable to constructs with reference time units. If
        the calendar has not been set then the CF default calendar
        will be used and the units will be updated accordingly.

        .. seealso:: `asreftime`

        '''
        _DEPRECATION_ERROR_METHOD(self, 'asdatetime')  # pragma: no cover

    def asreftime(self, i=False):
        '''Convert the internal representation of data array elements
        to numeric reference times.

        Deprecated at version 3.0.0.

        Only applicable to constructs with reference time units. If
        the calendar has not been set then the CF default calendar
        will be used and the units will be updated accordingly.

        .. seealso:: `asdatetime`

        '''
        _DEPRECATION_ERROR_METHOD(self, 'asreftime')  # pragma: no cover

    def expand_dims(self, position=0, i=False):
        '''Insert a size 1 axis into the data array.

        Deprecated at version 3.0.0. Use the `insert_dimension`
        method instead.

        '''
        _DEPRECATION_ERROR_METHOD(
            self, 'expand_dims',
            "Use method 'insert_dimension' instead.")  # pragma: no cover

    def insert_data(self, data, copy=True):
        '''Insert the data.

        Deprecated at version 3.0.0. Use the `set_data` method
        instead.

        '''
        _DEPRECATION_ERROR_METHOD(
            self, 'insert_data',
            "Use method 'set_data' instead.")  # pragma: no cover

    def name(self, default=None, identity=False, ncvar=False,
             relaxed_identity=None):
        '''Return a name for the construct.

        Deprecated at version 3.0.0. Use the `identity` method
        instead.

        '''
        _DEPRECATION_ERROR_METHOD(
            self, 'name',
            "Use method 'identity' instead")  # pragma: no cover

    def remove_data(self):
        '''Remove and return the data array.

        Deprecated at version 3.0.0. Use the `del_data` method
        instead.

        '''
        _DEPRECATION_ERROR_METHOD(
            self, 'remove_data',
            "Use method 'del_data' instead.")  # pragma: no cover

    def select(self, *args, **kwargs):
        '''Deprecated at version 3.0.0.

        '''
        _DEPRECATION_ERROR_METHOD(self, 'select')  # pragma: no cover
#--- End: class
class Subspace:
    '''A lightweight wrapper that exposes subspacing of a variable
    through indexing.

    Getting ``s[indices]`` returns the corresponding subspace of the
    wrapped variable, and setting ``s[indices] = value`` assigns to
    it.

    '''
    # Only the wrapped variable is stored; no instance __dict__
    __slots__ = ('variable',)

    def __init__(self, variable):
        '''Store the variable to be subspaced.

        '''
        self.variable = variable

    def __getitem__(self, indices):
        '''Called to implement evaluation of x[indices].

        x.__getitem__(indices) <==> x[indices]

        '''
        return self.variable[indices]

    def __setitem__(self, indices, value):
        '''Called to implement assignment to x[indices].

        x.__setitem__(indices, value) <==> x[indices]=value

        '''
        if isinstance(value, self.__class__):
            # Unwrap another Subspace to its underlying variable.
            # Fixed: this previously read 'value.data', an attribute
            # that can never exist because __slots__ only declares
            # 'variable', so the branch always raised AttributeError.
            value = value.variable

        self.variable[indices] = value
#--- End: class
| [
"d.c.hassell@reading.ac.uk"
] | d.c.hassell@reading.ac.uk |
fccf0b7bd873beb81a2e03f845b1ab61e0cd8002 | a2dc75a80398dee58c49fa00759ac99cfefeea36 | /bluebottle/funding/migrations/0023_bankpayoutaccount.py | 4c2896a6cfd4e6e3a3b9b656a801977f9e27dd97 | [
"BSD-2-Clause"
] | permissive | onepercentclub/bluebottle | e38b0df2218772adf9febb8c6e25a2937889acc0 | 2b5f3562584137c8c9f5392265db1ab8ee8acf75 | refs/heads/master | 2023-08-29T14:01:50.565314 | 2023-08-24T11:18:58 | 2023-08-24T11:18:58 | 13,149,527 | 15 | 9 | BSD-3-Clause | 2023-09-13T10:46:20 | 2013-09-27T12:09:13 | Python | UTF-8 | Python | false | false | 1,430 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-08-26 10:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # The parent PayoutAccount table must already exist before this
    # multi-table-inheritance child table can be created.
    dependencies = [
        ('funding', '0022_auto_20190804_1022'),
    ]

    operations = [
        # BankPayoutAccount extends funding.PayoutAccount via a
        # one-to-one parent link used as the primary key; all bank
        # detail fields are optional free-text columns.
        migrations.CreateModel(
            name='BankPayoutAccount',
            fields=[
                ('payoutaccount_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='funding.PayoutAccount')),
                ('account_number', models.CharField(blank=True, max_length=100, null=True, verbose_name='bank account number')),
                ('account_holder_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='account holder name')),
                ('account_holder_address', models.CharField(blank=True, max_length=500, null=True, verbose_name='account holder address')),
                ('account_bank_country', models.CharField(blank=True, max_length=100, null=True, verbose_name='bank country')),
                ('account_details', models.CharField(blank=True, max_length=500, null=True, verbose_name='account details')),
            ],
            options={
                'abstract': False,
            },
            bases=('funding.payoutaccount',),
        ),
    ]
| [
"loek@goodup.com"
] | loek@goodup.com |
c4fe86c6132760476a28ff976caa14c7b657506f | 691793de7d07b17918d076b319281c706f7275c0 | /test/test_notification_event.py | 547cb7800d3e17e7eddff3c860fb253bd77ae619 | [
"MIT"
] | permissive | signingtoday/signingtoday-sdk-python | 1ddfae5340690c80760c500436631d4a8ff9c87f | ed267279622fb59f2ad8fa289157fc9cdf9d8a5b | refs/heads/master | 2020-12-03T15:32:35.755222 | 2020-03-24T08:27:11 | 2020-03-24T08:27:11 | 231,372,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | # coding: utf-8
"""
Signing Today Web
*Signing Today* is the perfect Digital Signature Gateway. Whenever in Your workflow You need to add one or more Digital Signatures to Your document, *Signing Today* is the right choice. You prepare Your documents, *Signing Today* takes care of all the rest: send invitations (`signature tickets`) to signers, collects their signatures, send You back the signed document. Integrating *Signing Today* in Your existing applications is very easy. Just follow these API specifications and get inspired by the many examples presented hereafter. # noqa: E501
The version of the OpenAPI document: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import signing_today_client
from signing_today_client.models.notification_event import NotificationEvent # noqa: E501
from signing_today_client.rest import ApiException
class TestNotificationEvent(unittest.TestCase):
    """NotificationEvent unit test stubs (auto-generated by openapi-generator)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Test NotificationEvent
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = signing_today_client.models.notification_event.NotificationEvent()  # noqa: E501
        if include_optional :
            # Fully-populated instance with representative sample values.
            return NotificationEvent(
                id = 1,
                time = '2007-04-02T19:30:10Z',
                dst_id = 'd6bb4c8f-37bf-4900-a1e4-dd9b0939cafb',
                user_id = '1ca229c8-2a99-4f3e-9421-36377fd8d9e5',
                dst_title = 'Contract',
                username = 'johndoe',
                email = 'jdo@bit4id.com',
                event = 'evt_dst_status_signed'
            )
        else :
            # Required-only construction (no arguments are passed here).
            return NotificationEvent(
        )
    def testNotificationEvent(self):
        """Test NotificationEvent"""
        # Construction must succeed both with and without optional fields.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"smartcloud@bit4id.com"
] | smartcloud@bit4id.com |
6c5d71530c58702155e5b75c3fe0457827dc44a5 | b8e885e1546dfb7a45dc7da7718d73ae4103196e | /nebula/__init__.py | 7fd8272fa8e9757aa84f2b70df4bef4fdf4bb229 | [] | no_license | dannon/nebula | b4655b3b5401c528d25ae973763c4dd82d367ab6 | 5ca94404894f9a64b8adf2afbc37381757ae6b7a | refs/heads/master | 2021-01-18T16:00:41.768529 | 2014-09-23T20:29:55 | 2014-09-23T20:29:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from parser import NebulaCompile
from scheduler import Scheduler
from website import WebSite
from workrepo import WorkRepo
class Config:
    """Runtime settings for a Nebula deployment.

    Every constructor argument is stored verbatim as a same-named
    public attribute; all of them are optional.
    """

    def __init__(self, mesos=None, port=9999, host='localhost', workdir="/tmp", docker=None):
        settings = (
            ("mesos", mesos),
            ("port", port),
            ("host", host),
            ("workdir", workdir),
            ("docker", docker),
        )
        for attr_name, attr_value in settings:
            setattr(self, attr_name, attr_value)
| [
"kellrott@gmail.com"
] | kellrott@gmail.com |
0d5998dc0bdef8074798becd45da509feac2c687 | 2f0aa66e14c6595289f6a0de2bdf71e9922052a7 | /nextApi/company/serializers.py | a8e40069f87047ad03eef4ae796f22982d9f0f9f | [] | no_license | aimethierry/NextApi | 8f83a2b0f499fdf5118eb930baa051584cfd9aa5 | 90884ee6d900ce71116b40276dda0e97bec0b521 | refs/heads/master | 2022-12-11T09:03:54.981284 | 2020-09-19T12:40:36 | 2020-09-19T12:40:36 | 296,866,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from rest_framework import serializers
from .models import Company
class companySerializer(serializers.ModelSerializer):
    # Exposes the listed Company fields through the REST API.
    # NOTE(review): class name breaks PEP 8 (should be CompanySerializer);
    # renaming would touch every import site, so it is left as-is here.
    class Meta:
        model = Company
        fields = ['name', 'country', 'city', 'street', 'pbox', 'description']
| [
"aime.thierry97@gmail.com"
] | aime.thierry97@gmail.com |
89859a5f2de10fec32b6d37f696243b99ef2ff8e | 0c785a2601f2b02c1636d57c70039f0c4f08294a | /pybles/PySrc/tests/test_jyserver.py | 81f3ce332de8e93c0ed3f6cf8c9c62c15242b1c1 | [] | no_license | SoftwareEngineeringToolDemos/ICSE-2012-CodeBubbles | bc26d9655fbd56e5f61364db1c176a3539653d7f | 6da209c1ff0f7fbfa958c97dc22ec478b2b5219c | refs/heads/master | 2021-01-17T13:35:48.729810 | 2016-06-24T19:42:07 | 2016-06-24T19:42:07 | 45,094,073 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,869 | py | '''
@author Fabio Zadrozny
'''
import sys
import unittest
import socket
import urllib
IS_JYTHON = sys.platform.find('java') != -1
if IS_JYTHON:
    import os
    #make it as if we were executing from the directory above this one (so that we can use jycompletionserver
    #without the need for it being in the pythonpath)
    sys.argv[0] = os.path.dirname(sys.argv[0])
    #twice the dirname to get the previous level from this file.
    sys.path.insert(1, os.path.join(os.path.dirname(sys.argv[0])))
    import pycompletionserver as jycompletionserver
# Set to a non-zero value to enable the dbg() trace output below.
DEBUG = 0
def dbg(s):
    # Trace helper: writes 'TEST <s>' to stdout, but only when the
    # module-level DEBUG flag is enabled.
    if DEBUG:
        sys.stdout.write('TEST %s\n' % s)
class Test(unittest.TestCase):
    """Integration tests for the Jython code-completion server.

    Python 2 era code (``urllib.quote_plus``, ``assertEquals``, bare
    ``except``); the socket tests drive a live completion-server thread
    over two TCP connections.
    """
    def setUp(self):
        unittest.TestCase.setUp(self)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def testIt(self):
        # Smoke test: just exercises the dbg() trace helper.
        dbg('ok')
    def testMessage(self):
        """Completion messages are framed and URL-escaped correctly."""
        t = jycompletionserver.T(0, 0)
        l = []
        l.append(('Def', 'description' , 'args'))
        l.append(('Def1', 'description1', 'args1'))
        l.append(('Def2', 'description2', 'args2'))
        msg = t.processor.formatCompletionMessage('test_jyserver.py', l)
        self.assertEquals('@@COMPLETIONS(test_jyserver.py,(Def,description,args),(Def1,description1,args1),(Def2,description2,args2))END@@', msg)
        # Commas and parentheses would break the message framing, so the
        # formatter must percent-escape them.
        l = []
        l.append(('Def', 'desc,,r,,i()ption', ''))
        l.append(('Def(1', 'descriptio(n1', ''))
        l.append(('De,f)2', 'de,s,c,ription2', ''))
        msg = t.processor.formatCompletionMessage(None, l)
        expected = '@@COMPLETIONS(None,(Def,desc%2C%2Cr%2C%2Ci%28%29ption, ),(Def%281,descriptio%28n1, ),(De%2Cf%292,de%2Cs%2Cc%2Cription2, ))END@@'
        self.assertEquals(expected, msg)
    def testCompletionSocketsAndMessages(self):
        """End-to-end: send import-completion requests over the write socket
        and check the framed responses arriving on the read socket."""
        dbg('testCompletionSocketsAndMessages')
        t, sToWrite, sToRead, self.connToRead, addr = self.createConnections()
        dbg('connections created')
        try:
            #now that we have the connections all set up, check the code completion messages.
            msg = urllib.quote_plus('math')
            toWrite = '@@IMPORTS:%sEND@@' % msg
            dbg('writing' + str(toWrite))
            sToWrite.send(toWrite) #math completions
            completions = self.readMsg()
            dbg(urllib.unquote_plus(completions))
            start = '@@COMPLETIONS('
            self.assert_(completions.startswith(start), '%s DOESNT START WITH %s' % (completions, start))
            self.assert_(completions.find('@@COMPLETIONS') != -1)
            self.assert_(completions.find('END@@') != -1)
            msg = urllib.quote_plus('__builtin__.str')
            toWrite = '@@IMPORTS:%sEND@@' % msg
            dbg('writing' + str(toWrite))
            sToWrite.send(toWrite) #math completions
            completions = self.readMsg()
            dbg(urllib.unquote_plus(completions))
            start = '@@COMPLETIONS('
            self.assert_(completions.startswith(start), '%s DOESNT START WITH %s' % (completions, start))
            self.assert_(completions.find('@@COMPLETIONS') != -1)
            self.assert_(completions.find('END@@') != -1)
        finally:
            # Best-effort shutdown: ask the server thread to die, busy-wait
            # for it, then close all sockets; failures here must not mask
            # the actual test outcome.
            try:
                self.sendKillMsg(sToWrite)
                while not hasattr(t, 'ended'):
                    pass #wait until it receives the message and quits.
                sToRead.close()
                sToWrite.close()
                self.connToRead.close()
            except:
                pass
    def createConnections(self, p1=50002, p2=50003):
        '''
        Creates the connections needed for testing.

        Starts the completion-server thread, opens a client socket to port
        p1 and accepts the server's connection back on port p2.
        '''
        t = jycompletionserver.T(p1, p2)
        t.start()
        sToWrite = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sToWrite.connect((jycompletionserver.HOST, p1))
        sToRead = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sToRead.bind((jycompletionserver.HOST, p2))
        sToRead.listen(1) #socket to receive messages.
        connToRead, addr = sToRead.accept()
        return t, sToWrite, sToRead, connToRead, addr
    def readMsg(self):
        # Skip any '@@PROCESSING...' status frames, then keep reading until
        # a complete message (terminated by 'END@@') has arrived.
        msg = '@@PROCESSING_END@@'
        while msg.startswith('@@PROCESSING'):
            msg = self.connToRead.recv(1024)
            if msg.startswith('@@PROCESSING:'):
                dbg('Status msg:' + str(msg))
        while msg.find('END@@') == -1:
            msg += self.connToRead.recv(1024)
        return msg
    def sendKillMsg(self, socket):
        # NOTE: the parameter shadows the stdlib 'socket' module inside this method.
        socket.send(jycompletionserver.MSG_KILL_SERVER)
#"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython D:\eclipse_workspace\org.python.pydev\PySrc\pycompletionserver.py 53795 58659
#
#"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython D:\eclipse_workspace\org.python.pydev\PySrc\tests\test_jyserver.py
#
#"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython d:\runtime-workbench-workspace\jython_test\src\test.py
if __name__ == '__main__':
    # These tests only make sense on Jython (they drive the Jython
    # completion server); on other platforms just report and do nothing.
    if IS_JYTHON:
        suite = unittest.makeSuite(Test)
        unittest.TextTestRunner(verbosity=1).run(suite)
    else:
        sys.stdout.write('Not running jython tests for non-java platform: %s' % sys.platform)
| [
"you@example.com"
] | you@example.com |
e6f76838cc8948c487e9b5d7f982f891fd930d1a | 7c74ceb9f8addcc0816d012e0b84b174b96e0def | /src/azure-cli/azure/cli/command_modules/aro/_rbac.py | 3b993a002cd0239d028eae1e6aec6d2af2013b6b | [
"MIT",
"LGPL-2.1-only",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | microsoft/azure-cli | 4c826290e7a6f6bd27da3829b05e4f02ff6dc8d9 | 9ba64b33f6f78e2c3e42f8a147f59484300e8779 | refs/heads/dev | 2023-08-31T08:51:39.526556 | 2022-11-28T19:08:23 | 2022-11-28T19:08:23 | 370,900,439 | 7 | 7 | MIT | 2023-08-01T23:34:50 | 2021-05-26T03:59:41 | Python | UTF-8 | Python | false | false | 2,862 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.profiles import get_sdk
from azure.cli.core.profiles import ResourceType
from knack.log import get_logger
from msrest.exceptions import ValidationError
from msrestazure.tools import resource_id
NETWORK_CONTRIBUTOR = '4d97b98b-1d4f-4787-a291-c67834d212e7'
logger = get_logger(__name__)
def _gen_uuid():
    """Return a fresh random (version 4) UUID, used as the role-assignment name."""
    return uuid.uuid4()
def _create_role_assignment(auth_client, resource, params):
    """Create a role assignment on *resource*, retrying GUID hash conflicts.

    Every attempt uses a newly generated assignment GUID; after
    ``max_retries`` failed attempts the last error is re-raised.
    """
    # retry "ValidationError: A hash conflict was encountered for the role Assignment ID. Please use a new Guid."
    max_retries = 3
    retries = 0
    while True:
        try:
            return auth_client.role_assignments.create(resource, _gen_uuid(), params)
        except ValidationError as ex:
            if retries >= max_retries:
                raise
            retries += 1
            logger.warning("%s; retry %d of %d", ex, retries, max_retries)
def assign_network_contributor_to_resource(cli_ctx, resource, object_id):
    """Grant the built-in Network Contributor role on *resource* to the
    service principal identified by *object_id*.
    """
    auth_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION)
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    # Fully-qualified ID of the Network Contributor role definition.
    role_definition_id = resource_id(
        subscription=get_subscription_id(cli_ctx),
        namespace='Microsoft.Authorization',
        type='roleDefinitions',
        name=NETWORK_CONTRIBUTOR,
    )
    _create_role_assignment(auth_client, resource, RoleAssignmentCreateParameters(
        role_definition_id=role_definition_id,
        principal_id=object_id,
        principal_type='ServicePrincipal',
    ))
def has_network_contributor_on_resource(cli_ctx, resource, object_id):
    """Return True if *object_id* already holds Network Contributor on *resource*."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION)
    wanted_role = resource_id(
        subscription=get_subscription_id(cli_ctx),
        namespace='Microsoft.Authorization',
        type='roleDefinitions',
        name=NETWORK_CONTRIBUTOR,
    ).lower()
    principal = object_id.lower()
    # Scan every assignment on the scope for a matching (role, principal) pair;
    # comparisons are case-insensitive, matching ARM ID semantics.
    return any(
        assignment.role_definition_id.lower() == wanted_role
        and assignment.principal_id.lower() == principal
        for assignment in client.role_assignments.list_for_scope(resource)
    )
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
52ac0359f5fb4a6ae85782c49c80f98062649017 | 34a70bf642b6f678dce2b22efc598656a1a7d90a | /GraphCodes/CyclesUndirectedGraph.py | 5a36ee8efbc7fa46ab572f4fc8f242ead3212579 | [] | no_license | CodeForContribute/Algos-DataStructures | ce89a313b3e32de57f23b263966a84bb020e6a18 | d0ddc7c8f94270f9269a9a5233b3a07fe59c9a1f | refs/heads/master | 2022-03-28T03:08:17.934077 | 2019-11-23T21:58:03 | 2019-11-23T21:58:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | from collections import defaultdict
class Graph:
    """Directed graph on vertices 0..vertices-1 with DFS-based cycle detection."""

    def __init__(self, vertices):
        self.vertices = vertices            # number of vertices
        self.graph = defaultdict(list)      # adjacency list: u -> successors

    def addEdge(self, u, v):
        """Add a directed edge u -> v."""
        self.graph[u].append(v)

    # Time complexity of this method is the same as a DFS traversal: O(V + E).
    def isCycleIndirected(self, v, visited, recStack):
        """Return True if a directed cycle is reachable from *v*.

        ``visited`` marks nodes explored by any DFS so far; ``recStack`` marks
        nodes on the current recursion path.  A back edge to a node on the
        current path is a cycle.

        The original version never cleared ``recStack[v]`` on the way out and
        recursed into already-visited nodes, so DAGs with shared descendants
        (e.g. 0->1, 0->2, 1->2) were falsely reported as cyclic; it also fell
        off the end without returning False.
        """
        visited[v] = True
        recStack[v] = True
        for neighbor in self.graph[v]:
            if not visited[neighbor]:
                if self.isCycleIndirected(neighbor, visited, recStack):
                    return True
            elif recStack[neighbor]:
                # Back edge to an ancestor on the current DFS path -> cycle.
                return True
        # Done exploring v: remove it from the current recursion path.
        recStack[v] = False
        return False

    def isCycle(self):
        """Return True if the graph contains at least one directed cycle."""
        visited = [False for i in range(self.vertices)]
        recStack = [False for i in range(self.vertices)]
        for i in range(self.vertices):
            if not visited[i]:
                if self.isCycleIndirected(i, visited, recStack):
                    return True
        return False
if __name__ == '__main__':
    # Demo: build a small directed graph and report whether it has a cycle.
    g = Graph(8)
    g.addEdge(0, 1)
    g.addEdge(0, 2)
    g.addEdge(1, 2)
    g.addEdge(2, 5)
    g.addEdge(2, 3)
    g.addEdge(3, 7)
    if g.isCycle() == 1:
        print("Graph has a cycle")
    else:
        print("Graph has no cycle")
| [
"RAUSHAN.KUMAR2@otis.COM"
] | RAUSHAN.KUMAR2@otis.COM |
b107848bc925c50961146c951860c3bfc5fbe2c3 | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /ABC/137/E.py | 7c09e79bfacdc5cdd6e89aa3d749feeb7e189c9a | [] | no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | from collections import deque
import sys
input = sys.stdin.readline
def main():
    """Read a directed graph from stdin and print the best achievable score.

    Each edge score C is transformed to -(C - P), so maximising the original
    score equals minimising path weight.  Bellman-Ford then computes the
    shortest 0 -> N-1 distance restricted to vertices that lie on some
    0 -> N-1 path, and prints -1 when relaxation still succeeds after N
    rounds (a negative = profitable cycle on such a path).
    """
    N, M, P = map(int, input().split())
    edge = []
    g = [[] for _ in range(N)]
    rg = [[] for _ in range(N)]
    for _ in range(M):
        A, B, C = map(int, input().split())
        A -= 1
        B -= 1
        C -= P
        C = -C
        edge.append((A,B,C))
        g[A].append(B)
        rg[B].append(A)
    # We only care about cycles lying on some 0 -> N-1 path, so first strip
    # out vertices unreachable from 0 or unable to reach N-1 (BFS on the
    # graph and on its reverse).
    visited1 = set()
    visited2 = set()
    visited1.add(0)
    visited2.add(N-1)
    Q = deque()
    Q.append(0)
    while Q:
        v = Q.popleft()
        for dest in g[v]:
            if dest in visited1:
                continue
            visited1.add(dest)
            Q.append(dest)
    Q.append(N-1)
    while Q:
        v = Q.popleft()
        for dest in rg[v]:
            if dest in visited2:
                continue
            visited2.add(dest)
            Q.append(dest)
    # OK = vertices reachable from 0 that can also reach N-1.
    OK = visited1 & visited2
    flag = True
    d = [float('inf')] * N
    d[0] = 0
    step = 0
    # Bellman-Ford: relax until a fixed point; more than N rounds of
    # successful relaxation implies a negative cycle.
    while flag:
        flag = False
        for A, B, C in edge:
            if not A in OK:
                continue
            if not B in OK:
                continue
            newD = d[A] + C
            if newD < d[B]:
                d[B] = newD
                flag = True
        step += 1
        if step > N:
            print(-1)
            exit()
    # Negate back to the maximisation answer; scores below zero clamp to 0.
    print(max(0,-d[N-1]))
if __name__ == "__main__":
    # Entry point when run as a script (reads the problem input from stdin).
    main()
"sososo1333@gmail.com"
] | sososo1333@gmail.com |
a09fbeb5fff9be004c863cc1d188a6c2e4edecd2 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /T30-turtle-带轴sin(x).py | 65b1abeeb749519d38055915ef35d8a1c0f3e5ca | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import math
from turtle import *
N = 100
def f(x):
    # Identity curve; passed to plot() below but currently ignored by it.
    return x
def jumpto(x, y):
    # Move the pen to (x, y) without drawing.
    penup()
    goto(x,y)
def line(x1, y1, x2, y2):
    # Draw a straight segment from (x1, y1) to (x2, y2).
    jumpto(x1, y1)
    pendown()
    goto(x2, y2)
def coosys():
    # Draw the coordinate axes: red x-axis across [-N, N], blue y-axis.
    width(4)
    pencolor('red')
    line(-N, 0, N+1, 0)
    pencolor('blue')
    line(0, -2, 0, 2.1)
def plot(fun, y, color):
    # Trace the curve point by point in the given colour.
    # NOTE(review): despite its parameters, this ignores `fun` and `y` and
    # always draws cos(x/10); the filename suggests sin(x) was intended --
    # confirm which curve is actually wanted.
    pencolor(color)
    width(2)
    jumpto(-N, 0)
    pendown()
    #dot(5)
    for i in range(-N,N,2):
        yi=math.cos(i/10)
        goto(i,yi)
        #dot(5)
def main():
    # Reset the canvas, map world coordinates to the curve's range,
    # then draw the axes and the curve.
    reset()
    setworldcoordinates(-100,-2, 101, 2.1)
    speed(0)  # fastest drawing speed
    hideturtle()
    coosys()
    plot(f, 0, "green")
    return "Done!"
if __name__ == "__main__":
    main()
    mainloop()  # keep the turtle window open until closed by the user
| [
"noreply@github.com"
] | xiang-daode.noreply@github.com |
a24ea528d1d4837e64a95d27a224929645603346 | 90c2619937019bb1145edfb2d9d6a7cdea460b57 | /src/783.py | 4a993223e7ea44eab55e13ecca14d8d860f897c5 | [
"MIT"
] | permissive | zhaoyi3264/leetcode-solutions | 2d289a7e5c74cfe7f8b019c6056ce16485ae057b | 1a3a2d441cdd07a17e80b0ea43b7b266844f530c | refs/heads/main | 2023-06-03T11:35:25.054669 | 2021-06-28T02:58:07 | 2021-06-28T02:58:07 | 349,618,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # Kept as class-level defaults for backward compatibility; the method
    # re-initialises them on every call.
    prev = -float('inf')
    ans = float('inf')

    def minDiffInBST(self, root: 'TreeNode') -> int:
        """Return the minimum difference between values of any two BST nodes.

        An in-order traversal of a BST visits values in ascending order, so
        the minimum difference is always between two consecutive in-order
        values.  The annotation is quoted because TreeNode is only declared
        in a comment above (an unquoted name raised NameError at import).
        """
        # Reset per call: the original left stale state on the instance, so a
        # second call on the same Solution object returned a wrong answer.
        self.prev = -float('inf')
        self.ans = float('inf')

        def inorder(node):
            if not node:
                return
            inorder(node.left)
            self.ans = min(self.ans, node.val - self.prev)
            self.prev = node.val
            inorder(node.right)

        inorder(root)
        return self.ans
| [
"zhaoyi3264@gmail.com"
] | zhaoyi3264@gmail.com |
51f1741be15eb364a9879aef8b51ed191d5ebdfa | de577e64440d2c330ff0018e8bfb7cf3abf11b70 | /fsleyes/plugins/tools/addroihistogram.py | e388e0aa2128585e355eae1a475c08d34bb2f652 | [
"Apache-2.0",
"CC-BY-3.0",
"BSD-3-Clause"
] | permissive | CGSchwarzMayo/fsleyes | bb887bf8e8dd46bb9e0e3d5c3028d97811fabad1 | 37b45d034d60660b6de3e4bdf5dd6349ed6d853b | refs/heads/master | 2023-09-01T01:39:38.508051 | 2023-08-21T18:21:34 | 2023-08-21T18:21:34 | 272,476,938 | 0 | 0 | NOASSERTION | 2020-06-15T15:34:36 | 2020-06-15T15:34:34 | null | UTF-8 | Python | false | false | 5,615 | py | #!/usr/bin/env python
#
# addroihistogram.py - The AddROIHistogramAction class.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module provides the :class:`AddROIHistogramAction` class, an
action used by the :class:`.HistogramPanel`.
"""
import wx
import numpy as np
import fsl.data.image as fslimage
import fsleyes.views.histogrampanel as histogrampanel
import fsleyes.strings as strings
import fsleyes.plotting.dataseries as dataseries
import fsleyes.plotting.histogramseries as histogramseries
import fsleyes.plugins.tools.addmaskdataseries as addmaskdataseries
import fsleyes.actions.base as base
class AddROIHistogramAction(base.Action):
    """The ``AddROIHistogramAction`` class is used by the
    :class:`.HistogramPanel`.

    It performs a very similar task to the :class:`.AddMaskDataSeriesAction` -
    the user selects a binary mask, the data within the base image is extracted
    for that mask, and the histogram of that data is added to the plot.
    """
    @staticmethod
    def supportedViews():
        """The ``AddROIHistogramAction`` is restricted for use with the
        :class:`.HistogramPanel`.
        """
        return [histogrampanel.HistogramPanel]
    def __init__(self, overlayList, displayCtx, plotPanel):
        """Create an ``AddROIHistogramAction``.

        :arg overlayList: The :class:`.OverlayList`.
        :arg displayCtx:  The :class:`.DisplayContext`.
        :arg plotPanel:   The :class:`.HistogramPanel`.
        """
        base.Action.__init__(
            self, overlayList, displayCtx, self.__addROIHistogram)
        self.__plotPanel = plotPanel
        self.__roiOptions = []
        # Keep the candidate-mask list (and hence the enabled state) in sync
        # with the overlay list and the selected overlay.
        overlayList.addListener('overlays',
                                self.name,
                                self.__overlayListChanged)
        displayCtx .addListener('selectedOverlay',
                                self.name,
                                self.__overlayListChanged)
        self.__overlayListChanged()
    def destroy(self):
        """Must be called when this ``AddROIHistogramAction`` is no
        longer in use.
        """
        if self.destroyed:
            return
        # De-register listeners and drop references before the base class
        # tears the action down.
        self.overlayList.removeListener('overlays', self.name)
        self.displayCtx .removeListener('selectedOverlay', self.name)
        self.__plotPanel = None
        self.__roiOptions = None
        base.Action.destroy(self)
    def __overlayListChanged(self, *a):
        """Called when the :class:`.OverlayList` or the
        :attr:`.DisplayContext.selectedOverlay` changes. Updates a list of
        valid mask images for the currently selected overlay.
        """
        overlay = self.displayCtx.getSelectedOverlay()
        if (len(self.overlayList) == 0 or
            (not isinstance(overlay, fslimage.Image))):
            self.enabled = False
            return
        # Valid masks: every other Image defined in the same space as the
        # currently selected overlay.
        self.__roiOptions = [o for o in self.overlayList if
                             isinstance(o, fslimage.Image) and
                             o is not overlay and
                             o.sameSpace(overlay)]
        self.enabled = len(self.__roiOptions) > 0
    def __addROIHistogram(self):
        """Prompts the user to select an ROI mask, calculates the histogram
        of that mask on the currently selected overlay, and adds the result
        to the ``HistogramPanel``.
        """
        overlay = self.displayCtx.getSelectedOverlay()
        opts = self.displayCtx.getOpts(overlay)
        roiOptions = self.__roiOptions
        frame = wx.GetApp().GetTopWindow()
        msg = strings.messages[self, 'selectMask'].format(overlay.name)
        title = strings.titles[ self, 'selectMask'].format(overlay.name)
        dlg = addmaskdataseries.MaskDialog(
            frame,
            [o.name for o in roiOptions],
            title=title,
            message=msg,
            checkbox=False)
        if dlg.ShowModal() != wx.ID_OK:
            return
        # Binarise the chosen mask, then pull out the masked voxels (for 4D
        # overlays, only the currently selected volume is used).
        maskimg = roiOptions[dlg.GetChoice()]
        mask = maskimg[:] > 0
        if overlay.ndim > 3: data = overlay[opts.index()][mask]
        else: data = overlay[mask]
        count = self.__plotPanel.histType == 'count'
        drange = (np.nanmin(data), np.nanmax(data))
        nbins = histogramseries.autoBin(data, drange)
        xdata, ydata, _ = histogramseries.histogram(data,
                                                    nbins,
                                                    drange,
                                                    drange,
                                                    includeOutliers=False,
                                                    count=count)
        ds = dataseries.DataSeries(overlay,
                                   self.overlayList,
                                   self.displayCtx,
                                   self.__plotPanel)
        ds.colour = self.__plotPanel.getOverlayPlotColour(overlay)
        ds.lineStyle = self.__plotPanel.getOverlayPlotStyle(overlay)
        ds.lineWidth = 2
        ds.alpha = 1
        ds.label = '{} [mask: {}]'.format(overlay.name, maskimg.name)
        # We have to run the data through
        # prepareDataSeries to preprocess
        # (e.g. smooth) it
        ds.setData(xdata, ydata)
        ds.setData(*self.__plotPanel.prepareDataSeries(ds))
        self.__plotPanel.canvas.dataSeries.append(ds)
| [
"pauldmccarthy@gmail.com"
] | pauldmccarthy@gmail.com |
ff1b4b20c92851e1bdcfd180c5b9f4b46b22dbdb | 0ea15da8de03fa9ad7acce50c03824ced868a4e7 | /awesome_itech_project/awesome_itech_project/wsgi.py | e4fd12ce9b44444d43b7c3da64df723fa319c0de | [] | no_license | Zhouhao12345/AwesomeITECH | 6b23d2c40c2c74be237eba2396b83188b7750e13 | 57f1f9bc304f407edbcb9f64ff289037d9aff7f2 | refs/heads/master | 2021-01-10T14:43:04.994235 | 2016-03-25T21:15:09 | 2016-03-25T21:15:09 | 52,460,370 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | """
WSGI config for awesome_itech_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings module before the WSGI application
# object is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awesome_itech_project.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
b7d41e8d6776f9362fcd567214b789af65441908 | 1719920a92f7194766624474b98d59ef8d6eddaf | /models/mobile_app_content_file.py | 51e891805d2376f4dff51e5a5f0d891a13e2d963 | [
"MIT"
] | permissive | MIchaelMainer/msgraph-v10-models-python | cfa5e3a65ba675383975a99779763211ed9fa0a9 | adad66363ebe151be2332f3ef74a664584385748 | refs/heads/master | 2020-03-19T12:51:06.370673 | 2018-06-08T00:16:12 | 2018-06-08T00:16:12 | 136,544,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,626 | py | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.mobile_app_content_file_upload_state import MobileAppContentFileUploadState
from datetime import datetime
from ..one_drive_object_base import OneDriveObjectBase
class MobileAppContentFile(OneDriveObjectBase):
    """Graph model for a single mobile-app content file.

    Every accessor reads from / writes to the underlying ``_prop_dict`` so
    an instance serializes straight back to its wire representation.
    """

    def __init__(self, prop_dict=None):
        """Create a model, optionally wrapping an existing property dict.

        The original signature used a mutable default (``prop_dict={}``):
        every instance created without arguments shared one dict, so a
        property setter on one instance leaked its value into all the
        others.  A fresh dict is now created per instance; passing an
        explicit dict behaves exactly as before.
        """
        self._prop_dict = prop_dict if prop_dict is not None else {}

    @property
    def azure_storage_uri(self):
        """
        Gets and sets the azureStorageUri

        Returns:
            str:
                The azureStorageUri
        """
        if "azureStorageUri" in self._prop_dict:
            return self._prop_dict["azureStorageUri"]
        else:
            return None

    @azure_storage_uri.setter
    def azure_storage_uri(self, val):
        self._prop_dict["azureStorageUri"] = val

    @property
    def is_committed(self):
        """
        Gets and sets the isCommitted

        Returns:
            bool:
                The isCommitted
        """
        if "isCommitted" in self._prop_dict:
            return self._prop_dict["isCommitted"]
        else:
            return None

    @is_committed.setter
    def is_committed(self, val):
        self._prop_dict["isCommitted"] = val

    @property
    def created_date_time(self):
        """
        Gets and sets the createdDateTime

        Returns:
            datetime:
                The createdDateTime
        """
        if "createdDateTime" in self._prop_dict:
            # NOTE(review): assumes the timestamp always carries fractional
            # seconds ("...T10:20:30.123Z"); a value without ".%f" would
            # raise ValueError here -- confirm against real service payloads.
            return datetime.strptime(self._prop_dict["createdDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
        else:
            return None

    @created_date_time.setter
    def created_date_time(self, val):
        self._prop_dict["createdDateTime"] = val.isoformat()+"Z"

    @property
    def name(self):
        """
        Gets and sets the name

        Returns:
            str:
                The name
        """
        if "name" in self._prop_dict:
            return self._prop_dict["name"]
        else:
            return None

    @name.setter
    def name(self, val):
        self._prop_dict["name"] = val

    @property
    def size(self):
        """
        Gets and sets the size

        Returns:
            int:
                The size
        """
        if "size" in self._prop_dict:
            return self._prop_dict["size"]
        else:
            return None

    @size.setter
    def size(self, val):
        self._prop_dict["size"] = val

    @property
    def size_encrypted(self):
        """
        Gets and sets the sizeEncrypted

        Returns:
            int:
                The sizeEncrypted
        """
        if "sizeEncrypted" in self._prop_dict:
            return self._prop_dict["sizeEncrypted"]
        else:
            return None

    @size_encrypted.setter
    def size_encrypted(self, val):
        self._prop_dict["sizeEncrypted"] = val

    @property
    def azure_storage_uri_expiration_date_time(self):
        """
        Gets and sets the azureStorageUriExpirationDateTime

        Returns:
            datetime:
                The azureStorageUriExpirationDateTime
        """
        if "azureStorageUriExpirationDateTime" in self._prop_dict:
            # NOTE(review): same fractional-seconds assumption as
            # created_date_time above.
            return datetime.strptime(self._prop_dict["azureStorageUriExpirationDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
        else:
            return None

    @azure_storage_uri_expiration_date_time.setter
    def azure_storage_uri_expiration_date_time(self, val):
        self._prop_dict["azureStorageUriExpirationDateTime"] = val.isoformat()+"Z"

    @property
    def upload_state(self):
        """
        Gets and sets the uploadState

        Returns:
            :class:`MobileAppContentFileUploadState<onedrivesdk.model.mobile_app_content_file_upload_state.MobileAppContentFileUploadState>`:
                The uploadState
        """
        if "uploadState" in self._prop_dict:
            # Lazily wrap the raw wire value in its model class on first access.
            if isinstance(self._prop_dict["uploadState"], OneDriveObjectBase):
                return self._prop_dict["uploadState"]
            else :
                self._prop_dict["uploadState"] = MobileAppContentFileUploadState(self._prop_dict["uploadState"])
                return self._prop_dict["uploadState"]
        return None

    @upload_state.setter
    def upload_state(self, val):
        self._prop_dict["uploadState"] = val
| [
"mmainer@microsoft.com"
] | mmainer@microsoft.com |
ff268df9cf570e57179bf90ed24e13ec67a171d6 | 98ac0b139301285ece1a4bc9f13b75433d263419 | /torchreid/models/mlfn.py | ec712d66f781745c4f0c329560cfd572515eda3e | [
"MIT"
] | permissive | sovrasov/deep-person-reid | eb0e6b2f0bb3fa6dc22205fd443fd583f9951cd2 | 79773b88986c26e9ac2407af5999923426298a8f | refs/heads/master | 2021-09-13T04:31:15.620950 | 2019-11-14T08:06:34 | 2019-11-14T08:06:34 | 202,522,569 | 1 | 1 | MIT | 2019-11-14T08:06:36 | 2019-08-15T10:35:17 | Python | UTF-8 | Python | false | false | 7,569 | py | from __future__ import absolute_import
from __future__ import division
__all__ = ['mlfn']
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.model_zoo as model_zoo
# Pretrained checkpoint locations; these must be downloaded manually
# (see the warning emitted by mlfn() below).
model_urls = {
    # training epoch = 5, top1 = 51.6
    'imagenet': 'https://mega.nz/#!YHxAhaxC!yu9E6zWl0x5zscSouTdbZu8gdFFytDdl-RAdD2DEfpk',
}
class MLFNBlock(nn.Module):
    """One MLFN residual block.

    Returns both the transformed feature map and the block's factor-selection
    signature ``s`` -- a sigmoid-activated tensor with ``groups`` channels
    (shape (b, groups, 1, 1)) that the parent network aggregates across blocks.
    """
    def __init__(self, in_channels, out_channels, stride, fsm_channels, groups=32):
        super(MLFNBlock, self).__init__()
        self.groups = groups
        mid_channels = out_channels // 2
        # Factor Modules: 1x1 reduce -> 3x3 grouped conv -> 1x1 expand.
        self.fm_conv1 = nn.Conv2d(in_channels, mid_channels, 1, bias=False)
        self.fm_bn1 = nn.BatchNorm2d(mid_channels)
        self.fm_conv2 = nn.Conv2d(mid_channels, mid_channels, 3, stride=stride, padding=1, bias=False, groups=self.groups)
        self.fm_bn2 = nn.BatchNorm2d(mid_channels)
        self.fm_conv3 = nn.Conv2d(mid_channels, out_channels, 1, bias=False)
        self.fm_bn3 = nn.BatchNorm2d(out_channels)
        # Factor Selection Module: global pool -> 2 bottleneck convs ->
        # per-group sigmoid gate over the block input.
        self.fsm = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, fsm_channels[0], 1),
            nn.BatchNorm2d(fsm_channels[0]),
            nn.ReLU(inplace=True),
            nn.Conv2d(fsm_channels[0], fsm_channels[1], 1),
            nn.BatchNorm2d(fsm_channels[1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(fsm_channels[1], self.groups, 1),
            nn.BatchNorm2d(self.groups),
            nn.Sigmoid(),
        )
        # 1x1 projection for the residual path when the shape changes.
        self.downsample = None
        if in_channels != out_channels or stride > 1:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
    def forward(self, x):
        residual = x
        s = self.fsm(x)
        # reduce dimension
        x = self.fm_conv1(x)
        x = self.fm_bn1(x)
        x = F.relu(x, inplace=True)
        # group convolution
        x = self.fm_conv2(x)
        x = self.fm_bn2(x)
        x = F.relu(x, inplace=True)
        # factor selection: broadcast the per-group gate s over the n
        # channels belonging to each group, then scale.
        b, c = x.size(0), x.size(1)
        n = c // self.groups
        ss = s.repeat(1, n, 1, 1) # from (b, g, 1, 1) to (b, g*n=c, 1, 1)
        ss = ss.view(b, n, self.groups, 1, 1)
        ss = ss.permute(0, 2, 1, 3, 4).contiguous()
        ss = ss.view(b, c, 1, 1)
        x = ss * x
        # recover dimension
        x = self.fm_conv3(x)
        x = self.fm_bn3(x)
        x = F.relu(x, inplace=True)
        if self.downsample is not None:
            residual = self.downsample(residual)
        return F.relu(residual + x, inplace=True), s
class MLFN(nn.Module):
    """Multi-Level Factorisation Net.

    Reference:
        Chang et al. Multi-Level Factorisation Net for
        Person Re-Identification. CVPR 2018.

    Public keys:
        - ``mlfn``: MLFN (Multi-Level Factorisation Net).
    """
    # NOTE: the mutable default for `channels` is safe here -- it is only
    # indexed, never mutated.
    def __init__(self, num_classes, loss='softmax', groups=32, channels=[64, 256, 512, 1024, 2048], embed_dim=1024, **kwargs):
        super(MLFN, self).__init__()
        self.loss = loss
        self.groups = groups
        # first convolutional layer
        self.conv1 = nn.Conv2d(3, channels[0], 7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(channels[0])
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        # main body: 16 MLFN blocks; stride-2 blocks start each new stage.
        self.feature = nn.ModuleList([
            # layer 1-3
            MLFNBlock(channels[0], channels[1], 1, [128, 64], self.groups),
            MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups),
            MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups),
            # layer 4-7
            MLFNBlock(channels[1], channels[2], 2, [256, 128], self.groups),
            MLFNBlock(channels[2], channels[2], 1, [256, 128], self.groups),
            MLFNBlock(channels[2], channels[2], 1, [256, 128], self.groups),
            MLFNBlock(channels[2], channels[2], 1, [256, 128], self.groups),
            # layer 8-13
            MLFNBlock(channels[2], channels[3], 2, [512, 128], self.groups),
            MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups),
            MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups),
            MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups),
            MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups),
            MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups),
            # layer 14-16
            MLFNBlock(channels[3], channels[4], 2, [512, 128], self.groups),
            MLFNBlock(channels[4], channels[4], 1, [512, 128], self.groups),
            MLFNBlock(channels[4], channels[4], 1, [512, 128], self.groups),
        ])
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # projection functions: fc_x embeds the final feature map; fc_s embeds
        # the 16 concatenated per-block signatures (groups * 16 channels).
        self.fc_x = nn.Sequential(
            nn.Conv2d(channels[4], embed_dim, 1, bias=False),
            nn.BatchNorm2d(embed_dim),
            nn.ReLU(inplace=True),
        )
        self.fc_s = nn.Sequential(
            nn.Conv2d(self.groups * 16, embed_dim, 1, bias=False),
            nn.BatchNorm2d(embed_dim),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(embed_dim, num_classes)
        self.init_params()
    def init_params(self):
        # Kaiming init for convs, unit-scale BN, small-normal linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x, inplace=True)
        x = self.maxpool(x)
        # Collect each block's factor signature alongside the feature map.
        s_hat = []
        for block in self.feature:
            x, s = block(x)
            s_hat.append(s)
        s_hat = torch.cat(s_hat, 1)
        x = self.global_avgpool(x)
        x = self.fc_x(x)
        s_hat = self.fc_s(s_hat)
        # Final embedding: average of the feature and signature projections.
        v = (x + s_hat) * 0.5
        v = v.view(v.size(0), -1)
        # Inference: return the embedding; training: return logits (and the
        # embedding as well for triplet loss).
        if not self.training:
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.
    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    current_state = model.state_dict()
    downloaded = model_zoo.load_url(model_url)
    # Keep only entries whose name exists locally and whose tensor shape matches.
    compatible = {}
    for layer_name, tensor in downloaded.items():
        if layer_name in current_state and current_state[layer_name].size() == tensor.size():
            compatible[layer_name] = tensor
    current_state.update(compatible)
    model.load_state_dict(current_state)
def mlfn(num_classes, loss='softmax', pretrained=True, **kwargs):
    """Build an MLFN model.

    Pretrained ImageNet weights are not fetched automatically; when
    ``pretrained`` is true a warning points at the manual download URL.
    """
    model = MLFN(num_classes, loss, **kwargs)
    if pretrained:
        # init_pretrained_weights(model, model_urls['imagenet'])
        import warnings
        warnings.warn(
            'The imagenet pretrained weights need to be manually downloaded from {}'.format(
                model_urls['imagenet']
            )
        )
    return model
"k.zhou@qmul.ac.uk"
] | k.zhou@qmul.ac.uk |
4fab87f0958d35670c3b721e447b2960328e7a17 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v8/resources/types/change_event.py | 078c3f918e59efb2c2a1f5be9e63f15de9c21c82 | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 11,419 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.enums.types import change_client_type
from google.ads.googleads.v8.enums.types import change_event_resource_type
from google.ads.googleads.v8.enums.types import (
resource_change_operation as gage_resource_change_operation,
)
from google.ads.googleads.v8.resources.types import ad as gagr_ad
from google.ads.googleads.v8.resources.types import ad_group as gagr_ad_group
from google.ads.googleads.v8.resources.types import (
ad_group_ad as gagr_ad_group_ad,
)
from google.ads.googleads.v8.resources.types import (
ad_group_asset as gagr_ad_group_asset,
)
from google.ads.googleads.v8.resources.types import (
ad_group_bid_modifier as gagr_ad_group_bid_modifier,
)
from google.ads.googleads.v8.resources.types import (
ad_group_criterion as gagr_ad_group_criterion,
)
from google.ads.googleads.v8.resources.types import (
ad_group_feed as gagr_ad_group_feed,
)
from google.ads.googleads.v8.resources.types import asset as gagr_asset
from google.ads.googleads.v8.resources.types import campaign as gagr_campaign
from google.ads.googleads.v8.resources.types import (
campaign_asset as gagr_campaign_asset,
)
from google.ads.googleads.v8.resources.types import (
campaign_budget as gagr_campaign_budget,
)
from google.ads.googleads.v8.resources.types import (
campaign_criterion as gagr_campaign_criterion,
)
from google.ads.googleads.v8.resources.types import (
campaign_feed as gagr_campaign_feed,
)
from google.ads.googleads.v8.resources.types import (
customer_asset as gagr_customer_asset,
)
from google.ads.googleads.v8.resources.types import feed as gagr_feed
from google.ads.googleads.v8.resources.types import feed_item as gagr_feed_item
from google.protobuf import field_mask_pb2 # type: ignore
# Registers this module's messages with the proto-plus runtime; the manifest
# lists every message the module exports.
__protobuf__ = proto.module(
    package="google.ads.googleads.v8.resources",
    marshal="google.ads.googleads.v8",
    manifest={"ChangeEvent",},
)
class ChangeEvent(proto.Message):
    r"""Describes the granular change of returned resource of certain
    resource types. Changes made through UI, API and new versions of
    Editor by external users (including external users, and internal
    users that can be shown externally) in the past 30 days will be
    shown. The change shows the old values of the changed fields
    before the change and the new values right after the change.
    ChangeEvent could have up to 3 minutes delay to reflect a new
    change.
    Attributes:
        resource_name (str):
            Output only. The resource name of the change event. Change
            event resource names have the form:
            ``customers/{customer_id}/changeEvents/{timestamp_micros}~{command_index}~{mutate_index}``
        change_date_time (str):
            Output only. Time at which the change was
            committed on this resource.
        change_resource_type (google.ads.googleads.v8.enums.types.ChangeEventResourceTypeEnum.ChangeEventResourceType):
            Output only. The type of the changed resource. This dictates
            what resource will be set in old_resource and new_resource.
        change_resource_name (str):
            Output only. The Simply resource this change
            occurred on.
        client_type (google.ads.googleads.v8.enums.types.ChangeClientTypeEnum.ChangeClientType):
            Output only. Where the change was made
            through.
        user_email (str):
            Output only. The email of the user who made
            this change.
        old_resource (google.ads.googleads.v8.resources.types.ChangeEvent.ChangedResource):
            Output only. The old resource before the
            change. Only changed fields will be populated.
        new_resource (google.ads.googleads.v8.resources.types.ChangeEvent.ChangedResource):
            Output only. The new resource after the
            change. Only changed fields will be populated.
        resource_change_operation (google.ads.googleads.v8.enums.types.ResourceChangeOperationEnum.ResourceChangeOperation):
            Output only. The operation on the changed
            resource.
        changed_fields (google.protobuf.field_mask_pb2.FieldMask):
            Output only. A list of fields that are
            changed in the returned resource.
        campaign (str):
            Output only. The Campaign affected by this
            change.
        ad_group (str):
            Output only. The AdGroup affected by this
            change.
        feed (str):
            Output only. The Feed affected by this
            change.
        feed_item (str):
            Output only. The FeedItem affected by this
            change.
        asset (str):
            Output only. The Asset affected by this
            change.
    """
    # NOTE: auto-generated proto-plus message. Field numbers and message types
    # mirror the change_event.proto definition and must not be edited by hand.
    class ChangedResource(proto.Message):
        r"""A wrapper proto presenting all supported resources. Only the
        resource of the change_resource_type will be set.
        Attributes:
            ad (google.ads.googleads.v8.resources.types.Ad):
                Output only. Set if change_resource_type == AD.
            ad_group (google.ads.googleads.v8.resources.types.AdGroup):
                Output only. Set if change_resource_type == AD_GROUP.
            ad_group_criterion (google.ads.googleads.v8.resources.types.AdGroupCriterion):
                Output only. Set if change_resource_type ==
                AD_GROUP_CRITERION.
            campaign (google.ads.googleads.v8.resources.types.Campaign):
                Output only. Set if change_resource_type == CAMPAIGN.
            campaign_budget (google.ads.googleads.v8.resources.types.CampaignBudget):
                Output only. Set if change_resource_type == CAMPAIGN_BUDGET.
            ad_group_bid_modifier (google.ads.googleads.v8.resources.types.AdGroupBidModifier):
                Output only. Set if change_resource_type ==
                AD_GROUP_BID_MODIFIER.
            campaign_criterion (google.ads.googleads.v8.resources.types.CampaignCriterion):
                Output only. Set if change_resource_type ==
                CAMPAIGN_CRITERION.
            feed (google.ads.googleads.v8.resources.types.Feed):
                Output only. Set if change_resource_type == FEED.
            feed_item (google.ads.googleads.v8.resources.types.FeedItem):
                Output only. Set if change_resource_type == FEED_ITEM.
            campaign_feed (google.ads.googleads.v8.resources.types.CampaignFeed):
                Output only. Set if change_resource_type == CAMPAIGN_FEED.
            ad_group_feed (google.ads.googleads.v8.resources.types.AdGroupFeed):
                Output only. Set if change_resource_type == AD_GROUP_FEED.
            ad_group_ad (google.ads.googleads.v8.resources.types.AdGroupAd):
                Output only. Set if change_resource_type == AD_GROUP_AD.
            asset (google.ads.googleads.v8.resources.types.Asset):
                Output only. Set if change_resource_type == ASSET.
            customer_asset (google.ads.googleads.v8.resources.types.CustomerAsset):
                Output only. Set if change_resource_type == CUSTOMER_ASSET.
            campaign_asset (google.ads.googleads.v8.resources.types.CampaignAsset):
                Output only. Set if change_resource_type == CAMPAIGN_ASSET.
            ad_group_asset (google.ads.googleads.v8.resources.types.AdGroupAsset):
                Output only. Set if change_resource_type == AD_GROUP_ASSET.
        """
        # At most one of the following fields is populated, selected by the
        # parent event's change_resource_type.
        ad = proto.Field(proto.MESSAGE, number=1, message=gagr_ad.Ad,)
        ad_group = proto.Field(
            proto.MESSAGE, number=2, message=gagr_ad_group.AdGroup,
        )
        ad_group_criterion = proto.Field(
            proto.MESSAGE,
            number=3,
            message=gagr_ad_group_criterion.AdGroupCriterion,
        )
        campaign = proto.Field(
            proto.MESSAGE, number=4, message=gagr_campaign.Campaign,
        )
        campaign_budget = proto.Field(
            proto.MESSAGE,
            number=5,
            message=gagr_campaign_budget.CampaignBudget,
        )
        ad_group_bid_modifier = proto.Field(
            proto.MESSAGE,
            number=6,
            message=gagr_ad_group_bid_modifier.AdGroupBidModifier,
        )
        campaign_criterion = proto.Field(
            proto.MESSAGE,
            number=7,
            message=gagr_campaign_criterion.CampaignCriterion,
        )
        feed = proto.Field(proto.MESSAGE, number=8, message=gagr_feed.Feed,)
        feed_item = proto.Field(
            proto.MESSAGE, number=9, message=gagr_feed_item.FeedItem,
        )
        campaign_feed = proto.Field(
            proto.MESSAGE, number=10, message=gagr_campaign_feed.CampaignFeed,
        )
        ad_group_feed = proto.Field(
            proto.MESSAGE, number=11, message=gagr_ad_group_feed.AdGroupFeed,
        )
        ad_group_ad = proto.Field(
            proto.MESSAGE, number=12, message=gagr_ad_group_ad.AdGroupAd,
        )
        asset = proto.Field(proto.MESSAGE, number=13, message=gagr_asset.Asset,)
        customer_asset = proto.Field(
            proto.MESSAGE, number=14, message=gagr_customer_asset.CustomerAsset,
        )
        campaign_asset = proto.Field(
            proto.MESSAGE, number=15, message=gagr_campaign_asset.CampaignAsset,
        )
        ad_group_asset = proto.Field(
            proto.MESSAGE, number=16, message=gagr_ad_group_asset.AdGroupAsset,
        )
    # Top-level event fields (see the class docstring for their semantics).
    resource_name = proto.Field(proto.STRING, number=1,)
    change_date_time = proto.Field(proto.STRING, number=2,)
    change_resource_type = proto.Field(
        proto.ENUM,
        number=3,
        enum=change_event_resource_type.ChangeEventResourceTypeEnum.ChangeEventResourceType,
    )
    change_resource_name = proto.Field(proto.STRING, number=4,)
    client_type = proto.Field(
        proto.ENUM,
        number=5,
        enum=change_client_type.ChangeClientTypeEnum.ChangeClientType,
    )
    user_email = proto.Field(proto.STRING, number=6,)
    old_resource = proto.Field(
        proto.MESSAGE, number=7, message=ChangedResource,
    )
    new_resource = proto.Field(
        proto.MESSAGE, number=8, message=ChangedResource,
    )
    resource_change_operation = proto.Field(
        proto.ENUM,
        number=9,
        enum=gage_resource_change_operation.ResourceChangeOperationEnum.ResourceChangeOperation,
    )
    changed_fields = proto.Field(
        proto.MESSAGE, number=10, message=field_mask_pb2.FieldMask,
    )
    campaign = proto.Field(proto.STRING, number=11,)
    ad_group = proto.Field(proto.STRING, number=12,)
    feed = proto.Field(proto.STRING, number=13,)
    feed_item = proto.Field(proto.STRING, number=14,)
    # Field number jumps to 20 here; keep as generated from the .proto.
    asset = proto.Field(proto.STRING, number=20,)
# Export exactly the messages declared in the proto-plus manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | Z2Xsoft.noreply@github.com |
ca0e2147614ffa23c1d12256ceb2c465f8ef9ee1 | 8cefaf15f2b70bc3457047351151f85dbffc191e | /tools.py | 2a0c12b25219e29de93b58540ba2a51db4d1e70a | [] | no_license | tjacek/ortho_selection | 183fa86088d94f343b191538493035ad31c4d1d7 | ee923bfda63262ce62033f8e633f1ac37804ce21 | refs/heads/master | 2023-01-21T09:18:07.671478 | 2020-12-02T21:53:22 | 2020-12-02T21:53:22 | 265,209,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | import itertools
import feats
def filtered_dict(names, dic):
    """Return a copy of ``dic`` restricted to the keys listed in ``names``."""
    selected = {}
    for key in names:
        selected[key] = dic[key]
    return selected
def split(names, selector=None):
    """Partition ``names`` into a (train, test) pair.

    ``names`` may be an iterable of names or a dict keyed by name; for a
    dict the result is a pair of sub-dicts with the original values.
    ``selector(name)`` decides membership (truthy -> train); it defaults
    to :func:`get_person` when not given.

    Fix: use ``isinstance`` instead of ``type(names) == dict`` so dict
    subclasses (e.g. ``collections.OrderedDict``) are split into dicts
    as intended, instead of falling through to the list branch.
    """
    if isinstance(names, dict):
        train, test = split(names.keys(), selector)
        return filtered_dict(train, names), filtered_dict(test, names)
    if not selector:
        selector = get_person
    train, test = [], []
    # Single pass so an impure selector is called exactly once per name.
    for name_i in names:
        if selector(name_i):
            train.append(name_i)
        else:
            test.append(name_i)
    return train, test
def get_person(name_i):
    """Return True when the person id (second '_'-separated token) is odd."""
    person_id = int(name_i.split('_')[1])
    return person_id % 2 == 1
def person_cats(y):
    """Label each entry as '<person-token>_<position>'."""
    return [
        "{}_{}".format(label.split("_")[1], position)
        for position, label in enumerate(y)
    ]
def read_datasets(in_path):
    """Load feature dataset(s), dispatching on the shape of ``in_path``.

    A plain path is read directly; a (common, deep) tuple combines the two
    sources, with a list of common paths producing multiple combinations.
    """
    if type(in_path) != tuple:
        return feats.read(in_path)
    common_path, deep_path = in_path
    if type(common_path) == list:
        return multi_dataset(common_path, deep_path)
    return combined_dataset(common_path, deep_path)
def combined_dataset(common_path, deep_path, sub_datasets=False):
    """Combine common handcrafted features with each deep feature set.

    Either path may be falsy, in which case the other source is read alone.
    With ``sub_datasets`` true, the raw parts are returned alongside the
    combined datasets.
    """
    if not common_path:
        return feats.read(deep_path)
    if not deep_path:
        return feats.read(common_path)
    common_data = feats.read(common_path)[0]
    deep_data = feats.read(deep_path)
    combined = [common_data + deep_i for deep_i in deep_data]
    if sub_datasets:
        return combined, common_data, deep_data
    return combined
def multi_dataset(common_path, deep_path):
    """Flatten the combined datasets built from every common-feature path."""
    # Build the list eagerly so all feats.read calls happen up front,
    # then chain the per-path results into one iterable.
    per_path = [combined_dataset(path_i, deep_path) for path_i in common_path]
    return itertools.chain.from_iterable(per_path)
def concat_dataset(in_path):
    """Concatenate common features (possibly unified from several paths)
    with every deep feature set; a non-tuple ``in_path`` is read directly."""
    if type(in_path) != tuple:
        return feats.read(in_path)
    common_path, deep_path = in_path
    if type(common_path) == list:
        common_data = feats.read_unified(common_path)
    else:
        common_data = feats.read(common_path)
    deep_data = feats.read(deep_path)
    return [common_data + deep_i for deep_i in deep_data]
"tjacek@agh.edu.pl"
] | tjacek@agh.edu.pl |
if __name__ == "__main__":
    # First input line: how many values follow.
    total = int(input())
    # Count the values that are different from 1.
    not_ones = sum(1 for _ in range(total) if int(input()) != 1)
    print(not_ones)
"dyogoromagnabendo@gmail.com"
] | dyogoromagnabendo@gmail.com |
89049661b471aab41331bcd78d322b1768555aa0 | 44e14881c8e248c347dd81f0574c4f306c684d64 | /mysite/settings.py | c800c4c0e67641a4ccd20b759ee29dff8311daf4 | [] | no_license | eduarde/OrderHelperApp | f38af40fc492c6300c3ac70ba2e740789b1d7261 | 83938a7f50519f45dcbb96d6dbf1ea49559e28fa | refs/heads/master | 2021-01-10T12:30:47.142918 | 2016-03-17T20:05:38 | 2016-03-17T20:05:38 | 52,556,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,709 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import django.contrib.auth
# NOTE(review): monkey-patches the auth module's LOGIN_URL at import time;
# the LOGIN_URL setting is also assigned further below — presumably one of
# the two is redundant. Verify before removing either.
django.contrib.auth.LOGIN_URL = '/'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment for any real deployment.
SECRET_KEY = 'yan%@fw$+%fg*coibl7gnyog30wj$l5-uumhzl%8tt8r!lfxwd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['orderhelper.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'orderhelper',
    'widget_tweaks',
    'datetimewidget',
    'pure_pagination',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Bucharest'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Store flash messages in a cookie instead of the session backend.
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_URL ='/'
LOGIN_REDIRECT_URL = '/pending'
# Settings for the pure_pagination app.
PAGINATION_SETTINGS = {
    'PAGE_RANGE_DISPLAYED': 5,
    'MARGIN_PAGES_DISPLAYED': 2,
    'SHOW_FIRST_PAGE_WHEN_INVALID': True,
}
# handler404 = 'orderhelper.views.handler404'
| [
"eduard.erja@gmail.com"
] | eduard.erja@gmail.com |
2ea70bfaa953a1c8cc5b12b1a97ff742bf1dc11c | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/picnic/test_services.py | bc80ff73a11f0e084c64a9bd9984de5caa683527 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 6,777 | py | """Tests for the Picnic services."""
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.components.picnic import CONF_COUNTRY_CODE, DOMAIN
from homeassistant.components.picnic.const import SERVICE_ADD_PRODUCT_TO_CART
from homeassistant.components.picnic.services import PicnicServiceException
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
# Picnic user id used by the default fixtures below.
UNIQUE_ID = "295-6y3-1nf4"
def create_picnic_api_client(unique_id):
    """Create PicnicAPI mock with set response data."""
    mock_client = MagicMock()
    # Pre-seed the auth token and the user profile the integration reads.
    mock_client.session.auth_token = "af3wh738j3fa28l9fa23lhiufahu7l"
    mock_client.get_user.return_value = {
        "user_id": unique_id,
        "address": {
            "street": "Teststreet",
            "house_number": 123,
            "house_number_ext": "b",
        },
    }
    return mock_client
async def create_picnic_config_entry(hass: HomeAssistant, unique_id):
    """Create a Picnic config entry."""
    entry_data = {
        CONF_ACCESS_TOKEN: "x-original-picnic-auth-token",
        CONF_COUNTRY_CODE: "NL",
    }
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data=entry_data,
        unique_id=unique_id,
    )
    config_entry.add_to_hass(hass)
    # Set the entry up and wait for the integration to finish loading.
    await hass.config_entries.async_setup(config_entry.entry_id)
    await hass.async_block_till_done()
    return config_entry
@pytest.fixture
def picnic_api_client():
    """Return the default picnic api client."""
    client = create_picnic_api_client(UNIQUE_ID)
    # Have the integration use the prepared mock instead of a real client.
    with patch(
        "homeassistant.components.picnic.create_picnic_client",
        return_value=client,
    ):
        yield client
@pytest.fixture
async def picnic_config_entry(hass: HomeAssistant):
    """Generate the default Picnic config entry."""
    entry = await create_picnic_config_entry(hass, UNIQUE_ID)
    return entry
async def test_add_product_using_id(
    hass: HomeAssistant,
    picnic_api_client: MagicMock,
    picnic_config_entry: MockConfigEntry,
) -> None:
    """Test adding a product by id."""
    service_data = {
        "config_entry_id": picnic_config_entry.entry_id,
        "product_id": "5109348572",
        "amount": 3,
    }
    await hass.services.async_call(
        DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, service_data, blocking=True
    )
    # The service must forward the product id and amount to the API as-is.
    picnic_api_client.add_product.assert_called_with("5109348572", 3)
async def test_add_product_using_name(
    hass: HomeAssistant,
    picnic_api_client: MagicMock,
    picnic_config_entry: MockConfigEntry,
) -> None:
    """Test adding a product by name."""
    # Let the search endpoint return two candidate products.
    best_tea = {
        "id": "2525404",
        "name": "Best tea",
        "display_price": 321,
        "unit_quantity": "big bags",
    }
    cheap_tea = {
        "id": "2525500",
        "name": "Cheap tea",
        "display_price": 100,
        "unit_quantity": "small bags",
    }
    picnic_api_client.search.return_value = [{"items": [best_tea, cheap_tea]}]
    await hass.services.async_call(
        DOMAIN,
        SERVICE_ADD_PRODUCT_TO_CART,
        {"config_entry_id": picnic_config_entry.entry_id, "product_name": "Tea"},
        blocking=True,
    )
    # The first named search result is added with the default amount of 1.
    picnic_api_client.add_product.assert_called_with("2525404", 1)
async def test_add_product_using_name_no_results(
    hass: HomeAssistant,
    picnic_api_client: MagicMock,
    picnic_config_entry: MockConfigEntry,
) -> None:
    """Test adding a product by name that can't be found."""
    # An empty search result must make the service call raise.
    picnic_api_client.search.return_value = []
    service_data = {
        "config_entry_id": picnic_config_entry.entry_id,
        "product_name": "Random non existing product",
    }
    with pytest.raises(PicnicServiceException):
        await hass.services.async_call(
            DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, service_data, blocking=True
        )
async def test_add_product_using_name_no_named_results(
    hass: HomeAssistant,
    picnic_api_client: MagicMock,
    picnic_config_entry: MockConfigEntry,
) -> None:
    """Test adding a product by name for which no named results are returned."""
    # Search results without a "name" key cannot be matched and must raise.
    picnic_api_client.search.return_value = [{"items": [{"attr": "test"}]}]
    service_data = {
        "config_entry_id": picnic_config_entry.entry_id,
        "product_name": "Random product",
    }
    with pytest.raises(PicnicServiceException):
        await hass.services.async_call(
            DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, service_data, blocking=True
        )
async def test_add_product_multiple_config_entries(
    hass: HomeAssistant,
    picnic_api_client: MagicMock,
    picnic_config_entry: MockConfigEntry,
) -> None:
    """Test adding a product for a specific Picnic service while multiple are configured."""
    # Set up a second Picnic service backed by its own mocked API client.
    second_client = create_picnic_api_client("3fj9-9gju-236")
    with patch(
        "homeassistant.components.picnic.create_picnic_client",
        return_value=second_client,
    ):
        second_entry = await create_picnic_config_entry(hass, "3fj9-9gju-236")
    await hass.services.async_call(
        DOMAIN,
        SERVICE_ADD_PRODUCT_TO_CART,
        {"product_id": "5109348572", "config_entry_id": second_entry.entry_id},
        blocking=True,
    )
    # Only the addressed service instance may receive the product.
    picnic_api_client.add_product.assert_not_called()
    second_client.add_product.assert_called_with("5109348572", 1)
async def test_add_product_device_doesnt_exist(
    hass: HomeAssistant,
    picnic_api_client: MagicMock,
    picnic_config_entry: MockConfigEntry,
) -> None:
    """Test adding a product for a specific Picnic service, which doesn't exist."""
    service_data = {"product_id": "5109348572", "config_entry_id": 12345}
    with pytest.raises(ValueError):
        await hass.services.async_call(
            DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, service_data, blocking=True
        )
    # Nothing may reach the Picnic API for an unknown config entry.
    picnic_api_client.add_product.assert_not_called()
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
dfe1ced20ccf2b601d73682bc0dc97808d4d7108 | 4e82bbef275a42ea7c9d58cab546de938fd82064 | /pywad/browser/firefox.py | 3f6db16f9a6599f59f12f7e7f4568f41eea8103a | [] | no_license | TakesxiSximada/pywad | 9a1f115f92f362ebaf5ee4767dd6b468ba9a1f51 | 8ad88595a8f00f4232c24167b5517db8e7c4993e | refs/heads/master | 2016-09-02T04:39:17.736770 | 2015-03-16T09:37:45 | 2015-03-16T09:37:45 | 27,847,444 | 2 | 0 | null | 2015-03-16T09:34:31 | 2014-12-11T00:57:55 | Python | UTF-8 | Python | false | false | 2,066 | py | # -*- coding: utf-8 -*-
from selenium.webdriver import Firefox, Proxy, FirefoxProfile
class FirefoxFactory(object):
    """The browser factory class of Firefox."""

    # Firefox preferences applied to every created profile: silence the
    # legacy secure/insecure transition warning dialogs.
    default_profile = {
        'security.warn_entering_secure': False,
        'security.warn_entering_secure.show_once': True,
        'security.warn_entering_weak': False,
        'security.warn_entering_weak._show_once': True,
        'security.warn_leaving_secure': False,
        'security.warn_leaving_secure.show_once': True,
        'security.warn_leaving_weak': False,
        'security.warn_leaving_weak._show_once': True,
        'security.warn_submit_insecure': False,
        'security.warn_viewing_mixed': False,
        'security.warn_viewing_mixed.show_once': True,
    }

    def __init__(self, proxy=None, implicitly_wait=10, clear_cookies=False):
        """Constructor.

        :param proxy: optional "host:port" string used for HTTP/SSL/FTP.
        :param implicitly_wait: implicit wait timeout in seconds.
        :param clear_cookies: delete all cookies right after start-up.
        """
        self.implicitly_wait = implicitly_wait
        self.clear_cookies = clear_cookies
        self.proxy = proxy

    def _create_proxy_setting(self):
        """Create proxy object."""
        proxy = Proxy()
        if self.proxy:
            proxy.ftp_proxy = proxy.ssl_proxy = proxy.http_proxy = self.proxy
        return proxy

    def _create_profile(self):
        """Create profile object with the default preferences applied."""
        profile = FirefoxProfile()
        for name, value in self.default_profile.items():
            profile.set_preference(name, value)
        return profile

    def _create_browser_instance(self):
        """Start browser."""
        profile = self._create_profile()
        proxy = self._create_proxy_setting()
        return Firefox(firefox_profile=profile, proxy=proxy)

    def create(self):
        """The browser factory method."""
        browser = self._create_browser_instance()
        browser.implicitly_wait(self.implicitly_wait)
        if self.clear_cookies:
            # BUG FIX: the WebDriver API method is delete_all_cookies();
            # the previous delete_allcookies() raised AttributeError.
            browser.delete_all_cookies()
        return browser

    def __call__(self):
        """Emulate a factory function."""
        return self.create()
| [
"takesxi.sximada@gmail.com"
] | takesxi.sximada@gmail.com |
def parse(data):
    """Interpret a Deadfish program and return its output values.

    Commands: 'i' increments, 'd' decrements, 's' squares the
    accumulator (which starts at 0); 'o' appends the current accumulator
    to the output list.  Any other character is ignored.

    Bug fixes vs. the previous version: the accumulator now starts at 0
    (was 5); only 'o' produces output (i/d/s used to append on every
    step); and 'o' no longer returns early out of the parse loop.
    NOTE(review): the full Deadfish spec also resets the accumulator to 0
    when it reaches -1 or 256; that rule is intentionally not applied
    here — confirm whether the target exercise requires it.
    """
    value = 0
    output = []
    for char in data:
        if char == "i":
            value += 1
        elif char == "d":
            value -= 1
        elif char == "s":
            value = value ** 2
        elif char == "o":
            output.append(value)
    return output
parse("ii22ds") | [
"hendricks.ta@gmail.com"
] | hendricks.ta@gmail.com |
f576cc3486e53efcdc3ec4c131c5bba9f36f9abd | bd08d0532f20b7285b437c9bf620de1bbcd5b9ea | /aalh_iit_jdoylewitgencollection/debug-convert-dates.py | bd1f624cc039e31df7b49158095233069740cbae | [
"Unlicense"
] | permissive | johndewees/iitmigration | a9e8a31ba6ceb541ce12c22fd612596cc243dbca | 4dadfbecda719d6e7d60af076a231aedec3c862f | refs/heads/main | 2023-03-14T17:06:58.777683 | 2021-03-27T20:44:58 | 2021-03-27T20:44:58 | 320,086,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | from openpyxl import load_workbook
filename = 'aalh_iit_jdoylewitgencollection.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
# The date values live in column 31, rows 7-200 of the template sheet.
minimumcol = 31
maximumcol = 31
minimumrow = 7
maximumrow = 200
iterationrow = 7
targetcol = 31
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
    for cell in row:
        print(iterationrow)
        testvar = ws.cell(row=iterationrow, column=targetcol).value
        print(testvar)
        if testvar is None:
            print('No Date Digitized')
        elif testvar.find('/') != -1:
            # Convert a US-style M/D/YYYY value into ISO YYYY-MM-DD.
            pieces = testvar.split('/')
            year_text = pieces[2].strip()
            month_text = str(int(pieces[0].strip())).zfill(2)
            day_text = str(int(pieces[1].strip())).zfill(2)
            isodate = year_text + '-' + month_text + '-' + day_text
            ws.cell(row=iterationrow, column=targetcol).value = isodate
        else:
            print('Date is already formatted correctly')
        print(ws.cell(row=iterationrow, column=targetcol).value)
        iterationrow += 1
wb.save('aalh_iit_jdoylewitgencollection.xlsx') | [
"noreply@github.com"
] | johndewees.noreply@github.com |
05377152968e7bb7a8e5d0c27cb1d6ff1ab8d0c6 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-policysimulator/google/cloud/policysimulator_v1/services/simulator/client.py | ae3999eec2b3489fc83fb3e8a06a8c1b4416a267 | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 39,404 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.policysimulator_v1 import gapic_version as package_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.longrunning import operations_pb2
from google.cloud.policysimulator_v1.services.simulator import pagers
from google.cloud.policysimulator_v1.types import simulator
from .transports.base import DEFAULT_CLIENT_INFO, SimulatorTransport
from .transports.grpc import SimulatorGrpcTransport
from .transports.grpc_asyncio import SimulatorGrpcAsyncIOTransport
from .transports.rest import SimulatorRestTransport
class SimulatorClientMeta(type):
    """Metaclass that equips the Simulator client with transport helpers.

    It maintains a class-level registry of the available transport
    implementations so that transport selection can happen before any
    client instance exists.
    """

    _transport_registry = OrderedDict()  # type: Dict[str, Type[SimulatorTransport]]
    _transport_registry["grpc"] = SimulatorGrpcTransport
    _transport_registry["grpc_asyncio"] = SimulatorGrpcAsyncIOTransport
    _transport_registry["rest"] = SimulatorRestTransport

    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[SimulatorTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: Registry key of the desired transport. When omitted
                (or falsy), the first registered transport is used.

        Returns:
            The matching transport class.
        """
        if not label:
            # No explicit choice: fall back to the first registration
            # above, which makes grpc the default transport.
            return next(iter(cls._transport_registry.values()))
        return cls._transport_registry[label]
class SimulatorClient(metaclass=SimulatorClientMeta):
"""Policy Simulator API service.
Policy Simulator is a collection of endpoints for creating, running,
and viewing a [Replay][google.cloud.policysimulator.v1.Replay]. A
[Replay][google.cloud.policysimulator.v1.Replay] is a type of
simulation that lets you see how your principals' access to
resources might change if you changed your IAM policy.
During a [Replay][google.cloud.policysimulator.v1.Replay], Policy
Simulator re-evaluates, or replays, past access attempts under both
the current policy and your proposed policy, and compares those
results to determine how your principals' access might change under
the proposed policy.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "policysimulator.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SimulatorClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SimulatorClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> SimulatorTransport:
"""Returns the transport used by the client instance.
Returns:
SimulatorTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def replay_path(
project: str,
location: str,
replay: str,
) -> str:
"""Returns a fully-qualified replay string."""
return "projects/{project}/locations/{location}/replays/{replay}".format(
project=project,
location=location,
replay=replay,
)
@staticmethod
def parse_replay_path(path: str) -> Dict[str, str]:
"""Parses a replay path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/replays/(?P<replay>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def replay_result_path(
project: str,
location: str,
replay: str,
replay_result: str,
) -> str:
"""Returns a fully-qualified replay_result string."""
return "projects/{project}/locations/{location}/replays/{replay}/results/{replay_result}".format(
project=project,
location=location,
replay=replay,
replay_result=replay_result,
)
@staticmethod
def parse_replay_result_path(path: str) -> Dict[str, str]:
"""Parses a replay_result path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/replays/(?P<replay>.+?)/results/(?P<replay_result>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.
        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.
        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` if provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.
        More details can be found at https://google.aip.dev/auth/4114.
        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.
        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        # Normalize "no options" into an empty options object so the
        # attribute accesses below are uniform.
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Both knobs come from the environment; invalid values are rejected
        # up front before any of them influences the decision.
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )
        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            # Explicit option wins over the platform default cert source.
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT
        return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Optional[Union[str, SimulatorTransport]] = None,
        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the simulator client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, SimulatorTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Accept either a ClientOptions instance or a plain dict of options;
        # normalize to a ClientOptions before any attribute access.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        client_options = cast(client_options_lib.ClientOptions, client_options)
        # Resolve the endpoint and mTLS cert source from options + environment.
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )
        # An API key and explicit credentials are competing auth schemes.
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, SimulatorTransport):
            # transport is a SimulatorTransport instance.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # Deferred import: google.auth._default is a private module and is
            # only needed on this code path.
            import google.auth._default  # type: ignore

            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
                api_audience=client_options.api_audience,
            )
    def get_replay(
        self,
        request: Optional[Union[simulator.GetReplayRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> simulator.Replay:
        r"""Gets the specified
        [Replay][google.cloud.policysimulator.v1.Replay]. Each
        ``Replay`` is available for at least 7 days.
        .. code-block:: python
            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import policysimulator_v1
            def sample_get_replay():
                # Create a client
                client = policysimulator_v1.SimulatorClient()
                # Initialize request argument(s)
                request = policysimulator_v1.GetReplayRequest(
                    name="name_value",
                )
                # Make the request
                response = client.get_replay(request=request)
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.policysimulator_v1.types.GetReplayRequest, dict]):
                The request object. Request message for
                [Simulator.GetReplay][google.cloud.policysimulator.v1.Simulator.GetReplay].
            name (str):
                Required. The name of the
                [Replay][google.cloud.policysimulator.v1.Replay] to
                retrieve, in the following format:
                ``{projects|folders|organizations}/{resource-id}/locations/global/replays/{replay-id}``,
                where ``{resource-id}`` is the ID of the project,
                folder, or organization that owns the ``Replay``.
                Example:
                ``projects/my-example-project/locations/global/replays/506a5f7f-38ce-4d7d-8e03-479ce1833c36``
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.policysimulator_v1.types.Replay:
                A resource describing a Replay, or simulation.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a simulator.GetReplayRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, simulator.GetReplayRequest):
            request = simulator.GetReplayRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE(review): assumes the transport pre-populated _wrapped_methods
        # with default retry/timeout policies — defined in transports/base.
        rpc = self._transport._wrapped_methods[self._transport.get_replay]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Done; return the response.
        return response
    def create_replay(
        self,
        request: Optional[Union[simulator.CreateReplayRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        replay: Optional[simulator.Replay] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation.Operation:
        r"""Creates and starts a
        [Replay][google.cloud.policysimulator.v1.Replay] using the given
        [ReplayConfig][google.cloud.policysimulator.v1.ReplayConfig].
        .. code-block:: python
            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import policysimulator_v1
            def sample_create_replay():
                # Create a client
                client = policysimulator_v1.SimulatorClient()
                # Initialize request argument(s)
                request = policysimulator_v1.CreateReplayRequest(
                    parent="parent_value",
                )
                # Make the request
                operation = client.create_replay(request=request)
                print("Waiting for operation to complete...")
                response = operation.result()
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.policysimulator_v1.types.CreateReplayRequest, dict]):
                The request object. Request message for
                [Simulator.CreateReplay][google.cloud.policysimulator.v1.Simulator.CreateReplay].
            parent (str):
                Required. The parent resource where this
                [Replay][google.cloud.policysimulator.v1.Replay] will be
                created. This resource must be a project, folder, or
                organization with a location.
                Example:
                ``projects/my-example-project/locations/global``
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            replay (google.cloud.policysimulator_v1.types.Replay):
                Required. The
                [Replay][google.cloud.policysimulator.v1.Replay] to
                create. Set ``Replay.ReplayConfig`` to configure the
                replay.
                This corresponds to the ``replay`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.
                The result type for the operation will be
                :class:`google.cloud.policysimulator_v1.types.Replay` A
                resource describing a Replay, or simulation.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, replay])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a simulator.CreateReplayRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, simulator.CreateReplayRequest):
            request = simulator.CreateReplayRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if replay is not None:
            request.replay = replay
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.create_replay]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Wrap the response in an operation future.
        # The LRO resolves to a Replay; its in-flight metadata is
        # ReplayOperationMetadata.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            simulator.Replay,
            metadata_type=simulator.ReplayOperationMetadata,
        )
        # Done; return the response.
        return response
    def list_replay_results(
        self,
        request: Optional[Union[simulator.ListReplayResultsRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListReplayResultsPager:
        r"""Lists the results of running a
        [Replay][google.cloud.policysimulator.v1.Replay].
        .. code-block:: python
            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import policysimulator_v1
            def sample_list_replay_results():
                # Create a client
                client = policysimulator_v1.SimulatorClient()
                # Initialize request argument(s)
                request = policysimulator_v1.ListReplayResultsRequest(
                    parent="parent_value",
                )
                # Make the request
                page_result = client.list_replay_results(request=request)
                # Handle the response
                for response in page_result:
                    print(response)
        Args:
            request (Union[google.cloud.policysimulator_v1.types.ListReplayResultsRequest, dict]):
                The request object. Request message for
                [Simulator.ListReplayResults][google.cloud.policysimulator.v1.Simulator.ListReplayResults].
            parent (str):
                Required. The
                [Replay][google.cloud.policysimulator.v1.Replay] whose
                results are listed, in the following format:
                ``{projects|folders|organizations}/{resource-id}/locations/global/replays/{replay-id}``
                Example:
                ``projects/my-project/locations/global/replays/506a5f7f-38ce-4d7d-8e03-479ce1833c36``
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.policysimulator_v1.services.simulator.pagers.ListReplayResultsPager:
                Response message for
                [Simulator.ListReplayResults][google.cloud.policysimulator.v1.Simulator.ListReplayResults].
                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a simulator.ListReplayResultsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, simulator.ListReplayResultsRequest):
            request = simulator.ListReplayResultsRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_replay_results]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        # The pager re-issues `rpc` with updated page tokens as the caller
        # iterates, so retry/timeout/metadata apply to every page fetch.
        response = pagers.ListReplayResultsPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )
        # Done; return the response.
        return response
def __enter__(self) -> "SimulatorClient":
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
    def list_operations(
        self,
        request: Optional[operations_pb2.ListOperationsRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.ListOperationsResponse:
        r"""Lists operations that match the specified filter in the request.
        Args:
            request (:class:`~.operations_pb2.ListOperationsRequest`):
                The request object. Request message for
                `ListOperations` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.ListOperationsResponse:
                Response message for ``ListOperations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.ListOperationsRequest(**request)
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE(review): unlike the Simulator RPCs above, this wraps the raw
        # transport method ad hoc instead of using _wrapped_methods.
        rpc = gapic_v1.method.wrap_method(
            self._transport.list_operations,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Done; return the response.
        return response
    def get_operation(
        self,
        request: Optional[operations_pb2.GetOperationRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Gets the latest state of a long-running operation.
        Args:
            request (:class:`~.operations_pb2.GetOperationRequest`):
                The request object. Request message for
                `GetOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.GetOperationRequest(**request)
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE(review): wraps the raw transport method ad hoc (no
        # _wrapped_methods entry exists for the operations mixin RPCs).
        rpc = gapic_v1.method.wrap_method(
            self._transport.get_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Done; return the response.
        return response
# Client metadata reported in the user-agent header of every request.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)
# Public surface of this module.
__all__ = ("SimulatorClient",)
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
d08c52d6ec07887f972f0b6586973801cc248350 | 6cbaade56c5db347d1be9a3422a69af52df39b97 | /python_workspace/3_bigdata/02_Standardization_Analysis/2_Excel/12_excel_introspect_all_workbooks.py | c11c104903944b5e2737168189535a1e3837ade5 | [] | no_license | baewonje/iot_bigdata_- | b54e3772f64b9695efee8632183590628b679e11 | 2ce1af67d2f05abeb2ecd442b7299f349bdb9753 | refs/heads/master | 2020-09-06T09:53:53.018320 | 2019-12-06T08:19:33 | 2019-12-06T08:19:33 | 220,390,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # !/usr/bin/env python 3
import glob
import sys
import os
from xlrd import open_workbook
input_directory = sys.argv[1] # Parameters = .
workbook_counter = 0
for input_file in glob.glob(os.path.join(input_directory, '*.xls*')):
workbook = open_workbook(input_file)
print('Workbook: {}'.format(os.path.basename(input_file)))
print('Number of worksheets: {}'.format(workbook.nsheets))
for worksheet in workbook.sheets():
print('Worksheet name:', worksheet.name, '\tRows', worksheet.nrows, '\tColumns:', worksheet.ncols)
workbook_counter += 1
print('Number of Excel workbooks: {}'.format(workbook_counter))
| [
"50129576+baewonje@users.noreply.github.com"
] | 50129576+baewonje@users.noreply.github.com |
863ee150019c6ff7cf81360895900a5538763127 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/keyvault/v20161001/secret.py | a96895debc5f3cf376aad0b19747acf8f1dd3b64 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,848 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SecretArgs', 'Secret']
@pulumi.input_type
class SecretArgs:
    # NOTE: auto-generated by the Pulumi SDK Generator (see file header);
    # keep edits limited to documentation so regeneration stays clean.
    def __init__(__self__, *,
                 properties: pulumi.Input['SecretPropertiesArgs'],
                 resource_group_name: pulumi.Input[str],
                 vault_name: pulumi.Input[str],
                 secret_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Secret resource.
        :param pulumi.Input['SecretPropertiesArgs'] properties: Properties of the secret
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group to which the vault belongs.
        :param pulumi.Input[str] vault_name: Name of the vault
        :param pulumi.Input[str] secret_name: Name of the secret
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags that will be assigned to the secret.
        """
        # Required inputs are always recorded on the instance.
        pulumi.set(__self__, "properties", properties)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "vault_name", vault_name)
        # Optional inputs are only recorded when the caller supplied them.
        if secret_name is not None:
            pulumi.set(__self__, "secret_name", secret_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Input['SecretPropertiesArgs']:
        """
        Properties of the secret
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: pulumi.Input['SecretPropertiesArgs']):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Resource Group to which the vault belongs.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="vaultName")
    def vault_name(self) -> pulumi.Input[str]:
        """
        Name of the vault
        """
        return pulumi.get(self, "vault_name")

    @vault_name.setter
    def vault_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "vault_name", value)

    @property
    @pulumi.getter(name="secretName")
    def secret_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the secret
        """
        return pulumi.get(self, "secret_name")

    @secret_name.setter
    def secret_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The tags that will be assigned to the secret.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Secret(pulumi.CustomResource):
    # NOTE: auto-generated by the Pulumi SDK Generator (see file header);
    # keep edits limited to documentation so regeneration stays clean.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['SecretPropertiesArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 secret_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vault_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Resource information with extended details.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['SecretPropertiesArgs']] properties: Properties of the secret
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group to which the vault belongs.
        :param pulumi.Input[str] secret_name: Name of the secret
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags that will be assigned to the secret.
        :param pulumi.Input[str] vault_name: Name of the vault
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SecretArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Resource information with extended details.

        :param str resource_name: The name of the resource.
        :param SecretArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a SecretArgs
        # bundle or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(SecretArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       properties: Optional[pulumi.Input[pulumi.InputType['SecretPropertiesArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       secret_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       vault_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads: validates
        # options, checks required properties and registers the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ must not be caller-supplied.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SecretArgs.__new__(SecretArgs)

            # Required properties may only be omitted when looking up by URN.
            if properties is None and not opts.urn:
                raise TypeError("Missing required property 'properties'")
            __props__.__dict__["properties"] = properties
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["secret_name"] = secret_name
            __props__.__dict__["tags"] = tags
            if vault_name is None and not opts.urn:
                raise TypeError("Missing required property 'vault_name'")
            __props__.__dict__["vault_name"] = vault_name
            # Output-only properties are unknown until the provider responds.
            __props__.__dict__["location"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity across API versions and the
        # azure-nextgen -> azure-native provider rename.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:keyvault/v20161001:Secret"), pulumi.Alias(type_="azure-native:keyvault:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20180214:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20180214:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20180214preview:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20180214preview:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20190901:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20190901:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20200401preview:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20200401preview:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20210401preview:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20210401preview:Secret"), pulumi.Alias(type_="azure-native:keyvault/v20210601preview:Secret"), pulumi.Alias(type_="azure-nextgen:keyvault/v20210601preview:Secret")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Secret, __self__).__init__(
            'azure-native:keyvault/v20161001:Secret',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Secret':
        """
        Get an existing Secret resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties are populated from provider state during the lookup.
        __props__ = SecretArgs.__new__(SecretArgs)

        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["properties"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return Secret(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The supported Azure location where the key vault should be created.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the key vault.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.SecretPropertiesResponse']:
        """
        Properties of the secret
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        The tags that will be assigned to the key vault.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The resource type of the key vault.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
e08c2dc8691e3a89398eb76dc78a075bebb81438 | 7e616a3b1928940467ec09a82b52d5b4d83984a1 | /MODULE1/Activities/PREFECT/create_dw.py | 1dede11f56bea15c470fed5c431d3cb08e61a576 | [] | no_license | gustavo32/DataEngineeringBootcamp | 70e4c2fb06a387418718df2929b89820a0a76c0d | 704dbe11f33f27ab9eda5649990685f153048429 | refs/heads/main | 2023-01-28T17:38:36.182545 | 2020-12-07T20:12:37 | 2020-12-07T20:12:37 | 316,643,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,135 | py | from datetime import datetime, timedelta
import pendulum
import prefect
from prefect import task, Flow
from prefect.schedules import CronSchedule
import pandas as pd
from io import BytesIO
import zipfile
import requests
import sqlalchemy
import psycopg2
# Run the flow every 10 minutes (cron "*/10 * * * *"), starting at
# 2020-12-05 14:00 in the America/Sao_Paulo timezone.
schedule = CronSchedule(
    cron= '*/10 * * * *',
    start_date=pendulum.datetime(2020, 12, 5, 14, tz="America/Sao_Paulo")
)
@task
def get_raw_date():
    """Download the ENADE 2019 microdata archive from INEP and unpack it.

    Returns the path of the directory containing the extracted data files.
    """
    source_url = 'http://download.inep.gov.br/microdados/Enade_Microdados/microdados_enade_2019.zip'
    archive_bytes = BytesIO(requests.get(source_url).content)
    with zipfile.ZipFile(archive_bytes) as archive:
        archive.extractall()
    return './microdados_enade_2019/2019/3.DADOS/'
@task
def apply_filters(path):
    """Load the ENADE microdata and keep students aged 21-39 with a
    positive overall grade.

    :param path: directory containing ``microdados_enade_2019.txt``.
    :return: filtered ``pandas.DataFrame`` restricted to the columns of interest.
    """
    interested_cols = ['CO_GRUPO', 'NU_IDADE', 'TP_SEXO', 'NT_GER',
                       'NT_FG', 'NT_CE', 'QE_I01', 'QE_I02', 'QE_I04', 'QE_I05', 'QE_I08']
    df = pd.read_csv(path + 'microdados_enade_2019.txt', sep=';', decimal=',',
                     usecols=interested_cols)
    # BUG FIX: the boolean-indexed frame was previously computed but never
    # assigned back, so the age/grade filters had no effect on the result.
    df = df[(df.NU_IDADE > 20) &
            (df.NU_IDADE < 40) &
            (df.NT_GER > 0)]
    return df
@task
def get_mean_normalized_age(df):
    """Return a single-column frame with each age centered on the mean age."""
    result = df.copy()
    result['mean_normalized_age'] = result.NU_IDADE - result.NU_IDADE.mean()
    return result[['mean_normalized_age']]
@task
def get_squared_mean_normalized_age(df):
    """Square the column produced by ``get_mean_normalized_age``."""
    result = df.copy()
    result['squared_mean_normalized_age'] = result['mean_normalized_age'].pow(2)
    return result[['squared_mean_normalized_age']]
@task
def get_marital_status(df):
    """Translate the QE_I01 answer codes into marital-status labels."""
    code_to_label = {
        'A': 'SINGLE',
        'B': 'MARRIED',
        'C': 'DIVORCED',
        'D': 'WIDOWED',
        'E': 'OTHERS',
    }
    result = df.copy()
    result['marital_status'] = result.QE_I01.replace(code_to_label)
    return result[['marital_status']]
@task
def get_skin_color(df):
    """Translate the QE_I02 answer codes into skin-color labels.

    BUG FIX: this previously read ``QE_I01`` (the marital-status question,
    already consumed by ``get_marital_status``); ``QE_I02`` is the skin-color
    question, which is loaded in ``apply_filters`` but was never used.
    """
    result = df.copy()
    result['skin_color'] = result.QE_I02.replace({
        'A': 'WHITE',
        'B': 'BLACK',
        'C': 'YELLOW',
        'D': 'BROWN',
        'E': 'INDIGENOUS',
        'F': '',
        ' ': ''
    })
    return result[['skin_color']]
@task
def join_data(dfs):
    """Concatenate the partial frames column-wise and log a two-row sample."""
    combined = pd.concat(dfs, axis=1)
    prefect.context.get('logger').info(combined.head(2).to_json())
    return combined
@task
def write_dw(df):
    """Load the final dataframe into the local Postgres 'enade' database,
    replacing any existing table.

    NOTE(review): database credentials are hard-coded in the connection
    string -- consider moving them to environment variables or Prefect
    secrets instead of source control.
    """
    engine = sqlalchemy.create_engine(
        'postgresql://postgres:123456@localhost:5432/enade')
    # Batched multi-row INSERTs to keep the load reasonably fast.
    df.to_sql('enade', con=engine, index=False, if_exists='replace', method='multi', chunksize=100000)
# Wire the task graph: download -> filter -> derive columns -> join -> load.
with Flow('Enade', schedule) as flow:
    path = get_raw_date()
    df = apply_filters(path)
    normalized_mean_age = get_mean_normalized_age(df)
    normalized_squared_mean_age = get_squared_mean_normalized_age(normalized_mean_age)
    marital_status = get_marital_status(df)
    skin_color = get_skin_color(df)
    final = join_data([
        df,
        normalized_mean_age,
        normalized_squared_mean_age,
        marital_status,
        skin_color
    ])
    dw = write_dw(final)

# Register the flow with the Prefect backend; the idempotency key avoids
# creating a new version when the flow definition is unchanged.
flow.register(project_name='IGTI', idempotency_key=flow.serialized_hash())
# NOTE(review): the agent token is hard-coded in source control -- it should
# be rotated and supplied via configuration or an environment variable.
flow.run_agent(token='D_0wWDFgx0e67I2IIbf7Ew')
| [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.