hexsha (stringlengths 40–40) | size (int64 4–1.02M) | ext (stringclasses, 8 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4–209) | max_stars_repo_name (stringlengths 5–121) | max_stars_repo_head_hexsha (stringlengths 40–40) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 4–209) | max_issues_repo_name (stringlengths 5–121) | max_issues_repo_head_hexsha (stringlengths 40–40) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 4–209) | max_forks_repo_name (stringlengths 5–121) | max_forks_repo_head_hexsha (stringlengths 40–40) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 4–1.02M) | avg_line_length (float64 1.07–66.1k) | max_line_length (int64 4–266k) | alphanum_fraction (float64 0.01–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6cd1947b04550b8b66e0e8bebd04311cc615380 | 257 | py | Python | lit_management/lit_management/doctype/ct_docs_photographs/ct_docs_photographs.py | bittssystem/lit_management | 81411eca2925ae24990eb1a2dfbbe5e63271ee18 | ["MIT"] | 1 | 2019-11-07T05:40:43.000Z | 2019-11-07T05:40:43.000Z | lit_management/lit_management/doctype/ct_docs_photographs/ct_docs_photographs.py | bittssystem/lit_management | 81411eca2925ae24990eb1a2dfbbe5e63271ee18 | ["MIT"] | null | null | null | lit_management/lit_management/doctype/ct_docs_photographs/ct_docs_photographs.py | bittssystem/lit_management | 81411eca2925ae24990eb1a2dfbbe5e63271ee18 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, KEA and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document


class CTDOCSPHOTOGRAPHS(Document):
    pass
| 23.363636 | 49 | 0.782101 |
4c0aa5d5d4e3f13df56b16a59ab74793f8451d93 | 7,864 | py | Python | are_you_the_one.py | Panda4817/Mastermind-solver | eb994f436664f1103c8ace99aec6cda1ccfca45f | ["MIT"] | 1 | 2021-02-25T17:55:20.000Z | 2021-02-25T17:55:20.000Z | are_you_the_one.py | Panda4817/Mastermind-solver | eb994f436664f1103c8ace99aec6cda1ccfca45f | ["MIT"] | null | null | null | are_you_the_one.py | Panda4817/Mastermind-solver | eb994f436664f1103c8ace99aec6cda1ccfca45f | ["MIT"] | null | null | null |
from itertools import combinations
from copy import deepcopy
# Number of pairs
num_of_pairs = 10
# Retrieve name data from txt file
f = open("/home/kanta/are_you_the_one/are_you_the_one.txt", "r")
data = f.read()
lst = data.split("\n\n")
male = {}
female = {}
for l in lst[0].split("\n"):
sub_lst = l.split()
male[sub_lst[0]] = sub_lst[1]
for l in lst[1].split("\n"):
sub_lst = l.split()
female[sub_lst[0]] = sub_lst[1]
# Set up knowledge base
male_set = set(male.keys())
female_set = set(female.keys())
all_combinations = [(x, y) for x in male_set for y in female_set]
# Filter knowledge base
# From truth booth of the show
not_matched = [("D", "8"), ("F","5"), ("H","9"), ("E","5"), ("B","1"), ("I", "6"), ("H", "4")]
matches = [("E","3"), ("D","7"), ("J", "6")]
for pair in not_matched:
all_combinations.remove(pair)
for pair in matches:
for i in range(num_of_pairs):
if str(i) != pair[1] and (pair[0], str(i)) in all_combinations:
all_combinations.remove((pair[0], str(i)))
for m in male_set:
if m != pair[0] and (m, pair[1]) in all_combinations:
all_combinations.remove((m, pair[1]))
# Store possible combos
possible_combos = []
# Function to calculate number of known matches in the total matches found
def confirmed_match_per_ep(ep):
number_confirmed = 0
for m in matches:
if m in ep:
number_confirmed += 1
return number_confirmed
# Use what is known to update the knowledge base
def eliminate_matches(ep, num):
global all_combinations, possible_combos, matches, not_matched
number_confirmed = confirmed_match_per_ep(ep)
if number_confirmed == num:
for pair in ep:
if pair not in matches:
try:
all_combinations.remove(pair)
not_matched.append(pair)
except ValueError:
continue
# Evaluate an episode to gain likely combos of people
def evaluate_episode(ep, num, most_likely=[]):
global all_combinations, possible_combos, matches, not_matched
number_confirmed = confirmed_match_per_ep(ep)
# Combinations
combi = {c: deepcopy(ep) for c in combinations(ep, num)}
for k, v in combi.items():
# Filter the keys - known matches and/or potential matches
cont = False
found_confirmed = 0
for pair in k:
if pair in not_matched:
cont = True
break
if pair in matches:
found_confirmed += 1
continue
for m in matches:
if pair[0] == m[0] or pair[1] == m[1]:
cont = True
break
if cont or found_confirmed != number_confirmed:
continue
# Find other matches (ignoring letters and numbers in the keys)
combos = []
for pair in v:
if pair in k and pair not in combos:
combos.append(pair)
else:
for p in all_combinations:
if p != pair and p not in combos:
for g in k:
if p[0] == g[0] or p[1] == g[1]:
break
else:
for m in most_likely:
if p != m and p[0] == m[0]:
break
elif p != m and p[1] == m[1]:
break
else:
combos.append(p)
if len(combos) >= num_of_pairs and len(combos) <= 11:
# Reduce the combos to 10 if possible
combos.sort(key=lambda x: x[0])
counts = {}
for c in combos:
if c[0] in counts:
counts[c[0]].append(c[1])
else:
counts[c[0]] = [c[1]]
for m in male_set:
if m not in counts:
break
else:
new_combo = []
for k, v in counts.items():
if len(v) > 1:
for ke, ve in counts.items():
if ke != k and len(ve) == 1 and ve[0] in v:
v.remove(ve[0])
for val in v:
new_combo.append((k, val))
# Check a combo is not already in the list of combos
for p in possible_combos:
p.sort(key=lambda x: x[0])
if "".join(f"{x}" for x in new_combo) == "".join(f"{x}" for x in p):
break
else:
possible_combos.append(new_combo)
# Find most likely pairs based on how many match ups there have been
# Ignore known matches and remove any matches that cannot be
def most_likely(episodes, matches_per_ep):
global matches, not_matched
common_matches = {}
for ep, i in zip(episodes, range(8)):
confirmed_matches = confirmed_match_per_ep(ep)
if confirmed_matches == matches_per_ep[i]:
continue
for pair in ep:
if pair in not_matched:
continue
contradict = [m for m in matches if m[0] == pair[0] or m[1] == pair[1]]
if contradict:
continue
if pair not in common_matches:
common_matches[pair] = 1
else:
common_matches[pair] += 1
sorted_lst = sorted(common_matches.items(), key=lambda x: x[1], reverse=True)
# Return only the top 4
return [m[0] for m in sorted_lst][0:4]
# Data from show
ep1 = [("J", "6"), ("F", "8"), ("A", "2"), ("B", "4"), ("H", "9"), ("D", "5"), ("G", "7"), ("I", "0"), ("E", "3"), ("C", "1")]
# 2 matches
eliminate_matches(ep1, 2)
ep2 = [("F", "0"), ("B", "1"), ("G", "2"), ("J", "3"), ("H", "4"), ("E", "5"), ("I", "6"), ("D", "7"), ("A", "8"), ("C", "9")]
# 4 matches
eliminate_matches(ep2, 4)
ep3 = [("A", "2"), ("B", "1"), ("C", "7"), ("D", "9"), ("E", "3"), ("F", "0"), ("G", "8"), ("H", "5"), ("I", "6"), ("J", "4")]
# 2 matches
eliminate_matches(ep3, 2)
ep4 = [("E", "3"), ("C", "7"), ("J", "5"), ("B", "9"), ("I", "2"), ("F", "6"), ("A", "0"), ("D", "1"), ("H", "8"), ("G", "4")]
# 2 matches
eliminate_matches(ep4, 2)
ep5 = [("E", "3"), ("F", "0"), ("G", "5"), ("J", "6"), ("H", "4"), ("C", "9"), ("A", "8"), ("D", "7"), ("B", "2"), ("I", "1")]
# 5 matches
eliminate_matches(ep5, 5)
ep7 = [("E", "3"), ("D", "7"), ("H", "4"), ("J", "6"), ("G", "9"), ("C", "2"), ("A", "1"), ("B", "8"), ("I", "5"), ("F", "0")]
# 5 matches
eliminate_matches(ep7, 5)
ep8 = [("E", "3"), ("D", "7"), ("C", "4"), ("J", "6"), ("B", "9"), ("H", "2"), ("I", "1"), ("A", "8"), ("G", "5"), ("F", "0")]
# 7 matches
eliminate_matches(ep8, 7)
ep9 = [("E", "3"), ("D", "7"), ("J", "6"), ("H", "4"), ("I", "5"), ("A", "8"), ("B", "9"), ("G", "2"), ("C", "1"), ("F", "0")]
# 8 matches
eliminate_matches(ep9, 8)
# Rank from most likely to least (not including known matches)
episodes = [ep1, ep2, ep3, ep4, ep5, ep7, ep8, ep9]
matches_per_ep = [2, 4, 2, 2, 5, 5, 7, 8]
most_likely_lst = most_likely(episodes, matches_per_ep)
# evaluate likely combos from episode with the max matches
index = matches_per_ep.index(max(matches_per_ep))
evaluate_episode(episodes[index], matches_per_ep[index]) # Could add a third argument - most likely list
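# Hypothetical variant of the call above (not run here), passing the ranked list computed by
# most_likely() as the optional third argument of evaluate_episode:
# evaluate_episode(episodes[index], matches_per_ep[index], most_likely_lst)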
# Print out likely combos
if possible_combos:
for p in possible_combos:
p.sort(key=lambda x:x[0])
for pair in p:
print(pair[0], male[pair[0]], pair[1], female[pair[1]])
print()
else:
print("No conclusive result. Add most likely list to the evaluation function.")
| 36.407407 | 126 | 0.492879 |
ae4a84d86df6bab4709444b02432a1206a74df3e | 2,839 | py | Python | IIC/archs/cluster/residual.py | jizongFox/IIC | 572076d5c0c26516ff3e807f2bad4e3498ab12c1 | ["MIT"] | 1 | 2021-05-25T20:45:53.000Z | 2021-05-25T20:45:53.000Z | src/archs/cluster/residual.py | MihaiAnton/tudelft-iic-reproduction | 342247c444aa1f8b09ea18e3ff9135258d599373 | ["MIT"] | 1 | 2022-02-02T23:32:37.000Z | 2022-02-02T23:32:37.000Z | src/archs/cluster/residual.py | MihaiAnton/tudelft-iic-reproduction | 342247c444aa1f8b09ea18e3ff9135258d599373 | ["MIT"] | null | null | null |
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
track_running_stats=None):
super(BasicBlock, self).__init__()
assert (track_running_stats is not None)
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(
planes, track_running_stats=track_running_stats)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(
planes, track_running_stats=track_running_stats)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNetTrunk(nn.Module):
def __init__(self):
super(ResNetTrunk, self).__init__()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion,
track_running_stats=self.batchnorm_track),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample,
track_running_stats=self.batchnorm_track))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(self.inplanes, planes, track_running_stats=self.batchnorm_track))
return nn.Sequential(*layers)
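# Note: _make_layer relies on self.inplanes and self.batchnorm_track, which subclasses of
# ResNetTrunk are expected to set before calling it; neither attribute is defined in this base class.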
class ResNet(nn.Module):
def __init__(self):
super(ResNet, self).__init__()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
assert (m.track_running_stats == self.batchnorm_track)
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
| 31.898876 | 87 | 0.58577 |
87f1bcba5ce83c40becf335229214af0b043b0c9 | 983 | py | Python | skunkbooth/filters/grid.py | josflesan/SkunkBooth | 29dea6e5301ac7e6c21d6c1f8beec4143d22d4b4 | ["MIT"] | null | null | null | skunkbooth/filters/grid.py | josflesan/SkunkBooth | 29dea6e5301ac7e6c21d6c1f8beec4143d22d4b4 | ["MIT"] | null | null | null | skunkbooth/filters/grid.py | josflesan/SkunkBooth | 29dea6e5301ac7e6c21d6c1f8beec4143d22d4b4 | ["MIT"] | null | null | null |
from random import randint
from typing import List, Tuple


class filter:
    """Sample filter for ASCII operations"""

    def __init__(self):
        """
        Init required values.

        textOp True = operation on ASCII art, False = operation on PIL image
        name will be the name of the filter to be used for display/indexing
        """
        self.textOp = True
        self.name = "Behind Small bars"

    def load(self) -> None:
        """Load environment variables to be used in the filter operation"""
        pass

    def unload(self) -> None:
        """Unload environment variables to be used in the filter operation"""
        pass

    def filter(
        self, image: List[List[Tuple[int, int, int, int]]]
    ) -> List[List[Tuple[int, int, int, int]]]:
        """Process an ASCII image and return an image of the same format and dims"""
        return [
            [("╬" if randint(0, 50) else "╬", j[1], j[2], 0) for j in i] for i in image
        ]
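# Note on the conditional above: randint(0, 50) is falsy only when it returns 0 (roughly 2% of
# cells), and both branches currently yield the same "╬" character, so every cell becomes "╬"
# while the second and third tuple fields (j[1], j[2]) are carried over unchanged.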
| 29.787879 | 87 | 0.592065 |
8397bfdc0a7dc1c461ea25b908034c0479d2b71a | 5,626 | py | Python | google/cloud/aiplatform_v1beta1/types/tensorboard_data.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | google/cloud/aiplatform_v1beta1/types/tensorboard_data.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | null | null | null | google/cloud/aiplatform_v1beta1/types/tensorboard_data.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1",
manifest={
"TimeSeriesData",
"TimeSeriesDataPoint",
"Scalar",
"TensorboardTensor",
"TensorboardBlobSequence",
"TensorboardBlob",
},
)
class TimeSeriesData(proto.Message):
r"""All the data stored in a TensorboardTimeSeries.
Attributes:
tensorboard_time_series_id (str):
Required. The ID of the
TensorboardTimeSeries, which will become the
final component of the TensorboardTimeSeries'
resource name
value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType):
Required. Immutable. The value type of this
time series. All the values in this time series
data must match this value type.
values (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]):
Required. Data points in this time series.
"""
tensorboard_time_series_id = proto.Field(proto.STRING, number=1,)
value_type = proto.Field(
proto.ENUM,
number=2,
enum=tensorboard_time_series.TensorboardTimeSeries.ValueType,
)
values = proto.RepeatedField(
proto.MESSAGE, number=3, message="TimeSeriesDataPoint",
)
class TimeSeriesDataPoint(proto.Message):
r"""A TensorboardTimeSeries data point.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
scalar (google.cloud.aiplatform_v1beta1.types.Scalar):
A scalar value.
This field is a member of `oneof`_ ``value``.
tensor (google.cloud.aiplatform_v1beta1.types.TensorboardTensor):
A tensor value.
This field is a member of `oneof`_ ``value``.
blobs (google.cloud.aiplatform_v1beta1.types.TensorboardBlobSequence):
A blob sequence value.
This field is a member of `oneof`_ ``value``.
wall_time (google.protobuf.timestamp_pb2.Timestamp):
Wall clock timestamp when this data point is
generated by the end user.
step (int):
Step index of this data point within the run.
"""
scalar = proto.Field(proto.MESSAGE, number=3, oneof="value", message="Scalar",)
tensor = proto.Field(
proto.MESSAGE, number=4, oneof="value", message="TensorboardTensor",
)
blobs = proto.Field(
proto.MESSAGE, number=5, oneof="value", message="TensorboardBlobSequence",
)
wall_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
step = proto.Field(proto.INT64, number=2,)
class Scalar(proto.Message):
r"""One point viewable on a scalar metric plot.
Attributes:
value (float):
Value of the point at this step / timestamp.
"""
value = proto.Field(proto.DOUBLE, number=1,)
class TensorboardTensor(proto.Message):
r"""One point viewable on a tensor metric plot.
Attributes:
value (bytes):
Required. Serialized form of
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto
version_number (int):
Optional. Version number of TensorProto used to serialize
[value][google.cloud.aiplatform.v1beta1.TensorboardTensor.value].
"""
value = proto.Field(proto.BYTES, number=1,)
version_number = proto.Field(proto.INT32, number=2,)
class TensorboardBlobSequence(proto.Message):
r"""One point viewable on a blob metric plot, but mostly just a wrapper
message to work around repeated fields can't be used directly within
``oneof`` fields.
Attributes:
values (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]):
List of blobs contained within the sequence.
"""
values = proto.RepeatedField(proto.MESSAGE, number=1, message="TensorboardBlob",)
class TensorboardBlob(proto.Message):
r"""One blob (e.g, image, graph) viewable on a blob metric plot.
Attributes:
id (str):
Output only. A URI safe key uniquely
identifying a blob. Can be used to locate the
blob stored in the Cloud Storage bucket of the
consumer project.
data (bytes):
Optional. The bytes of the blob is not
present unless it's returned by the
ReadTensorboardBlobData endpoint.
"""
id = proto.Field(proto.STRING, number=1,)
data = proto.Field(proto.BYTES, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
| 34.304878 | 110 | 0.675791 |
4c1d33b772bfa2fece32185a27f5d0fc53750915 | 5,304 | py | Python | test/test_sinks.py | sixty-north/python-transducers | 575357e3a17ff3b4c757967afd396bf0ea042c08 | ["MIT"] | 54 | 2015-10-02T02:45:36.000Z | 2021-06-22T04:40:33.000Z | test/test_sinks.py | sixty-north/python-transducers | 575357e3a17ff3b4c757967afd396bf0ea042c08 | ["MIT"] | 3 | 2017-06-11T13:39:18.000Z | 2017-06-12T06:07:24.000Z | test/test_sinks.py | sixty-north/python-transducers | 575357e3a17ff3b4c757967afd396bf0ea042c08 | ["MIT"] | 9 | 2015-10-28T23:36:50.000Z | 2019-01-11T13:47:05.000Z |
import unittest
from io import StringIO
from transducer.sinks import rprint, null_sink, CollectingSink, SingularSink
class TestNullSink(unittest.TestCase):
def test_sent_items_are_sunk(self):
sink = null_sink()
for i in range(100):
sink.send(100)
sink.close()
def test_closed_sink_raises_stop_iteration(self):
sink = null_sink()
sink.close()
with self.assertRaises(StopIteration):
sink.send(42)
class TestRPrint(unittest.TestCase):
def test_sent_items_are_printed(self):
with StringIO() as stream:
sink = rprint(file=stream, flush=True)
sink.send(10)
sink.send(20)
sink.send(30)
result = stream.getvalue()
self.assertEqual(result, "10\n20\n30")
sink.close()
def test_separators_are_printed(self):
with StringIO() as stream:
sink = rprint(sep=', ', file=stream, flush=True)
sink.send(12)
sink.send(24)
sink.send(36)
result = stream.getvalue()
self.assertEqual(result, "12, 24, 36")
sink.close()
def test_end_terminator_is_printed(self):
with StringIO() as stream:
sink = rprint(end='END', file=stream, flush=True)
sink.send(7)
sink.send(14)
sink.send(21)
sink.close()
result = stream.getvalue()
self.assertEqual(result, "7\n14\n21END")
def test_closed_sink_raises_stop_iteration(self):
with StringIO() as stream:
sink = rprint()
sink.close()
with self.assertRaises(StopIteration):
sink.send("StopIteration should be raised")
class TestCollectingSink(unittest.TestCase):
def test_no_items_is_empty(self):
collection = CollectingSink()
self.assertEqual(len(collection), 0)
def test_send_single_item_has_len_one(self):
collection = CollectingSink()
sink = collection()
sink.send(42)
self.assertEqual(len(collection), 1)
def test_send_single_item_is_retrievable(self):
collection = CollectingSink()
sink = collection()
sink.send(64)
result = list(collection)
self.assertListEqual(result, [64])
def test_multiple_items_are_retrievable(self):
collection = CollectingSink()
sink = collection()
sink.send(64)
sink.send(128)
sink.send(256)
result = list(collection)
self.assertListEqual(result, [64, 128, 256])
def test_three_items_added_two_dequeued(self):
collection = CollectingSink()
sink = collection()
sink.send(64)
sink.send(128)
sink.send(256)
i = iter(collection)
next(i)
next(i)
self.assertEqual(len(collection), 1)
def test_three_items_added_four_dequeued_raises_stop_iteration(self):
collection = CollectingSink()
sink = collection()
sink.send(64)
sink.send(128)
sink.send(256)
i = iter(collection)
next(i)
next(i)
next(i)
with self.assertRaises(StopIteration):
next(i)
def test_send_items_to_multiple_sinks(self):
collection = CollectingSink()
sink1 = collection()
sink2 = collection()
sink1.send(64)
sink2.send(128)
sink1.send(256)
sink2.send(512)
result = list(collection)
self.assertListEqual(result, [64, 128, 256, 512])
def test_send_items_then_clear_is_empty(self):
collection = CollectingSink()
sink = collection()
sink.send(64)
sink.send(128)
sink.send(256)
collection.clear()
self.assertEqual(len(collection), 0)
def test_closed_sink_raises_stop_iteration(self):
collection = CollectingSink()
sink = collection()
sink.close()
with self.assertRaises(StopIteration):
sink.send(42)
class TestSingularSink(unittest.TestCase):
def test_no_items_raises_runtime_error(self):
singular_sink = SingularSink()
sink = singular_sink()
with self.assertRaises(RuntimeError):
_ = singular_sink.value
def test_one_sent_item_can_be_retrieved(self):
singular_sink = SingularSink()
sink = singular_sink()
sink.send(496)
self.assertEqual(singular_sink.value, 496)
def test_two_items_sent_raises_stop_iteration(self):
singular_sink = SingularSink()
sink = singular_sink()
sink.send(342)
with self.assertRaises(StopIteration):
sink.send(124)
def test_closed_sink_raises_stop_iteration(self):
singular_sink = SingularSink()
sink = singular_sink()
sink.close()
with self.assertRaises(StopIteration):
sink.send(42)
def test_zero_sent_items_has_no_value(self):
singular_sink = SingularSink()
sink = singular_sink()
self.assertFalse(singular_sink.has_value)
def test_one_sent_item_has_value(self):
singular_sink = SingularSink()
sink = singular_sink()
sink.send(78)
self.assertTrue(singular_sink.has_value)
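# Illustrative usage distilled from the tests above (values are hypothetical):
# collection = CollectingSink()
# sink = collection()          # obtain a coroutine-style sink; .send() pushes items in
# sink.send(1); sink.send(2)
# assert list(collection) == [1, 2]   # iterate the CollectingSink to read collected items back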
| 29.631285 | 76 | 0.613499 |
a5c142e3049cb83c9b47246d1e01f250fe8c61dc | 6,390 | py | Python | full_process_test.py | Dootmaan/Point-Unet | f5f9732702f991d277c1b006ca8164f76d295b22 | ["MIT"] | 5 | 2021-11-19T13:01:15.000Z | 2022-02-27T13:18:03.000Z | full_process_test.py | Dootmaan/Point-Unet | f5f9732702f991d277c1b006ca8164f76d295b22 | ["MIT"] | null | null | null | full_process_test.py | Dootmaan/Point-Unet | f5f9732702f991d277c1b006ca8164f76d295b22 | ["MIT"] | null | null | null |
import numpy as np
import random
import torch as pt
from config import config
from dataset.BraTSDataset3D import BraTSDataset3D
from dataset.MVILiverDataset3D import MVILiverDataset3D
from model.SaliencyAttentionNet import SaliencyAttentionNet
from model.PointUnet import PointUnet
from config import config
import time
model_path='/newdata/why/Saved_models'
crop_size = config.crop_size
size = crop_size[2] * 2 # used for the final cv2 display
img_size = config.input_img_size
testset1 = BraTSDataset3D('/newdata/why/BraTS20', mode='test', augment=False)
testset2 = MVILiverDataset3D(
'/newdata/why/MVI_Liver_Formatted', mode='test', augment=False)
test_dataset1 = pt.utils.data.DataLoader(testset1,
batch_size=1,
shuffle=True,
drop_last=True)
test_dataset2 = pt.utils.data.DataLoader(testset2,
batch_size=1,
shuffle=True,
drop_last=True)
device = pt.device('cuda:0' if pt.cuda.is_available() else 'cpu')
model1 = SaliencyAttentionNet().to(device)
model2 = PointUnet(4, 2,device=device).to(device)
model1.load_state_dict(pt.load(model_path +
'/PointUnet/SaliencyAttentionNet_3D_BraTS_patch-free_bs1_best.pt',
map_location='cpu'))
model2.load_state_dict(pt.load(model_path+'/PointUnet/PointUNet_3D_BraTS_patch-free_bs1_best.pt',map_location = 'cpu'))
model3 = SaliencyAttentionNet().to(device)
model4 = PointUnet(4, 2,device=device).to(device)
model3.load_state_dict(pt.load(model_path +
'/PointUnet/SaliencyAttentionNet_3D_Liver_patch-free_bs1_best.pt',
map_location='cpu'))
model4.load_state_dict(pt.load(model_path+'/PointUnet/PointUNet_3D_Liver_patch-free_bs1_best.pt',map_location = 'cpu'))
model1.eval()
model2.eval()
model3.eval()
model4.eval()
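# Pipeline sketch of TestModel below: the SaliencyAttentionNet first predicts a coarse voxel-wise
# probability map; every voxel is turned into an (x, y, z, intensity) point, the predicted tumour
# points plus randomly sampled background points are kept, coordinates are normalised by the
# volume shape, and the PointUnet then produces the final thresholded segmentation.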
def TestModel():
start_time = time.time()
for whatever, data in enumerate(test_dataset1):
print(whatever)
(_, labels, inputs) = data # use label_sr as input
pointSet=[]
inputs3D = pt.autograd.Variable(inputs).type(pt.FloatTensor).to(device).unsqueeze(1)
with pt.no_grad():
outputs3D = model1(inputs3D)
output_list = np.array(outputs3D.squeeze(0).squeeze(0).cpu().data.numpy())
# output_list[output_list<0.5]=0
# output_list[output_list>=0.5]=1
image=inputs.squeeze(0).squeeze(0).cpu().data.numpy()
for i in range(output_list.shape[0]):
for j in range(output_list.shape[1]):
for k in range(output_list.shape[2]):
pointSet.append([i, j, k, image[i, j, k]])
output_list=output_list.flatten()
pointSet=np.array(pointSet)
none_tumor = list(np.where(output_list <0.5)[0])
tumor = list(np.where(output_list >= 0.5)[0])
print(len(tumor))
queried_idx = tumor + random.sample(none_tumor, k=365000 - len(tumor))
queried_idx = np.array(queried_idx)
random.shuffle(queried_idx)
queried_points=pointSet[queried_idx,...]
queried_points[:,0:3]/=image.shape
# queried_labels=output_list[queried_idx,...]
# pointSet=np.array(pointSet)
inputs3D = pt.autograd.Variable(pt.from_numpy(queried_points)).type(pt.FloatTensor).unsqueeze(0).to(device)
with pt.no_grad():
outputs3D = model2(inputs3D)
outputs3D[outputs3D<0.5]=0
outputs3D[outputs3D>=0.5]=1
# output_list=outputs3D.squeeze(0).squeeze(0).cpu().data.numpy()
# output_list[output_list < 0.5] = 0
# output_list[output_list >= 0.5] = 1
# final_img = np.zeros(shape=(2 * img_size[1], 2 * 2 * img_size[2]))
# final_img[:, :2 * img_size[2]] = output_list[0, 0, 64, :, :] * 255
# final_img[:, 2 * img_size[2]:] = label_list[0, 0, 64, :, :] * 255
# cv2.imwrite('TestPhase_Res_patchfree_Liver.png', final_img)
# pr_sum = output_list.sum()
# gt_sum = label_list.sum()
# pr_gt_sum = np.sum(output_list[label_list == 1])
# dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
# dice_sum += dice
# # print("dice:",dice)
# try:
# hausdorff = hd95(
# output_list.squeeze(0).squeeze(0),
# label_list.squeeze(0).squeeze(0))
# except:
# hausdorff = 0
# jaccard = jc(
# output_list.squeeze(0).squeeze(0),
# label_list.squeeze(0).squeeze(0))
# print("dice:", dice, ";hd95:", hausdorff, ";jaccard:", jaccard)
# hd_sum += hausdorff
# jc_sum += jaccard
for whatever, data in enumerate(test_dataset2):
print(whatever)
(_, labels, inputs) = data # use label_sr as input
pointSet=[]
inputs3D = pt.autograd.Variable(inputs).type(pt.FloatTensor).cuda().unsqueeze(1)
with pt.no_grad():
outputs3D = model3(inputs3D)
output_list = np.array(outputs3D.squeeze(0).squeeze(0).cpu().data.numpy())
image=inputs.squeeze(0).squeeze(0).cpu().data.numpy()
for i in range(output_list.shape[0]):
for j in range(output_list.shape[1]):
for k in range(output_list.shape[2]):
pointSet.append([i, j, k, image[i, j, k]])
output_list=output_list.flatten()
pointSet=np.array(pointSet)
none_tumor = list(np.where(output_list <0.5)[0])
tumor = list(np.where(output_list >= 0.5)[0])
print(len(tumor))
queried_idx = tumor + random.sample(none_tumor, k=800000 - len(tumor))
queried_idx = np.array(queried_idx)
random.shuffle(queried_idx)
queried_points=pointSet[queried_idx,...]
queried_points[:,0:3]/=image.shape
inputs3D = pt.autograd.Variable(pt.from_numpy(queried_points)).type(pt.FloatTensor).unsqueeze(0).to(device)
with pt.no_grad():
outputs3D = model4(inputs3D)
outputs3D[outputs3D<0.5]=0
outputs3D[outputs3D>=0.5]=1
# output_list=outputs3D.squeeze(0).squeeze(0).cpu().data.numpy()
total_time=time.time()-start_time
print(total_time / (len(test_dataset1)+len(test_dataset2)))
return total_time / (len(test_dataset1)+len(test_dataset2))
TestModel()
| 39.202454 | 119 | 0.620814 |
05230dfea4a1861ee60d5f6498aad7a053b4f087 | 9,086 | py | Python | bluezero/adapter.py | MarkusPiotrowski/python-bluezero | 82b79039e9f0cd041f10e655d3c0810a58b5cd85 | ["MIT"] | null | null | null | bluezero/adapter.py | MarkusPiotrowski/python-bluezero | 82b79039e9f0cd041f10e655d3c0810a58b5cd85 | ["MIT"] | null | null | null | bluezero/adapter.py | MarkusPiotrowski/python-bluezero | 82b79039e9f0cd041f10e655d3c0810a58b5cd85 | ["MIT"] | null | null | null |
"""Class and methods that represent a Bluetooth Adapter."""
from __future__ import absolute_import, print_function, unicode_literals
# D-Bus imports
import dbus
# python-bluezero imports
from bluezero import constants
from bluezero import dbus_tools
from bluezero import async_tools
from bluezero import device
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
logger.addHandler(NullHandler())
class AdapterError(Exception):
pass
def list_adapters():
"""Return list of adapters address available on system."""
paths = []
addresses = []
bus = dbus.SystemBus()
manager = dbus.Interface(
bus.get_object(constants.BLUEZ_SERVICE_NAME, '/'),
constants.DBUS_OM_IFACE)
manager_obj = manager.GetManagedObjects()
for path, ifaces in manager_obj.items():
if constants.ADAPTER_INTERFACE in ifaces:
paths.append(path)
addresses.append(
manager_obj[path][constants.ADAPTER_INTERFACE]['Address'])
if len(paths) < 1:
raise AdapterError('No Bluetooth adapter found')
else:
return addresses
class Adapter(object):
"""Bluetooth Adapter Class.
This class instantiates an object that interacts with the physical
Bluetooth device.
:Example:
>>> from bluezero import adapter
>>> dongle = adapter.Adapter()
>>> dongle.powered = True
"""
def __init__(self, adapter_addr=None):
"""Default initialiser.
Creates the interface to the local Bluetooth adapter device.
If an address is not given then the first device in the list is used.
:param adapter_addr: Address of Bluetooth adapter to use.
"""
self.bus = dbus.SystemBus()
if adapter_addr is None:
adapters = list_adapters()
if len(adapters) > 0:
adapter_addr = adapters[0]
self.path = dbus_tools.get_dbus_path(adapter=adapter_addr)
self.adapter_object = self.bus.get_object(
constants.BLUEZ_SERVICE_NAME,
self.path)
self.adapter_methods = dbus.Interface(self.adapter_object,
constants.ADAPTER_INTERFACE)
self.adapter_props = dbus.Interface(self.adapter_object,
dbus.PROPERTIES_IFACE)
self._nearby_timeout = 10
self._nearby_count = 0
self.mainloop = async_tools.EventLoop()
self.on_disconnect = None
self.on_device_found = None
self.bus.add_signal_receiver(self._interfaces_added,
dbus_interface=constants.DBUS_OM_IFACE,
signal_name='InterfacesAdded')
self.bus.add_signal_receiver(self._interfaces_removed,
dbus_interface=constants.DBUS_OM_IFACE,
signal_name='InterfacesRemoved')
self.bus.add_signal_receiver(self._properties_changed,
dbus_interface=dbus.PROPERTIES_IFACE,
signal_name='PropertiesChanged',
arg0=constants.DEVICE_INTERFACE,
path_keyword='path')
@property
def address(self):
"""Return the adapter MAC address."""
return self.adapter_props.Get(constants.ADAPTER_INTERFACE, 'Address')
@property
def name(self):
"""Return the adapter name."""
return self.adapter_props.Get(constants.ADAPTER_INTERFACE, 'Name')
@property
def bt_class(self):
"""Return the Bluetooth class of device."""
return self.adapter_props.Get(constants.ADAPTER_INTERFACE, 'Class')
@property
def alias(self):
"""Return the adapter alias.
:param new_alias: the new alias of the adapter.
"""
return self.adapter_props.Get(
constants.ADAPTER_INTERFACE, 'Alias')
@alias.setter
def alias(self, new_alias):
self.adapter_props.Set(
constants.ADAPTER_INTERFACE, 'Alias', new_alias)
def get_all(self):
"""Return dictionary of all the Adapter attributes."""
return self.adapter_props.GetAll(constants.ADAPTER_INTERFACE)
@property
def powered(self):
"""power state of the Adapter.
:param new_state: boolean.
"""
return self.adapter_props.Get(
constants.ADAPTER_INTERFACE, 'Powered')
@powered.setter
def powered(self, new_state):
self.adapter_props.Set(
constants.ADAPTER_INTERFACE, 'Powered', new_state)
@property
def pairable(self):
"""pairable state of the Adapter.
:param new_state: boolean.
"""
return self.adapter_props.Get(
constants.ADAPTER_INTERFACE, 'Pairable')
@pairable.setter
def pairable(self, new_state):
self.adapter_props.Set(
constants.ADAPTER_INTERFACE, 'Pairable', new_state)
@property
def pairabletimeout(self):
"""The pairable timeout of the Adapter."""
return self.adapter_props.Get(constants.ADAPTER_INTERFACE,
'PairableTimeout')
@pairabletimeout.setter
def pairabletimeout(self, new_timeout):
self.adapter_props.Set(constants.ADAPTER_INTERFACE,
'PairableTimeout', new_timeout)
@property
def discoverable(self):
"""Discoverable state of the Adapter."""
return self.adapter_props.Get(
constants.ADAPTER_INTERFACE, 'Discoverable')
@discoverable.setter
def discoverable(self, new_state):
self.adapter_props.Set(constants.ADAPTER_INTERFACE,
'Discoverable', new_state)
@property
def discoverabletimeout(self):
"""Discoverable timeout of the Adapter."""
return self.adapter_props.Get(constants.ADAPTER_INTERFACE,
'DiscoverableTimeout')
@discoverabletimeout.setter
def discoverabletimeout(self, new_timeout):
self.adapter_props.Set(constants.ADAPTER_INTERFACE,
'DiscoverableTimeout', new_timeout)
@property
def discovering(self):
"""Return whether the adapter is discovering."""
return self.adapter_props.Get(
constants.ADAPTER_INTERFACE, 'Discovering')
def _discovering_timeout(self):
"""Test to see if discovering should stop."""
self._nearby_count += 1
if self._nearby_count > self._nearby_timeout:
self.stop_discovery()
self.mainloop.quit()
return False
return True
@property
def uuids(self):
"""List of 128-bit UUIDs that represent available remote services."""
return self.adapter_props.Get(
constants.ADAPTER_INTERFACE, 'UUIDs')
def nearby_discovery(self, timeout=10):
"""Start discovery of nearby Bluetooth devices."""
self._nearby_timeout = timeout
self._nearby_count = 0
# GLib.timeout_add(1000, self._discovering_timeout)
self.mainloop.add_timer(1000, self._discovering_timeout)
self.adapter_methods.StartDiscovery()
self.mainloop.run()
def start_discovery(self):
"""
Start discovery of nearby Bluetooth devices.
:return: True on success otherwise False
"""
self.adapter_methods.StartDiscovery()
def stop_discovery(self):
"""Stop scanning of nearby Bluetooth devices."""
self.adapter_methods.StopDiscovery()
def run(self):
"""Start the EventLoop for async operations"""
self.mainloop.run()
def quit(self):
"""Stop the EventLoop for async operations"""
self.mainloop.quit()
def _properties_changed(self, interface, changed, invalidated, path):
"""
Handle DBus PropertiesChanged signal and
call appropriate user callback
"""
if self.on_disconnect is not None:
if 'Connected' in changed:
if not changed['Connected']:
self.on_disconnect()
def _interfaces_added(self, path, device_info):
"""
Handle DBus InterfacesAdded signal and
call appropriate user callback
"""
dev_iface = constants.DEVICE_INTERFACE
if constants.DEVICE_INTERFACE in device_info:
if self.on_device_found is not None:
new_dev = device.Device(
adapter_addr=self.address,
device_addr=device_info[dev_iface]['Address'])
self.on_device_found(new_dev)
def _interfaces_removed(self, path, device_info):
"""
Handle DBus InterfacesRemoved signal and
call appropriate user callback
"""
pass
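# Illustrative usage sketch based on the class above (the callback choice is hypothetical):
# dongle = Adapter()
# dongle.on_device_found = print          # called with a device.Device for each discovered device
# dongle.nearby_discovery(timeout=10)     # scans until _discovering_timeout stops the event loop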
| 31.880702 | 77 | 0.621175 |
f3c45868f1a650b84fc14670d3ae7a2bb3085388 | 4,368 | py | Python | code/training.py | parinitaedke/CNN-Binary-Classification | 7bdf59910417e7b295cbd6b4f065ad4ebf5ac03f | ["MIT"] | 4 | 2021-04-13T06:55:20.000Z | 2022-01-01T02:11:06.000Z | code/training.py | parinitaedke/CNN-Binary-Classification | 7bdf59910417e7b295cbd6b4f065ad4ebf5ac03f | ["MIT"] | 4 | 2021-06-08T22:40:36.000Z | 2022-03-12T00:50:47.000Z | code/training.py | parinitaedke/CNN-Binary-Classification | 7bdf59910417e7b295cbd6b4f065ad4ebf5ac03f | ["MIT"] | 1 | 2021-11-19T09:00:17.000Z | 2021-11-19T09:00:17.000Z |
"""
Author: Mauro Mendez.
Date: 02/11/2020.
File to implement the training and validation cycles.
"""
import torch
from barbar import Bar
from metrics import Metrics
def train(model, dataloader, optimizer, criterion, device):
"""
train Runs one epoch of training.
@param model Model to train.
@param dataloader Images to train with.
@param optimizer Optimizer to update weights.
@param criterion Loss criterion.
@param device Use of GPU.
"""
# Prepare the model
model.to(device)
model.train()
# Creates metrics recorder
metrics = Metrics()
# Iterates over batches
for (_, inputs, labels) in Bar(dataloader):
# Clean gradients in the optimizer
optimizer.zero_grad()
# Transforming inputs
inputs, labels = inputs.to(device), labels.to(device)
# Forward Pass
outputs = model(inputs)
# Get loss
loss = criterion(outputs, labels)
# Backward Pass, updates weights and optimizer
loss.backward()
optimizer.step()
# Register on metrics
_, predicted = torch.max(outputs.data, 1)
metrics.batch(labels=labels, preds=predicted, loss=loss.item())
# Print training metrics
metrics.print_one_liner()
return metrics.summary()
def validate(model, dataloader, criterion, device):
"""
validate Runs one epoch of validation.
@param model Model to train.
@param dataloader Images to train with.
@param criterion Loss criterion.
@param device Use of GPU.
"""
# Prepare the model
model.to(device)
model.eval()
# Creates metrics recorder
metrics = Metrics()
with torch.no_grad():
# Iterates over batches
for (_, inputs, labels) in Bar(dataloader):
# Transforming inputs
inputs, labels = inputs.to(device), labels.to(device)
# Forward Pass
outputs = model(inputs)
# Get loss
loss = criterion(outputs, labels)
# Register on metrics
_, predicted = torch.max(outputs.data, 1)
metrics.batch(labels=labels, preds=predicted, loss=loss.item())
# Print and return validation metrics
metrics.print_one_liner(phase='Val')
return metrics.summary()
def train_validate(model, train_loader, val_loader, optimizer,\
criterion, device, epochs, save_criteria, weights_path, save_name):
"""
train_validate Trains and validates a model.
@param model Model to train on.
@param train_loader Images to train with.
@param val_loader Images to use for validation.
@param optimizer Optimizer to update weights.
@param criterion Loss criterion.
@param device Use of GPU.
@param epochs Amount of epochs to train.
@param save_criteria What metric to use to save best weights.
@param weights_path Path to the folder to save best weights.
@param save_name Filename of the best weights.
"""
# Initial best model values
best_criteria = 0
best_model = {}
# Iterates over total epochs
for epoch in range(1, epochs+1):
print(f'Epoch {epoch}')
# Train
metrics = train(model, train_loader, optimizer, criterion, device)
# Validate
if val_loader:
metrics = validate(model, val_loader, criterion, device)
# Update best model
if save_criteria == 'Loss': metrics['Model Loss'][0] *= -1 # Change sign of loss
if epoch == 1 or metrics['Model '+save_criteria][0] >= best_criteria:
best_criteria = metrics['Model '+save_criteria][0]
best_model = {'epoch': epoch,\
'model_state_dict': model.state_dict(),\
'optimizer_state_dict': optimizer.state_dict(),\
'accuracy': metrics['Model Accuracy'][0],\
'loss': metrics["Model Loss"][0],\
'sensitivity': metrics["Model Sensitivity"][0],\
'specificity': metrics["Model Specificity"][0]}
# Save model
save_path = '{}{}_{}_{:.6}.pth'.format(weights_path, save_name,\
save_criteria, str(best_criteria).replace('.', '_'))
torch.save(best_model, save_path)
return save_path
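# Hypothetical usage sketch (model, dataloaders, optimizer and criterion are assumed to exist):
# best_path = train_validate(model, train_loader, val_loader, optimizer, criterion,
#                            device, epochs=50, save_criteria='Accuracy',
#                            weights_path='./weights/', save_name='cnn')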
| 29.714286 | 88 | 0.614927 |
53ca7f959c582f5b6fe63abc8de475863f026174 | 16,663 | py | Python | src/cct.py | Babars7/SDPS-Net | ea96c6933485c5a50e4151179b6d2fea898b1898 | ["MIT"] | null | null | null | src/cct.py | Babars7/SDPS-Net | ea96c6933485c5a50e4151179b6d2fea898b1898 | ["MIT"] | null | null | null | src/cct.py | Babars7/SDPS-Net | ea96c6933485c5a50e4151179b6d2fea898b1898 | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .transformers import TransformerEncoderLayer
__all__ = ['cct_2', 'cct_4', 'cct_6', 'cct_7', 'cct_8',
'cct_10', 'cct_12', 'cct_24', 'cct_32',
'cvt_2', 'cvt_4', 'cvt_6', 'cvt_7', 'cvt_8',
'cvt_10', 'cvt_12', 'cvt_24', 'cvt_32',
'vit_lite_2', 'vit_lite_4', 'vit_lite_6',
'vit_lite_7', 'vit_lite_8', 'vit_lite_10',
'vit_lite_12', 'vit_lite_24', 'vit_lite_32']
class Tokenizer(nn.Module):
def __init__(self,
kernel_size, stride, padding,
pooling_kernel_size=3, pooling_stride=2, pooling_padding=1,
n_conv_layers=1,
n_input_channels=4,
n_output_channels=64,
in_planes=64,
activation=None,
max_pool=True):
super(Tokenizer, self).__init__()
n_filter_list = [n_input_channels] + \
[in_planes for _ in range(n_conv_layers - 1)] + \
[n_output_channels]
self.conv_layers = nn.Sequential(
*[nn.Sequential(
nn.Conv2d(n_filter_list[i], n_filter_list[i + 1],
kernel_size=(kernel_size, kernel_size),
stride=(stride, stride),
padding=(padding, padding), bias=False),
nn.Identity() if activation is None else activation(),
nn.MaxPool2d(kernel_size=pooling_kernel_size,
stride=pooling_stride,
padding=pooling_padding) if max_pool else nn.Identity()
)
for i in range(n_conv_layers)
])
self.flattener = nn.Flatten(2, 3)
self.apply(self.init_weight)
def sequence_length(self, n_channels=4, height=224, width=224):
return self.forward(torch.zeros((1, n_channels, height, width))).shape[1]
def forward(self, x):
return self.flattener(self.conv_layers(x)).transpose(-2, -1)
@staticmethod
def init_weight(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
class TransformerClassifier(nn.Module):
def __init__(self,
seq_pool=True,
embedding_dim=768,
num_layers=12,
num_heads=12,
mlp_ratio=4.0,
#num_classes=1000,
dropout_rate=0.1,
attention_dropout=0.1,
stochastic_depth_rate=0.1,
positional_embedding='sine',
sequence_length=None,
*args, **kwargs):
super().__init__()
positional_embedding = positional_embedding if \
positional_embedding in ['sine', 'learnable', 'none'] else 'sine'
dim_feedforward = int(embedding_dim * mlp_ratio)
self.embedding_dim = embedding_dim
self.sequence_length = sequence_length
self.seq_pool = seq_pool
assert sequence_length is not None or positional_embedding == 'none', \
f"Positional embedding is set to {positional_embedding} and" \
f" the sequence length was not specified."
if not seq_pool:
sequence_length += 1
self.class_emb = nn.Parameter(torch.zeros(1, 1, self.embedding_dim),
requires_grad=True)
else:
self.attention_pool = nn.Linear(self.embedding_dim, 1)
if positional_embedding != 'none':
if positional_embedding == 'learnable':
self.positional_emb = nn.Parameter(torch.zeros(1, sequence_length, embedding_dim),
requires_grad=True)
nn.init.trunc_normal_(self.positional_emb, std=0.2)
else:
self.positional_emb = nn.Parameter(self.sinusoidal_embedding(sequence_length, embedding_dim),
requires_grad=False)
else:
self.positional_emb = None
self.dropout = nn.Dropout(p=dropout_rate)
dpr = [x.item() for x in torch.linspace(0, stochastic_depth_rate, num_layers)]
self.blocks = nn.ModuleList([
TransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads,
dim_feedforward=dim_feedforward, dropout=dropout_rate,
attention_dropout=attention_dropout, drop_path_rate=dpr[i])
for i in range(num_layers)])
self.norm = nn.LayerNorm(embedding_dim)
self.fc_dirx = nn.Linear(embedding_dim, 36) #it is here I have to modify MLP Head
self.fc_diry = nn.Linear(embedding_dim, 36)
self.fc_intens = nn.Linear(embedding_dim, 20)
self.apply(self.init_weight)
def forward(self, x):
#print('classifierinput', x.shape)
if self.positional_emb is None and x.size(1) < self.sequence_length: #note done
x = F.pad(x, (0, 0, 0, self.n_channels - x.size(1)), mode='constant', value=0)
if not self.seq_pool:
cls_token = self.class_emb.expand(x.shape[0], -1, -1) #not done
x = torch.cat((cls_token, x), dim=1)
if self.positional_emb is not None: #this is done
x += self.positional_emb
#print('afterpositionalembeding', x.shape)
x = self.dropout(x)
for blk in self.blocks: #Transformer encoder layer
x = blk(x)
x = self.norm(x)
#print('aftertransformer', x.shape)
if self.seq_pool: #Sequence Pooling
x = torch.matmul(F.softmax(self.attention_pool(x), dim=1).transpose(-1, -2), x).squeeze(-2)
else:
x = x[:, 0]
##################
#print('afterseqpool', x.shape)
#x_out = self.fc_dirx(x)
#print('afterMLP1', x_out.shape)
#y_out = self.fc_diry(x)
#print('afterMLP2', y_out.shape)
#ints_out = self.fc_intens(x)
#return x_out, y_out, ints_out
######
return x
@staticmethod
def init_weight(m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@staticmethod
def sinusoidal_embedding(n_channels, dim):
pe = torch.FloatTensor([[p / (10000 ** (2 * (i // 2) / dim)) for i in range(dim)]
for p in range(n_channels)])
pe[:, 0::2] = torch.sin(pe[:, 0::2])
pe[:, 1::2] = torch.cos(pe[:, 1::2])
return pe.unsqueeze(0)
class ViTLite(nn.Module):
def __init__(self,
img_size=224,
embedding_dim=768,
n_input_channels=4,
patch_size=16,
*args, **kwargs):
super(ViTLite, self).__init__()
assert img_size % patch_size == 0, f"Image size ({img_size}) has to be" \
f"divisible by patch size ({patch_size})"
self.tokenizer = Tokenizer(n_input_channels=n_input_channels,
n_output_channels=embedding_dim,
kernel_size=patch_size,
stride=patch_size,
padding=0,
max_pool=False,
activation=None,
n_conv_layers=1)
self.classifier = TransformerClassifier(
sequence_length=self.tokenizer.sequence_length(n_channels=n_input_channels,
height=img_size,
width=img_size),
embedding_dim=embedding_dim,
seq_pool=False,
dropout=0.1,
attention_dropout=0.,
stochastic_depth=0.,
*args, **kwargs)
def forward(self, x):
x = self.tokenizer(x)
return self.classifier(x)
class CVT(nn.Module):
def __init__(self,
img_size=224,
embedding_dim=768,
n_input_channels=4,
patch_size=16,
*args, **kwargs):
super(CVT, self).__init__()
assert img_size % patch_size == 0, f"Image size ({img_size}) has to be" \
f"divisible by patch size ({patch_size})"
self.tokenizer = Tokenizer(n_input_channels=n_input_channels,
n_output_channels=embedding_dim,
kernel_size=patch_size,
stride=patch_size,
padding=0,
max_pool=False,
activation=None,
n_conv_layers=1)
self.classifier = TransformerClassifier(
sequence_length=self.tokenizer.sequence_length(n_channels=n_input_channels,
height=img_size,
width=img_size),
embedding_dim=embedding_dim,
seq_pool=True,
dropout=0.,
attention_dropout=0.1,
stochastic_depth=0.1,
*args, **kwargs)
def forward(self, x):
x = self.tokenizer(x)
return self.classifier(x)
class CCT(nn.Module):
def __init__(self,
img_size=224,
embedding_dim=768,
n_input_channels=4,
n_conv_layers=1,
kernel_size=7,
stride=2,
padding=3,
pooling_kernel_size=3,
pooling_stride=2,
pooling_padding=1,
*args, **kwargs):
super(CCT, self).__init__()
self.tokenizer = Tokenizer(n_input_channels=n_input_channels,
n_output_channels=embedding_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
pooling_kernel_size=pooling_kernel_size,
pooling_stride=pooling_stride,
pooling_padding=pooling_padding,
max_pool=True,
activation=nn.ReLU,
n_conv_layers=n_conv_layers)
self.classifier = TransformerClassifier(
sequence_length=self.tokenizer.sequence_length(n_channels=n_input_channels,
height=img_size,
width=img_size),
embedding_dim=embedding_dim,
seq_pool=True,
dropout=0.,
attention_dropout=0.1,
stochastic_depth=0.1,
*args, **kwargs)
def forward(self, x):
x = self.tokenizer(x)
return self.classifier(x)
def _cct(num_layers, num_heads, mlp_ratio, embedding_dim,
kernel_size=3, stride=None, padding=None,
*args, **kwargs):
#print('2')
stride = stride if stride is not None else max(1, (kernel_size // 2) - 1)
padding = padding if padding is not None else max(1, (kernel_size // 2))
return CCT(num_layers=num_layers,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
embedding_dim=embedding_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
*args, **kwargs)
def _cvt(num_layers, num_heads, mlp_ratio, embedding_dim,
patch_size=4, *args, **kwargs):
return CVT(num_layers=num_layers,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
embedding_dim=embedding_dim,
patch_size=patch_size,
*args, **kwargs)
def _vit_lite(num_layers, num_heads, mlp_ratio, embedding_dim,
patch_size=4, *args, **kwargs):
return ViTLite(num_layers=num_layers,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
embedding_dim=embedding_dim,
patch_size=patch_size,
*args, **kwargs)
def cct_2(*args, **kwargs):
return _cct(num_layers=2, num_heads=2, mlp_ratio=1, embedding_dim=128,
*args, **kwargs)
def cct_4(*args, **kwargs):
return _cct(num_layers=4, num_heads=2, mlp_ratio=1, embedding_dim=128,
*args, **kwargs)
def cct_6(*args, **kwargs):
return _cct(num_layers=6, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def cct_7(*args, **kwargs):
#print('1')
return _cct(num_layers=7, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def cct_8(*args, **kwargs):
return _cct(num_layers=8, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def cct_10(*args, **kwargs):
return _cct(num_layers=10, num_heads=8, mlp_ratio=3, embedding_dim=512,
*args, **kwargs)
def cct_12(*args, **kwargs):
return _cct(num_layers=12, num_heads=12, mlp_ratio=4, embedding_dim=768,
*args, **kwargs)
def cct_24(*args, **kwargs):
return _cct(num_layers=24, num_heads=16, mlp_ratio=4, embedding_dim=1024,
*args, **kwargs)
def cct_32(*args, **kwargs):
return _cct(num_layers=32, num_heads=16, mlp_ratio=4, embedding_dim=1280,
*args, **kwargs)
def cvt_2(*args, **kwargs):
return _cvt(num_layers=2, num_heads=2, mlp_ratio=1, embedding_dim=128,
*args, **kwargs)
def cvt_4(*args, **kwargs):
return _cvt(num_layers=4, num_heads=2, mlp_ratio=1, embedding_dim=128,
*args, **kwargs)
def cvt_6(*args, **kwargs):
return _cvt(num_layers=6, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def cvt_7(*args, **kwargs):
return _cvt(num_layers=7, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def cvt_8(*args, **kwargs):
return _cvt(num_layers=8, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def cvt_10(*args, **kwargs):
return _cvt(num_layers=10, num_heads=8, mlp_ratio=3, embedding_dim=512,
*args, **kwargs)
def cvt_12(*args, **kwargs):
return _cvt(num_layers=12, num_heads=12, mlp_ratio=4, embedding_dim=768,
*args, **kwargs)
def cvt_24(*args, **kwargs):
return _cvt(num_layers=24, num_heads=16, mlp_ratio=4, embedding_dim=1024,
*args, **kwargs)
def cvt_32(*args, **kwargs):
return _cvt(num_layers=32, num_heads=16, mlp_ratio=4, embedding_dim=1280,
*args, **kwargs)
def vit_lite_2(*args, **kwargs):
return _vit_lite(num_layers=2, num_heads=2, mlp_ratio=1, embedding_dim=128,
*args, **kwargs)
def vit_lite_4(*args, **kwargs):
return _vit_lite(num_layers=4, num_heads=2, mlp_ratio=1, embedding_dim=128,
*args, **kwargs)
def vit_lite_6(*args, **kwargs):
return _vit_lite(num_layers=6, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def vit_lite_7(*args, **kwargs):
return _vit_lite(num_layers=7, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def vit_lite_8(*args, **kwargs):
return _vit_lite(num_layers=8, num_heads=4, mlp_ratio=2, embedding_dim=256,
*args, **kwargs)
def vit_lite_10(*args, **kwargs):
return _vit_lite(num_layers=10, num_heads=8, mlp_ratio=3, embedding_dim=512,
*args, **kwargs)
def vit_lite_12(*args, **kwargs):
return _vit_lite(num_layers=12, num_heads=12, mlp_ratio=4, embedding_dim=768,
*args, **kwargs)
def vit_lite_24(*args, **kwargs):
return _vit_lite(num_layers=24, num_heads=16, mlp_ratio=4, embedding_dim=1024,
*args, **kwargs)
def vit_lite_32(*args, **kwargs):
return _vit_lite(num_layers=32, num_heads=16, mlp_ratio=4, embedding_dim=1280,
*args, **kwargs)
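# Hypothetical instantiation sketch (argument values are assumptions, not project defaults):
# model = cct_7(img_size=128, n_input_channels=4, positional_embedding='learnable')
# features = model(torch.zeros(1, 4, 128, 128))   # tokenizer -> transformer -> pooled embedding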
| 36.541667 | 109 | 0.541919 |
6051efe76158a31874e9d80e0577af7e190f23c9 | 1,117 | py | Python | graphbrain/meaning/concepts.py | danielvasic/CroatianGraphBrain | 01177c1441129792c8fbde3f75092bab6856af6f | ["MIT"] | 1 | 2021-04-24T04:52:31.000Z | 2021-04-24T04:52:31.000Z | graphbrain/meaning/concepts.py | danielvasic/CroatianGraphBrain | 01177c1441129792c8fbde3f75092bab6856af6f | ["MIT"] | null | null | null | graphbrain/meaning/concepts.py | danielvasic/CroatianGraphBrain | 01177c1441129792c8fbde3f75092bab6856af6f | ["MIT"] | null | null | null |
def strip_concept(edge):
    """Strip away nesting edges with connectors such as triggers and
    subpredicates, to expose the outermost and leftmost concept that can be
    found. May be the edge itself.

    For example:
    (against/t (the/m (of/b treaty/c paris/c)))
    becomes
    (the/m (of/b treaty/c paris/c))
    """
    if edge.type()[0] == 'c':
        return edge
    elif not edge.is_atom():
        return strip_concept(edge[1])
    else:
        return None


def has_proper_concept(edge):
    """Check if the concept either is a proper edge, or contains one."""
    if edge.is_atom():
        return edge.type()[:2] == 'cp'
    else:
        for subedge in edge[1:]:
            if has_proper_concept(subedge):
                return True
        return False


def all_concepts(edge):
    """Recursively search for all concepts contained in the edge, returning
    a set that can also contain itself."""
    concepts = set()
    if edge.type()[0] == 'c':
        concepts.add(edge)
    if not edge.is_atom():
        for item in edge:
            concepts |= all_concepts(item)
    return concepts
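# Behaviour recap drawn from the docstrings above: for the edge
# (against/t (the/m (of/b treaty/c paris/c))), strip_concept returns
# (the/m (of/b treaty/c paris/c)); has_proper_concept reports whether any contained atom has
# type 'cp'; all_concepts collects every subedge whose type starts with 'c'.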
| 25.976744 | 75 | 0.607878 |
0c8e67e6046d248bf2b634c70b31e6be4939d969 | 141 | py | Python | start.py | ErikLetson/Lottobot2 | 87e81bb653c70c94e4d3fd5b368d07f63b575651 | ["MIT"] | null | null | null | start.py | ErikLetson/Lottobot2 | 87e81bb653c70c94e4d3fd5b368d07f63b575651 | ["MIT"] | null | null | null | start.py | ErikLetson/Lottobot2 | 87e81bb653c70c94e4d3fd5b368d07f63b575651 | ["MIT"] | null | null | null |
from lib import lottobot


def Main():
    l = lottobot.Lottobot(None, 'lottobot')
    l.mainloop()


if __name__ == '__main__':
    Main()
| 11.75 | 43 | 0.624113 |
4a214ca13dc21d98f0d35535dcd7ebe6890a226a | 24,668 | py | Python | Exp 4 4 with USRCAT/evolution_functions.py | Mattarian/GAD-USRCAT | 306a21e8afbeff3d8b56ae4641ac1195a12f2036 | ["Apache-2.0"] | null | null | null | Exp 4 4 with USRCAT/evolution_functions.py | Mattarian/GAD-USRCAT | 306a21e8afbeff3d8b56ae4641ac1195a12f2036 | ["Apache-2.0"] | null | null | null | Exp 4 4 with USRCAT/evolution_functions.py | Mattarian/GAD-USRCAT | 306a21e8afbeff3d8b56ae4641ac1195a12f2036 | ["Apache-2.0"] | null | null | null |
from __future__ import print_function
import os
import rdkit
import shutil
import multiprocessing
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import MolFromSmiles as smi2mol
from rdkit.Chem import MolToSmiles as mol2smi
from rdkit.Chem import Descriptors
from rdkit.Chem import rdMolDescriptors
from rdkit import DataStructs
from selfies import decoder
import numpy as np
import inspect
from collections import OrderedDict
manager = multiprocessing.Manager()
lock = multiprocessing.Lock()
def get_logP(mol):
'''Calculate logP of a molecule
Parameters:
mol (rdkit.Chem.rdchem.Mol) : RdKit mol object, for which logP is to be calculated
Returns:
float : logP of molecule (mol)
'''
return Descriptors.MolLogP(mol)
def molecule_similarity(mol, target, radius=2, nBits=2048,
useChirality=True):
"""
Reward for a target molecule similarity, based on tanimoto similarity
between the ECFP fingerprints of the x molecule and target molecule
:param mol: rdkit mol object
:param target: rdkit mol object
:return: float, [0.0, 1.0]
"""
x = rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, radius=radius,
nBits=nBits,
useChirality=useChirality)
target = rdMolDescriptors.GetMorganFingerprintAsBitVect(target,
radius=radius,
nBits=nBits,
useChirality=useChirality)
return DataStructs.TanimotoSimilarity(x, target)
def make_clean_results_dir():
# Create the results folder
root_folder = './results'
if not os.path.exists(root_folder):
os.makedirs(root_folder)
else:
shutil.rmtree(root_folder)
os.makedirs(root_folder)
return root_folder
def make_clean_directories(beta, root_folder, iteration):
'''Create or clean directories: 'images' & 'saved_models'
Create directories from scratch, if they do not exist
Clean (remove all content) if directories already exist
    Parameters:
        beta, root_folder, iteration : used to name the per-run output directories
Returns:
None : Folders in current directory modified
'''
image_dir= root_folder + '/images_generation_' + str(beta) + '_' + str(iteration)
if not os.path.exists(image_dir):
os.makedirs(image_dir)
else:
if len(os.listdir(image_dir)) > 0:
os.system("rm -r %s/*"%(image_dir))
models_dir = root_folder + '/saved_models_' + str(beta) + '_' + str(iteration)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
else:
if len(os.listdir(models_dir)) > 0:
os.system("rm -r %s/*"%(models_dir))
data_dir = root_folder + '/results_' + str(beta) + '_' + str(iteration)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
else:
if len(os.listdir(data_dir)) > 0:
os.system("rm -r %s/*"%(data_dir))
return (image_dir, models_dir, data_dir)
def sanitize_smiles(smi):
'''Return a canonical smile representation of smi
Parameters:
smi (string) : smile string to be canonicalized
Returns:
mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)
smi_canon (string) : Canonicalized smile representation of smi (None if invalid smile string smi)
conversion_successful (bool): True/False to indicate if conversion was successful
'''
try:
mol = smi2mol(smi, sanitize=True)
smi_canon = mol2smi(mol, isomericSmiles=False, canonical=True)
return (mol, smi_canon, True)
except:
return (None, None, False)
def sanitize_multiple_smiles(smi_ls):
'''Calls function sanitize_smiles for each item in list smi_ls
'''
sanitized_smiles = []
for smi in smi_ls:
smi_converted = sanitize_smiles(smi)
sanitized_smiles.append(smi_converted[1])
if smi_converted[2] == False or smi_converted[1] == '':
            raise Exception("Invalid SMILE encountered. Value =", smi)
return sanitized_smiles
def read_dataset(filename):
'''Return a list of smiles contained in file filename
Parameters:
        filename (string) : Name of file containing smiles separated by '\n'
    Returns
        content (list) : list of smile strings in file filename
'''
with open(filename) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
def read_dataset_encoding(disc_enc_type):
'''Return zinc-data set based on disc_enc_type choice of 'smiles' or 'selfies'
Parameters:
disc_enc_type (string): 'smiles' or 'selfies'
'''
if disc_enc_type == 'smiles' or disc_enc_type == 'properties_rdkit':
smiles_reference = read_dataset(filename='./datasets/zinc_dearom.txt')
return smiles_reference
elif disc_enc_type == 'selfies':
selfies_reference = read_dataset(filename='./datasets/SELFIES_zinc.txt')
return selfies_reference
def create_100_mol_image(mol_list, file_name, fitness, logP, SAS, RingCount, USRSim): #!#
'''Create a single picture of multiple molecules in a single Grid.
'''
assert len(mol_list) == 100
if logP == None and SAS == None and RingCount == None:
Draw.MolsToGridImage(mol_list, molsPerRow=10, subImgSize=(200,200)).save(file_name)
return
for i,m in enumerate(mol_list):
m.SetProp('_Name','%s %s %s %s %s' % (round(fitness[i], 3), round(logP[i], 3), round(SAS[i], 3), round(RingCount[i], 3), round(USRSim[i], 3)))
try:
Draw.MolsToGridImage(mol_list, molsPerRow=10, subImgSize=(200,200), legends=[x.GetProp("_Name") for x in mol_list]).save(file_name)
except:
print('Failed to produce image!')
return
def get_selfie_chars(selfie):
'''Obtain a list of all selfie characters in string selfie
Parameters:
selfie (string) : A selfie string - representing a molecule
Example:
>>> get_selfie_chars('[C][=C][C][=C][C][=C][Ring1][Branch1_1]')
['[C]', '[=C]', '[C]', '[=C]', '[C]', '[=C]', '[Ring1]', '[Branch1_1]']
Returns:
chars_selfie: list of selfie characters present in molecule selfie
'''
    chars_selfie = [] # A list of all SELFIE symbols from string selfie
while selfie != '':
chars_selfie.append(selfie[selfie.find('['): selfie.find(']')+1])
selfie = selfie[selfie.find(']')+1:]
return chars_selfie
def smiles_alphabet(disc_enc_type):
'''Return a list of characters present in the zinc dataset
Parameters:
        disc_enc_type (string): Indicates whether to return SMILES/SELFIES characters
Returns:
alphabet: list of SELFIE/SMILE alphabets in Zinc
'''
if disc_enc_type == 'smiles':
alphabet = ['C', 'c', 'H','O','o', 'N','n', 'S','s', 'F', 'P', 'I',
'Cl','Br', '=','#','(',')','[',']','1','2','3','4','5',
'6','7','8','9','+','-','X'] # SMILES Alphabets in zinc
elif disc_enc_type == 'selfies':
alphabet = ['[Ring1]', '[Branch1_1]', '[Branch1_2]','[Branch1_3]', '[Cl]',
'[Ring2]', '[Branch2_1]', '[Branch2_2]','[Branch2_3]', '[NH3+]',
'[N]', '[=N]', '[#N]', '[C]', '[=C]',
'[#C]', '[S]', '[=S]', '[=O]', '[Br]',
'[epsilon]', '[N+]', '[NH+]', '[NH2+]', '[=NH+]',
'[=NH2+]', '[I]', '[O-]', '[P]', '[=P]',
'[S-]', '[=N-]', '[NH-]', '[=O+]', '[CH-]',
'[PH+]', '[=S+]', '[S+]', '[CH2-]', '[P+]',
'[O+]', '[=N+]', '[N-]' , '[=SH+]', '[=OH+]',
'[#N+]', '[=PH2]', 'X', '[F]', '[O]',
] # SELFIES Alphabets in zinc
else:
exit('Invalid choice. Only possible choices are: smiles/selfies.')
return alphabet
def _to_onehot(molecule_str, disc_enc_type, max_molecules_len):
'''Convert given molecule string into a one-hot encoding, with characters
obtained from function 'smiles_alphabet'.
One-hot encoding of arbitrary molecules is converted to len
'max_molecules_len' by padding with character 'X'
Parameters:
molecule_str (string): SMILE/SELFIE string of molecule
        disc_enc_type (string): Indicating whether the molecule string is a
                                SMILE or a SELFIE
max_molecules_len (string): Length of the one-hot encoding
Returns:
one_hots (list of lists): One-Hot encoding of molecule string, padding
till length max_molecules_len (dim: len(alphabet) * max_molecules_len)
'''
one_hots=[]
alphabet = smiles_alphabet(disc_enc_type)
alphabet_length = len(alphabet)
if disc_enc_type == 'smiles':
alphabet.remove('Cl') # Replace 'Cl' & 'Br' with 'Y' & 'Z' for convenience
alphabet.remove('Br') # (Searching for single characters is easier)
alphabet.append('Y')
alphabet.append('Z')
for smi in molecule_str:
        # Replace 'Cl' and 'Br' with 'Y', 'Z' in smi (for convenience)
if disc_enc_type == 'smiles':
smi = smi.replace('Cl', 'Y')
smi = smi.replace('Br', 'Z')
one_hot=[]
if disc_enc_type == 'selfies':
smi = get_selfie_chars(smi)
if len(smi) > max_molecules_len:
exit("Molecule is too large!")
for char in smi:
if char not in alphabet:
print("smiles character %s not in alphabet MOLECULE: %s"%(char, smi))
zeros = np.zeros((alphabet_length)).astype(np.int32).tolist()
zeros[alphabet.index(char)] = 1
one_hot+=zeros
# Padding with 'X's
for char in range(max_molecules_len-len(smi)):
zeros = np.zeros((alphabet_length)).astype(np.int32).tolist()
zeros[alphabet.index("X")] = 1
one_hot += zeros
one_hots.append(one_hot)
one_hots = np.array(one_hots)
return (one_hots)
def mutations_random_grin(selfie, max_molecules_len, write_fail_cases=False):
'''Return a mutated selfie string
Mutations are done until a valid molecule is obtained
    Rules of mutation: With a 50% probability, either:
1. Add a random SELFIE character in the string
2. Replace a random SELFIE character with another
Parameters:
selfie (string) : SELFIE string to be mutated
max_molecules_len (int) : Mutations of SELFIE string are allowed up to this length
write_fail_cases (bool) : If true, failed mutations are recorded in "selfie_failure_cases.txt"
Returns:
selfie_mutated (string) : Mutated SELFIE string
smiles_canon (string) : canonical smile of mutated SELFIE string
'''
valid=False
fail_counter = 0
chars_selfie = get_selfie_chars(selfie)
while not valid:
fail_counter += 1
alphabet = ['[Branch1_1]', '[Branch1_2]','[Branch1_3]', '[epsilon]', '[Ring1]', '[Ring2]', '[Branch2_1]', '[Branch2_2]', '[Branch2_3]', '[F]', '[O]', '[=O]', '[N]', '[=N]', '[#N]', '[C]', '[=C]', '[#C]', '[S]', '[=S]', '[C][=C][C][=C][C][=C][Ring1][Branch1_1]']
# Insert a character in a Random Location
if np.random.random() < 0.5:
random_index = np.random.randint(len(chars_selfie)+1)
random_character = np.random.choice(alphabet, size=1)[0]
selfie_mutated_chars = chars_selfie[:random_index] + [random_character] + chars_selfie[random_index:]
# Replace a random character
else:
random_index = np.random.randint(len(chars_selfie))
random_character = np.random.choice(alphabet, size=1)[0]
if random_index==0:
selfie_mutated_chars = [random_character] + chars_selfie[random_index+1:]
else:
selfie_mutated_chars = chars_selfie[:random_index] + [random_character] + chars_selfie[random_index+1:]
selfie_mutated = "".join(x for x in selfie_mutated_chars)
sf = "".join(x for x in chars_selfie)
try:
smiles = decoder(selfie_mutated)
mol, smiles_canon, done = sanitize_smiles(smiles)
if len(smiles_canon) > max_molecules_len or smiles_canon=="":
done=False
if done:
valid=True
else:
valid=False
except:
valid=False
if fail_counter > 1 and write_fail_cases == True:
f = open("selfie_failure_cases.txt", "a+")
f.write('Tried to mutate SELFIE: '+str(sf)+' To Obtain: '+str(selfie_mutated) + '\n')
f.close()
return (selfie_mutated, smiles_canon)
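def _example_mutation():
    # Illustrative only: mutate the SELFIES string for ethanol ('[C][C][O]', which
    # decodes to 'CCO') and return the mutated SELFIES and its canonical SMILES.
    return mutations_random_grin('[C][C][O]', max_molecules_len=100)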
def count_atoms(mol, atomic_num):
'''Count the number of atoms in mol with atomic number atomic_num
Parameters:
mol (rdkit.Chem.rdchem.Mol) : Molecule in which search is conducted
atomic_num (int) : Counting is done in mol for atoms with this atomic number
Returns:
        (int) : final count of atoms
'''
pat = Chem.MolFromSmarts("[#{}]".format(atomic_num))
return len(mol.GetSubstructMatches(pat))
def get_num_bond_types(mol):
'''Calculate the ratio of total number of (single, double, triple, aromatic) bonds to the
total number of bonds.
Parameters:
        mol (rdkit.Chem.rdchem.Mol) : Molecule for which the ratios are returned
Returns:
(list): [num_single/num_bonds, num_double/num_bonds, num_triple/num_bonds, num_aromatic/num_bonds]
'''
bonds = mol.GetBonds()
num_bonds = 0
num_double = 0
num_triple = 0
num_single = 0
num_aromatic = 0
for b in bonds:
num_bonds += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.SINGLE:
num_single += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.DOUBLE:
num_double += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.TRIPLE:
num_triple += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.AROMATIC:
num_aromatic += 1
if num_bonds == 0:
return [0, 0, 0, 0]
else:
return [num_single/num_bonds, num_double/num_bonds, num_triple/num_bonds, num_aromatic/num_bonds]
def count_conseq_double(mol):
    '''Return the number of consecutive double bonds in an entire molecule
including rings
Examples
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC=C=C=C1'))
2
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC=CC=C1'))
0
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC2=C(C=C1)C=C=C=C2'))
2
Parameters:
        mol (rdkit.Chem.rdchem.Mol) : Molecule for which consecutive double bonds are to be counted
Returns:
        (int): The integer number of consecutive double bonds
'''
bonds = mol.GetBonds()
previous_BType = None
count_conseq_doub = 0
for b in bonds:
curr_BType = b.GetBondType()
if previous_BType == curr_BType and curr_BType == rdkit.Chem.rdchem.BondType.DOUBLE:
count_conseq_doub += 1
previous_BType = curr_BType
return count_conseq_doub
def get_rot_bonds_posn(mol):
'''Return atom indices with Rotatable bonds
Examples:
>>> get_rot_bonds_posn('CC1=CC=CC=C1') # Toluene (Rotatable Bonds At: CH3 & Benzene)
((0, 1),)
>>> get_rot_bonds_posn('CCC1=CC=CC=C1') # (Rotatable Bonds At: CH3, CH3 & Benzene)
((0, 1), (1, 2))
'''
RotatableBond = Chem.MolFromSmarts('*-&!@*')
rot = mol.GetSubstructMatches(RotatableBond)
return rot
def get_bond_indeces(mol, rot):
    '''Get the bond indices of all rotatable bonds (atom pairs generated by 'get_rot_bonds_posn')
'''
bonds_idx = []
for i in range(len(rot)):
bond = mol.GetBondBetweenAtoms(rot[i][0],rot[i][1])
bonds_idx.append(bond.GetIdx())
return bonds_idx
def obtain_rings(smi):
'''Obtain a list of all rings present in SMILE string smi
Examples:
>>> obtain_rings('CCC1=CC=CC=C1')
['c1ccccc1']
>>> obtain_rings('C1=CC=C(C=C1)C1=CC=CC=C1')
['c1ccccc1', 'c1ccccc1']
>>> obtain_rings('C1=CC2=C(C=C1)C=CC=C2')
(None, None)
Parameters:
smi (string) : SMILE string of a molecule
Returns
        (list) : List of all rings in a SMILE string
'''
mol = Chem.MolFromSmiles(smi)
    rot = get_rot_bonds_posn(mol) # Get rotatable bond positions
if len(rot) == 0:
return None, None
bond_idx = get_bond_indeces(mol, rot)
new_mol = Chem.FragmentOnBonds(mol, bond_idx, addDummies=False)
new_smile = Chem.MolToSmiles(new_mol)
smile_split_list = new_smile.split(".")
rings = []
for item in smile_split_list:
if '1' in item:
rings.append(item)
return rings
def size_ring_counter(ring_ls):
    '''Get the number of rings of sizes 3 to 20 and the number of consecutive double bonds in a ring
Parameters:
ring_ls (list) : list of rings of a molecule
Returns
(list) : Of size 19 (1 for number of conseq. double bonds)
(18 for number of rings between size 3 to 20)
'''
ring_counter = []
    if ring_ls == (None, None): # Presence of no rings, return 0s for the 19 features
return [0 for i in range(19)]
mol_ring_ls = [Chem.MolFromSmiles(smi) for smi in ring_ls]
    # Count the number of consecutive double bonds in rings
conseq_dbl_bnd_in_ring = 0
for item in mol_ring_ls:
conseq_dbl_bnd_in_ring += count_conseq_double(item)
ring_counter.append(conseq_dbl_bnd_in_ring) # concatenate onto list ring_counter
    # Count the number of rings of each size from 3 to 20
for i in range(3, 21):
count = 0
for mol_ring in mol_ring_ls:
if mol_ring.GetNumAtoms() == i:
count += 1
ring_counter.append(count)
return ring_counter
def get_mol_info(smi):
''' Calculate a set of 51 RdKit properties, collected from above helper functions.
Parameters:
smi (string) : SMILE string of molecule
Returns:
(list of float) : list of 51 calculated properties
'''
mol = Chem.MolFromSmiles(smi)
num_atoms = mol.GetNumAtoms()
num_hydro = Chem.AddHs(mol).GetNumAtoms() - num_atoms
num_carbon = count_atoms(mol, 6)
num_nitro = count_atoms(mol, 7)
num_sulphur = count_atoms(mol, 16)
num_oxy = count_atoms(mol, 8)
num_clorine = count_atoms(mol, 17)
num_bromine = count_atoms(mol, 35)
num_florine = count_atoms(mol, 9)
if num_carbon == 0: # Avoid division by zero error, set num_carbon to a very small value
num_carbon = 0.0001
basic_props = [num_atoms/num_carbon, num_hydro/num_carbon, num_nitro/num_carbon,
num_sulphur/num_carbon, num_oxy/num_carbon, num_clorine/num_carbon,
num_bromine/num_carbon, num_florine/num_carbon]
to_caculate = ["RingCount", "HallKierAlpha", "BalabanJ", "NumAliphaticCarbocycles","NumAliphaticHeterocycles",
"NumAliphaticRings","NumAromaticCarbocycles","NumAromaticHeterocycles",
"NumAromaticRings","NumHAcceptors","NumHDonors","NumHeteroatoms",
"NumRadicalElectrons","NumSaturatedCarbocycles","NumSaturatedHeterocycles",
"NumSaturatedRings","NumValenceElectrons"]
    # Calculate all properties listed in 'to_caculate'
calc_props = OrderedDict(inspect.getmembers(Descriptors, inspect.isfunction))
for key in list(calc_props.keys()):
if key.startswith('_'):
del calc_props[key]
continue
if len(to_caculate)!=0 and key not in to_caculate:
del calc_props[key]
features = [val(mol) for key,val in calc_props.items()] # List of properties
# Ratio of total number of (single, double, triple, aromatic) bonds to the total number of bonds.
simple_bond_info = get_num_bond_types(mol)
# Obtain all rings in a molecule and calc. #of triple bonds in rings & #of rings in molecule
ring_ls = obtain_rings(smi)
num_triple = 0 # num triple bonds in ring
if len(ring_ls) > 0 and ring_ls != (None, None):
for item in ring_ls:
num_triple += item.count('#')
simple_bond_info.append(len(ring_ls)) # append number of Rings in molecule
else: simple_bond_info.append(0) # no rotatable bonds
simple_bond_info.append(num_triple) # number of triple bonds in rings
# appended onto 'simple_bond_info'
# Calculate the number of rings of size 3 to 20 & number of conseq. double bonds in rings
simple_bond_info = simple_bond_info + size_ring_counter(ring_ls)
    # Calculate the number of consecutive double bonds in the entire molecule
simple_bond_info.append(count_conseq_double(mol))
return np.array(features + basic_props + simple_bond_info)
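def _example_property_vector():
    # Illustrative only: the feature vector for ethanol. It concatenates the 17
    # RDKit descriptors in 'to_caculate', the 8 atom-count ratios and the 26
    # bond/ring features described above, giving 51 values in total.
    return get_mol_info('CCO')   # numpy array of shape (51,)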
def get_chunks(arr, num_processors, ratio):
"""
    Split arr into num_processors chunks of roughly int(ratio) items each; the last chunk absorbs any remainder.
"""
    chunks = [] # Collect the sub-lists that will be sent to different processors
counter = int(ratio)
for i in range(num_processors):
if i == 0:
chunks.append(arr[0:counter])
if i != 0 and i<num_processors-1:
chunks.append(arr[counter-int(ratio): counter])
if i == num_processors-1:
chunks.append(arr[counter-int(ratio): ])
counter += int(ratio)
return chunks
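def _example_chunking():
    # Illustrative only: splitting 10 items across 4 processors with ratio = 10/4
    # yields [[0, 1], [2, 3], [4, 5], [6, 7, 8, 9]] -- the last chunk absorbs the
    # remainder.
    return get_chunks(list(range(10)), num_processors=4, ratio=10 / 4)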
def get_mult_mol_info(smiles_list):
''' Collect results of 'get_mol_info' for multiple smiles (smiles_list)
Parameters:
smiles_list (list) : List of SMILE strings
Returns:
np.array : Concatenated array of results with shape (len(smiles_list), 51)
51 is the number of RdKit properties calculated in 'get_mol_info'.
'''
concat_arr = []
for smi in smiles_list:
concat_arr.append(get_mol_info(smi))
return np.array(concat_arr)
def get_mult_mol_info_parr(smiles_list, dataset_x):
''' Record calculated rdkit property results for each smile in smiles_list,
    storing each result in the dictionary dataset_x.
'''
for smi in smiles_list:
dataset_x['properties_rdkit'][smi] = get_mol_info(smi)
def create_parr_process(chunks):
'''This function initiates parallel execution (based on the number of cpu cores)
to calculate all the properties mentioned in 'get_mol_info()'
Parameters:
        chunks (list) : List of lists containing smile strings. Each sub-list is
                        sent to a different process, together with a locked manager
                        dictionary used to communicate its results back.
    Returns:
        dict : Combined mapping from each smile string to its calculated
               property vector, merged across all processes.
'''
# Assign data to each process
process_collector = []
collect_dictionaries = []
for chunk in chunks: # process initialization
dataset_x = manager.dict(lock=True)
smiles_map_props = manager.dict(lock=True)
dataset_x['properties_rdkit'] = smiles_map_props
collect_dictionaries.append(dataset_x)
process_collector.append(multiprocessing.Process(target=get_mult_mol_info_parr, args=(chunk, dataset_x, )))
    for item in process_collector: # start all processes
item.start()
for item in process_collector: # wait for all processes to finish
item.join()
combined_dict = {}
    for item in collect_dictionaries:
        combined_dict.update(item['properties_rdkit'])
return combined_dict
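def _example_parallel_properties():
    # Illustrative only (small SMILES list for demonstration): chunk the inputs,
    # compute the RDKit property vectors in parallel and return the combined
    # {smile: property-vector} dictionary.
    smiles = ['CCO', 'c1ccccc1', 'CC(=O)O', 'CCN']
    chunks = get_chunks(smiles, num_processors=2, ratio=len(smiles) / 2)
    return create_parr_process(chunks)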
| 36.383481
| 269
| 0.602927
|
3d335de55598aded12fe220c72b6688cdc4f7ad8
| 12,237
|
py
|
Python
|
tensorflow/python/saved_model/nested_structure_coder.py
|
guptarohit/tensorflow
|
e1ab41387a255fe4a98b76589cd36dc8206c7f77
|
[
"Apache-2.0"
] | 2
|
2019-02-12T01:37:54.000Z
|
2019-09-17T18:20:54.000Z
|
tensorflow/python/saved_model/nested_structure_coder.py
|
illaMcbender/tensorflow
|
a0b0a503287d019a28ef4f670b157eb3605a12f3
|
[
"Apache-2.0"
] | 1
|
2019-02-22T00:50:13.000Z
|
2019-02-22T00:50:13.000Z
|
tensorflow/python/saved_model/nested_structure_coder.py
|
illaMcbender/tensorflow
|
a0b0a503287d019a28ef4f670b157eb3605a12f3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module that encodes (decodes) nested structures into (from) protos.
The intended use is to serialize everything needed to restore a `Function` that
was saved into a SavedModel. This may include concrete function inputs and
outputs, signatures, function specs, etc.
Example use:
coder = nested_structure_coder.StructureCoder()
# Encode into proto.
signature_proto = coder.encode_structure(function.input_signature)
# Decode into a Python object.
restored_signature = coder.decode_proto(signature_proto)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.saved_model import struct_pb2
from tensorflow.python.util import compat
class NotEncodableError(Exception):
"""Error raised when a coder cannot encode an object."""
class StructureCoder(object):
"""Encoder and decoder for nested structures into protos."""
_codecs = []
@classmethod
def register_codec(cls, x):
cls._codecs.append(x)
@classmethod
def _get_encoders(cls):
return [(c.can_encode, c.do_encode) for c in cls._codecs]
@classmethod
def _get_decoders(cls):
return [(c.can_decode, c.do_decode) for c in cls._codecs]
def _map_structure(self, pyobj, coders):
for can, do in coders:
if can(pyobj):
recursion_fn = functools.partial(self._map_structure, coders=coders)
return do(pyobj, recursion_fn)
raise NotEncodableError(
"No encoder for object [%s] of type [%s]." % (str(pyobj), type(pyobj)))
def encode_structure(self, nested_structure):
"""Encodes nested structures composed of encodable types into a proto.
Args:
nested_structure: Structure to encode.
Returns:
Encoded proto.
Raises:
NotEncodableError: For values for which there are no encoders.
"""
return self._map_structure(nested_structure, self._get_encoders())
def can_encode(self, nested_structure):
"""Determines whether a nested structure can be encoded into a proto.
Args:
nested_structure: Structure to encode.
Returns:
True if the nested structured can be encoded.
"""
try:
self.encode_structure(nested_structure)
except NotEncodableError:
return False
return True
def decode_proto(self, proto):
"""Decodes proto representing a nested structure.
Args:
proto: Proto to decode.
Returns:
Decoded structure.
Raises:
NotEncodableError: For values for which there are no encoders.
"""
return self._map_structure(proto, self._get_decoders())
class _ListCodec(object):
"""Codec for lists."""
def can_encode(self, pyobj):
return isinstance(pyobj, list)
def do_encode(self, list_value, encode_fn):
encoded_list = struct_pb2.StructuredValue()
encoded_list.list_value.CopyFrom(struct_pb2.ListValue())
for element in list_value:
encoded_list.list_value.values.add().CopyFrom(encode_fn(element))
return encoded_list
def can_decode(self, value):
return value.HasField("list_value")
def do_decode(self, value, decode_fn):
return [decode_fn(element) for element in value.list_value.values]
StructureCoder.register_codec(_ListCodec())
def _is_tuple(obj):
return not _is_named_tuple(obj) and isinstance(obj, tuple)
def _is_named_tuple(instance):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
Returns:
True if `instance` is a `namedtuple`.
"""
if not isinstance(instance, tuple):
return False
return (hasattr(instance, "_fields") and
isinstance(instance._fields, collections.Sequence) and
all(isinstance(f, six.string_types) for f in instance._fields))
class _TupleCodec(object):
"""Codec for tuples."""
def can_encode(self, pyobj):
return _is_tuple(pyobj)
def do_encode(self, tuple_value, encode_fn):
encoded_tuple = struct_pb2.StructuredValue()
encoded_tuple.tuple_value.CopyFrom(struct_pb2.TupleValue())
for element in tuple_value:
encoded_tuple.tuple_value.values.add().CopyFrom(encode_fn(element))
return encoded_tuple
def can_decode(self, value):
return value.HasField("tuple_value")
def do_decode(self, value, decode_fn):
return tuple(decode_fn(element) for element in value.tuple_value.values)
StructureCoder.register_codec(_TupleCodec())
class _DictCodec(object):
"""Codec for dicts."""
def can_encode(self, pyobj):
return isinstance(pyobj, dict)
def do_encode(self, dict_value, encode_fn):
encoded_dict = struct_pb2.StructuredValue()
encoded_dict.dict_value.CopyFrom(struct_pb2.DictValue())
for key, value in dict_value.items():
encoded_dict.dict_value.fields[key].CopyFrom(encode_fn(value))
return encoded_dict
def can_decode(self, value):
return value.HasField("dict_value")
def do_decode(self, value, decode_fn):
return {key: decode_fn(val) for key, val in value.dict_value.fields.items()}
StructureCoder.register_codec(_DictCodec())
class _NamedTupleCodec(object):
"""Codec for namedtuples.
Encoding and decoding a namedtuple reconstructs a namedtuple with a different
actual Python type, but with same `typename` and `fields`.
"""
def can_encode(self, pyobj):
return _is_named_tuple(pyobj)
def do_encode(self, named_tuple_value, encode_fn):
encoded_named_tuple = struct_pb2.StructuredValue()
encoded_named_tuple.named_tuple_value.CopyFrom(struct_pb2.NamedTupleValue())
encoded_named_tuple.named_tuple_value.name = \
named_tuple_value.__class__.__name__
for key in named_tuple_value._fields:
pair = encoded_named_tuple.named_tuple_value.values.add()
pair.key = key
pair.value.CopyFrom(encode_fn(named_tuple_value._asdict()[key]))
return encoded_named_tuple
def can_decode(self, value):
return value.HasField("named_tuple_value")
def do_decode(self, value, decode_fn):
key_value_pairs = value.named_tuple_value.values
items = [(pair.key, decode_fn(pair.value)) for pair in key_value_pairs]
named_tuple_type = collections.namedtuple(value.named_tuple_value.name,
[item[0] for item in items])
return named_tuple_type(**dict(items))
StructureCoder.register_codec(_NamedTupleCodec())
class _Float64Codec(object):
"""Codec for floats."""
def can_encode(self, pyobj):
return isinstance(pyobj, float)
def do_encode(self, float64_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.float64_value = float64_value
return value
def can_decode(self, value):
return value.HasField("float64_value")
def do_decode(self, value, decode_fn):
del decode_fn
return value.float64_value
StructureCoder.register_codec(_Float64Codec())
class _Int64Codec(object):
"""Codec for Python integers (limited to 64 bit values)."""
def can_encode(self, pyobj):
return not isinstance(pyobj, bool) and isinstance(pyobj, int)
def do_encode(self, int_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.int64_value = int_value
return value
def can_decode(self, value):
return value.HasField("int64_value")
def do_decode(self, value, decode_fn):
del decode_fn
return int(value.int64_value)
StructureCoder.register_codec(_Int64Codec())
class _StringCodec(object):
"""Codec for strings.
See StructuredValue.string_value in proto/struct.proto for more detailed
explanation.
"""
def can_encode(self, pyobj):
return isinstance(pyobj, str)
def do_encode(self, string_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.string_value = string_value
return value
def can_decode(self, value):
return value.HasField("string_value")
def do_decode(self, value, decode_fn):
del decode_fn
return compat.as_str(value.string_value)
StructureCoder.register_codec(_StringCodec())
class _NoneCodec(object):
"""Codec for None."""
def can_encode(self, pyobj):
return pyobj is None
def do_encode(self, none_value, encode_fn):
del encode_fn, none_value
value = struct_pb2.StructuredValue()
value.none_value.CopyFrom(struct_pb2.NoneValue())
return value
def can_decode(self, value):
return value.HasField("none_value")
def do_decode(self, value, decode_fn):
del decode_fn, value
return None
StructureCoder.register_codec(_NoneCodec())
class _BoolCodec(object):
"""Codec for booleans."""
def can_encode(self, pyobj):
return isinstance(pyobj, bool)
def do_encode(self, bool_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.bool_value = bool_value
return value
def can_decode(self, value):
return value.HasField("bool_value")
def do_decode(self, value, decode_fn):
del decode_fn
return value.bool_value
StructureCoder.register_codec(_BoolCodec())
class _TensorShapeCodec(object):
"""Codec for `TensorShape`."""
def can_encode(self, pyobj):
return isinstance(pyobj, tensor_shape.TensorShape)
def do_encode(self, tensor_shape_value, encode_fn):
del encode_fn
encoded_tensor_shape = struct_pb2.StructuredValue()
encoded_tensor_shape.tensor_shape_value.CopyFrom(
tensor_shape_value.as_proto())
return encoded_tensor_shape
def can_decode(self, value):
return value.HasField("tensor_shape_value")
def do_decode(self, value, decode_fn):
del decode_fn
return tensor_shape.TensorShape(value.tensor_shape_value)
StructureCoder.register_codec(_TensorShapeCodec())
class _TensorTypeCodec(object):
"""Codec for `TensorType`."""
def can_encode(self, pyobj):
return isinstance(pyobj, dtypes.DType)
def do_encode(self, tensor_dtype_value, encode_fn):
del encode_fn
encoded_tensor_type = struct_pb2.StructuredValue()
encoded_tensor_type.tensor_dtype_value = tensor_dtype_value.as_datatype_enum
return encoded_tensor_type
def can_decode(self, value):
return value.HasField("tensor_dtype_value")
def do_decode(self, value, decode_fn):
del decode_fn
return dtypes.DType(value.tensor_dtype_value)
StructureCoder.register_codec(_TensorTypeCodec())
class _TensorSpecCodec(object):
"""Codec for `TensorSpec`."""
def can_encode(self, pyobj):
return isinstance(pyobj, tensor_spec.TensorSpec)
def do_encode(self, tensor_spec_value, encode_fn):
encoded_tensor_spec = struct_pb2.StructuredValue()
encoded_tensor_spec.tensor_spec_value.CopyFrom(
struct_pb2.TensorSpecProto(
shape=encode_fn(tensor_spec_value.shape).tensor_shape_value,
dtype=encode_fn(tensor_spec_value.dtype).tensor_dtype_value,
name=tensor_spec_value.name))
return encoded_tensor_spec
def can_decode(self, value):
return value.HasField("tensor_spec_value")
def do_decode(self, value, decode_fn):
return tensor_spec.TensorSpec(
shape=decode_fn(
struct_pb2.StructuredValue(
tensor_shape_value=value.tensor_spec_value.shape)),
dtype=decode_fn(
struct_pb2.StructuredValue(
tensor_dtype_value=value.tensor_spec_value.dtype)),
name=value.tensor_spec_value.name)
StructureCoder.register_codec(_TensorSpecCodec())
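def _example_round_trip():
  # Illustrative sketch mirroring the module docstring; the TensorSpec below is a
  # placeholder value and not part of the original module.
  coder = StructureCoder()
  structure = {"x": tensor_spec.TensorSpec([2, 3], dtypes.float32, name="x")}
  proto = coder.encode_structure(structure)
  return coder.decode_proto(proto)  # restores {"x": TensorSpec(shape=(2, 3), ...)}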
| 28.002288
| 80
| 0.731552
|
c74333bc0ad919005e9015e8f0f9fee46eb4af85
| 2,917
|
py
|
Python
|
systems/MNIST/ConvLin/quantize/all_ana.py
|
mdatres/quantlab
|
09fb24ede78f49768f829afe0fac2ac291b8fd4f
|
[
"Apache-2.0"
] | null | null | null |
systems/MNIST/ConvLin/quantize/all_ana.py
|
mdatres/quantlab
|
09fb24ede78f49768f829afe0fac2ac291b8fd4f
|
[
"Apache-2.0"
] | null | null | null |
systems/MNIST/ConvLin/quantize/all_ana.py
|
mdatres/quantlab
|
09fb24ede78f49768f829afe0fac2ac291b8fd4f
|
[
"Apache-2.0"
] | null | null | null |
#
# ana.py
#
# Author(s):
# Matteo Spallanzani <spmatteo@iis.ee.ethz.ch>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch.nn as nn
import quantlib.algorithms as qa
import quantlib.editing.lightweight as qlw
from typing import List
def all_ana_recipe(net: nn.Module,
quantizer_spec: dict,
noise_type: str,
strategy: str) -> nn.Module:
# define filters
# type-based filters
filter_conv2d = qlw.rules.filters.TypeFilter(nn.Conv2d)
filter_linear = qlw.rules.filters.TypeFilter(nn.Linear)
filter_tanh = qlw.rules.filters.TypeFilter(nn.Tanh)
# name-based filters
filter_pilot = qlw.rules.filters.NameFilter('pilot')
filter_features = qlw.rules.filters.NameFilter('features')
filter_classifier = qlw.rules.filters.NameFilter('classifier')
filter_last_node = qlw.rules.filters.NameFilter('classifier.6')
# define rules
# 2D convolutions
filter_conv2d_pilot_or_features = filter_conv2d & (filter_pilot | filter_features)
rho_conv2d = qlw.rules.ana.ReplaceConv2dANAConv2dRule(filter_=filter_conv2d_pilot_or_features, quantizer_spec=quantizer_spec, noise_type=noise_type, strategy=strategy)
# linear maps
filter_linear_classifier_no_last = filter_linear & filter_classifier & (-filter_last_node)
rho_linear = qlw.rules.ana.ReplaceLinearANALinearRule(filter_=filter_linear_classifier_no_last, quantizer_spec=quantizer_spec, noise_type=noise_type, strategy=strategy)
    # Tanh activations
filter_relu_pilot_or_features_or_classifier = filter_tanh & (filter_pilot | filter_features | filter_classifier)
rho_relu = qlw.rules.ana.ReplaceTanhANAActivationRule(filter_relu_pilot_or_features_or_classifier, quantizer_spec=quantizer_spec, noise_type=noise_type, strategy=strategy)
# edit
lwgraph = qlw.LightweightGraph(net)
lweditor = qlw.LightweightEditor(lwgraph)
lweditor.startup()
lweditor.set_lwr(rho_conv2d)
lweditor.apply()
lweditor.set_lwr(rho_linear)
lweditor.apply()
lweditor.set_lwr(rho_relu)
lweditor.apply()
print(lweditor.graph)
lweditor.shutdown()
return lwgraph.net
def all_ana_controller(net: nn.Module,
ctrl_spec: list) -> List[qa.Controller]:
anactrl = qa.ana.ANAController(net, ctrl_spec)
return [anactrl]
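def _example_wiring(net: nn.Module, quantizer_spec: dict, noise_type: str,
                    strategy: str, ctrl_spec: list):
    # Illustrative wiring only: the concrete quantizer_spec, noise_type, strategy
    # and ctrl_spec values come from the QuantLab experiment configuration and are
    # not reproduced here.
    quantized_net = all_ana_recipe(net, quantizer_spec, noise_type, strategy)
    return quantized_net, all_ana_controller(quantized_net, ctrl_spec)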
| 37.397436
| 175
| 0.73363
|
d162d2d8285f53d09af4f3285a97228c6115f801
| 15,420
|
py
|
Python
|
saleor/payment/gateways/stripe/tests/test_stripe_api.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/payment/gateways/stripe/tests/test_stripe_api.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/payment/gateways/stripe/tests/test_stripe_api.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
from decimal import Decimal
from unittest.mock import patch
from stripe.error import AuthenticationError, StripeError
from stripe.stripe_object import StripeObject
from saleor.payment.interface import PaymentMethodInfo
from saleor.payment.utils import price_to_minor_unit
from ..consts import (
AUTOMATIC_CAPTURE_METHOD,
MANUAL_CAPTURE_METHOD,
METADATA_IDENTIFIER,
STRIPE_API_VERSION,
WEBHOOK_EVENTS,
)
from ..stripe_api import (
cancel_payment_intent,
capture_payment_intent,
create_payment_intent,
delete_webhook,
get_or_create_customer,
get_payment_method_details,
is_secret_api_key_valid,
list_customer_payment_methods,
refund_payment_intent,
retrieve_payment_intent,
subscribe_webhook,
update_payment_method,
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.WebhookEndpoint",
)
def test_is_secret_api_key_valid_incorrect_key(mocked_webhook):
api_key = "incorrect"
mocked_webhook.list.side_effect = AuthenticationError()
assert is_secret_api_key_valid(api_key) is False
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.WebhookEndpoint",
)
def test_is_secret_api_key_valid_correct_key(mocked_webhook):
api_key = "correct_key"
assert is_secret_api_key_valid(api_key) is True
mocked_webhook.list.assert_called_with(api_key, stripe_version=STRIPE_API_VERSION)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.WebhookEndpoint",
)
def test_subscribe_webhook_returns_webhook_object(mocked_webhook, channel_USD):
api_key = "api_key"
expected_url = (
"http://mirumee.com/plugins/channel/main/saleor.payments.stripe/webhooks/"
)
subscribe_webhook(api_key, channel_slug=channel_USD.slug)
mocked_webhook.create.assert_called_with(
api_key=api_key,
url=expected_url,
enabled_events=WEBHOOK_EVENTS,
metadata={METADATA_IDENTIFIER: "mirumee.com"},
stripe_version=STRIPE_API_VERSION,
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.WebhookEndpoint",
)
def test_delete_webhook(mocked_webhook):
api_key = "api_key"
delete_webhook(api_key, "webhook_id")
mocked_webhook.delete.assert_called_with(
"webhook_id", api_key=api_key, stripe_version=STRIPE_API_VERSION
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_create_payment_intent_returns_intent_object(mocked_payment_intent):
api_key = "api_key"
mocked_payment_intent.create.return_value = StripeObject()
intent, error = create_payment_intent(
api_key, Decimal(10), "USD", auto_capture=True
)
mocked_payment_intent.create.assert_called_with(
api_key=api_key,
amount="1000",
currency="USD",
capture_method=AUTOMATIC_CAPTURE_METHOD,
stripe_version=STRIPE_API_VERSION,
)
assert isinstance(intent, StripeObject)
assert error is None
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_create_payment_intent_with_customer(mocked_payment_intent):
customer = StripeObject(id="c_ABC")
api_key = "api_key"
mocked_payment_intent.create.return_value = StripeObject()
intent, error = create_payment_intent(
api_key, Decimal(10), "USD", auto_capture=True, customer=customer
)
mocked_payment_intent.create.assert_called_with(
api_key=api_key,
amount="1000",
currency="USD",
capture_method=AUTOMATIC_CAPTURE_METHOD,
customer=customer,
stripe_version=STRIPE_API_VERSION,
)
assert isinstance(intent, StripeObject)
assert error is None
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_create_payment_intent_manual_auto_capture(mocked_payment_intent):
api_key = "api_key"
mocked_payment_intent.create.return_value = StripeObject()
_intent, _error = create_payment_intent(
api_key, Decimal(10), "USD", auto_capture=False
)
mocked_payment_intent.create.assert_called_with(
api_key=api_key,
amount="1000",
currency="USD",
capture_method=MANUAL_CAPTURE_METHOD,
stripe_version=STRIPE_API_VERSION,
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_create_payment_intent_returns_error(mocked_payment_intent):
api_key = "api_key"
mocked_payment_intent.create.side_effect = StripeError(
json_body={"error": "stripe-error"}
)
intent, error = create_payment_intent(api_key, Decimal(10), "USD")
mocked_payment_intent.create.assert_called_with(
api_key=api_key,
amount="1000",
currency="USD",
capture_method=AUTOMATIC_CAPTURE_METHOD,
stripe_version=STRIPE_API_VERSION,
)
assert intent is None
assert error
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentMethod",
)
def test_update_payment_method(mocked_payment_method):
# given
api_key = "api_key"
payment_method_id = "1234"
metadata = {"key": "value"}
# when
update_payment_method(api_key, payment_method_id, metadata)
# then
mocked_payment_method.modify.assert_called_once_with(
payment_method_id,
api_key=api_key,
metadata=metadata,
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_retrieve_payment_intent(mocked_payment_intent):
api_key = "api_key"
payment_intent_id = "id1234"
mocked_payment_intent.retrieve.return_value = StripeObject()
intent, _ = retrieve_payment_intent(api_key, payment_intent_id)
mocked_payment_intent.retrieve.assert_called_with(
payment_intent_id,
api_key=api_key,
stripe_version=STRIPE_API_VERSION,
)
assert isinstance(intent, StripeObject)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_retrieve_payment_intent_stripe_returns_error(mocked_payment_intent):
api_key = "api_key"
payment_intent_id = "id1234"
expected_error = StripeError(message="stripe-error")
mocked_payment_intent.retrieve.side_effect = expected_error
_, error = retrieve_payment_intent(api_key, payment_intent_id)
mocked_payment_intent.retrieve.assert_called_with(
payment_intent_id,
api_key=api_key,
stripe_version=STRIPE_API_VERSION,
)
assert error == expected_error
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_capture_payment_intent(mocked_payment_intent):
api_key = "api_key"
payment_intent_id = "id1234"
amount = price_to_minor_unit(Decimal("10.0"), "USD")
mocked_payment_intent.capture.return_value = StripeObject()
intent, _ = capture_payment_intent(
api_key=api_key, payment_intent_id=payment_intent_id, amount_to_capture=amount
)
mocked_payment_intent.capture.assert_called_with(
payment_intent_id,
amount_to_capture=amount,
api_key=api_key,
stripe_version=STRIPE_API_VERSION,
)
assert isinstance(intent, StripeObject)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_capture_payment_intent_stripe_returns_error(mocked_payment_intent):
api_key = "api_key"
payment_intent_id = "id1234"
amount = price_to_minor_unit(Decimal("10.0"), "USD")
expected_error = StripeError(message="stripe-error")
mocked_payment_intent.capture.side_effect = expected_error
_, error = capture_payment_intent(
api_key=api_key, payment_intent_id=payment_intent_id, amount_to_capture=amount
)
mocked_payment_intent.capture.assert_called_with(
payment_intent_id,
amount_to_capture=amount,
api_key=api_key,
stripe_version=STRIPE_API_VERSION,
)
assert error == expected_error
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.Refund",
)
def test_refund_payment_intent(mocked_refund):
api_key = "api_key"
payment_intent_id = "id1234"
amount = price_to_minor_unit(Decimal("10.0"), "USD")
mocked_refund.create.return_value = StripeObject()
intent, _ = refund_payment_intent(
api_key=api_key, payment_intent_id=payment_intent_id, amount_to_refund=amount
)
mocked_refund.create.assert_called_with(
payment_intent=payment_intent_id,
amount=amount,
api_key=api_key,
stripe_version=STRIPE_API_VERSION,
)
assert isinstance(intent, StripeObject)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.Refund",
)
def test_refund_payment_intent_returns_error(mocked_refund):
api_key = "api_key"
payment_intent_id = "id1234"
amount = price_to_minor_unit(Decimal("10.0"), "USD")
expected_error = StripeError(message="stripe-error")
mocked_refund.create.side_effect = expected_error
_, error = refund_payment_intent(
api_key=api_key, payment_intent_id=payment_intent_id, amount_to_refund=amount
)
mocked_refund.create.assert_called_with(
payment_intent=payment_intent_id,
amount=amount,
api_key=api_key,
stripe_version=STRIPE_API_VERSION,
)
assert error == expected_error
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_cancel_payment_intent(mocked_payment_intent):
api_key = "api_key"
payment_intent_id = "id1234"
mocked_payment_intent.cancel.return_value = StripeObject()
intent, _ = cancel_payment_intent(
api_key=api_key, payment_intent_id=payment_intent_id
)
mocked_payment_intent.cancel.assert_called_with(
payment_intent_id, api_key=api_key, stripe_version=STRIPE_API_VERSION
)
assert isinstance(intent, StripeObject)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentIntent",
)
def test_cancel_payment_intent_stripe_returns_error(mocked_payment_intent):
api_key = "api_key"
payment_intent_id = "id1234"
expected_error = StripeError(message="stripe-error")
mocked_payment_intent.cancel.side_effect = expected_error
_, error = cancel_payment_intent(
api_key=api_key, payment_intent_id=payment_intent_id
)
mocked_payment_intent.cancel.assert_called_with(
payment_intent_id, api_key=api_key, stripe_version=STRIPE_API_VERSION
)
assert error == expected_error
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.Customer",
)
def test_get_or_create_customer_retrieve(mocked_customer):
mocked_customer.retrieve.return_value = StripeObject()
api_key = "123"
customer_email = "admin@example.com"
customer_id = "c_12345"
customer = get_or_create_customer(
api_key=api_key,
customer_email=customer_email,
customer_id=customer_id,
)
assert isinstance(customer, StripeObject)
mocked_customer.retrieve.assert_called_with(
customer_id, api_key=api_key, stripe_version=STRIPE_API_VERSION
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.Customer",
)
def test_get_or_create_customer_failed_retrieve(mocked_customer):
expected_error = StripeError(message="stripe-error")
mocked_customer.retrieve.side_effect = expected_error
api_key = "123"
customer_email = "admin@example.com"
customer_id = "c_12345"
customer = get_or_create_customer(
api_key=api_key,
customer_email=customer_email,
customer_id=customer_id,
)
assert customer is None
mocked_customer.retrieve.assert_called_with(
customer_id, api_key=api_key, stripe_version=STRIPE_API_VERSION
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.Customer",
)
def test_get_or_create_customer_create(mocked_customer):
mocked_customer.create.return_value = StripeObject()
api_key = "123"
customer_email = "admin@example.com"
customer = get_or_create_customer(
api_key=api_key,
customer_email=customer_email,
customer_id=None,
)
assert isinstance(customer, StripeObject)
mocked_customer.create.assert_called_with(
email=customer_email, api_key=api_key, stripe_version=STRIPE_API_VERSION
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.Customer",
)
def test_get_or_create_customer_failed_create(mocked_customer):
expected_error = StripeError(message="stripe-error")
mocked_customer.create.side_effect = expected_error
api_key = "123"
customer_email = "admin@example.com"
customer = get_or_create_customer(
api_key=api_key,
customer_email=customer_email,
customer_id=None,
)
assert customer is None
mocked_customer.create.assert_called_with(
email=customer_email, api_key=api_key, stripe_version=STRIPE_API_VERSION
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentMethod",
)
def test_list_customer_payment_methods(mocked_payment_method):
api_key = "123"
customer_id = "c_customer_id"
mocked_payment_method.list.return_value = StripeObject()
payment_method, error = list_customer_payment_methods(
api_key=api_key, customer_id=customer_id
)
assert error is None
assert isinstance(payment_method, StripeObject)
mocked_payment_method.list.assert_called_with(
api_key=api_key,
customer=customer_id,
type="card",
stripe_version=STRIPE_API_VERSION,
)
@patch(
"saleor.payment.gateways.stripe.stripe_api.stripe.PaymentMethod",
)
def test_list_customer_payment_methods_failed_to_fetch(mocked_payment_method):
api_key = "123"
customer_id = "c_customer_id"
expected_error = StripeError(message="stripe-error")
mocked_payment_method.list.side_effect = expected_error
payment_method, error = list_customer_payment_methods(
api_key=api_key, customer_id=customer_id
)
assert payment_method is None
assert isinstance(error, StripeError)
mocked_payment_method.list.assert_called_with(
api_key=api_key,
customer=customer_id,
type="card",
stripe_version=STRIPE_API_VERSION,
)
def test_get_payment_method_details():
payment_intent = StripeObject()
payment_intent.charges = {
"data": [
{
"payment_method_details": {
"type": "card",
"card": {
"last4": "1234",
"exp_year": "2222",
"exp_month": "12",
"brand": "visa",
},
}
}
]
}
payment_method_info = get_payment_method_details(payment_intent)
assert payment_method_info == PaymentMethodInfo(
last_4="1234",
exp_year=2222,
exp_month=12,
brand="visa",
type="card",
)
def test_get_payment_method_details_missing_charges():
payment_intent = StripeObject()
payment_intent.charges = None
payment_method_info = get_payment_method_details(payment_intent)
assert payment_method_info is None
def test_get_payment_method_details_missing_charges_data():
payment_intent = StripeObject()
payment_intent.charges = {"data": None}
payment_method_info = get_payment_method_details(payment_intent)
assert payment_method_info is None
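# Note (not part of the original test module): these tests exercise the thin
# wrappers in stripe_api by patching the stripe SDK objects, so they run without
# network access, e.g. with pytest inside the Saleor development environment.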
| 28.190128
| 86
| 0.728405
|
3471896b9001b7beb3772b744f6d9e8e7f1c1404
| 38,081
|
py
|
Python
|
vpcnn/main.py
|
OSU-slatelab/vp-cnn
|
a75748b7bfbfdef8b69f7bfbd33097deccfbc600
|
[
"Apache-2.0"
] | null | null | null |
vpcnn/main.py
|
OSU-slatelab/vp-cnn
|
a75748b7bfbfdef8b69f7bfbd33097deccfbc600
|
[
"Apache-2.0"
] | 1
|
2021-09-11T23:44:00.000Z
|
2021-09-12T12:21:41.000Z
|
vpcnn/main.py
|
OSU-slatelab/vp-cnn
|
a75748b7bfbfdef8b69f7bfbd33097deccfbc600
|
[
"Apache-2.0"
] | 3
|
2019-05-15T01:50:23.000Z
|
2019-09-23T20:44:02.000Z
|
#! /usr/bin/env python
import os
import argparse
import datetime
import torch
import torchtext.data as data
import torchtext.datasets as datasets
import model
import train
import mydatasets
import pdb
import vpdataset
import numpy as np
from chatscript_file_generator import *
parser = argparse.ArgumentParser(description='CNN text classificer')
# learning
parser.add_argument('-lr', type=float, default=1.0, help='initial learning rate [default: 1.0]') # 1e-3
parser.add_argument('-word-lr', type=float, default=1.0, help='initial learning rate [default: 1.0]') # 1e-3
parser.add_argument('-char-lr', type=float, default=1.0, help='initial learning rate [default: 1.0]') # 1e-3
parser.add_argument('-l2', type=float, default=0.0, help='l2 regularization strength [default: 0.0]') # 1e-6
parser.add_argument('-word-l2', type=float, default=0.0, help='l2 regularization strength [default: 0.0]') # 1e-6
parser.add_argument('-char-l2', type=float, default=0.0, help='l2 regularization strength [default: 0.0]') # 1e-6
parser.add_argument('-epochs', type=int, default=25, help='number of epochs for train [default: 25]')
parser.add_argument('-word-epochs', type=int, default=25, help='number of epochs for train [default: 25]')
parser.add_argument('-char-epochs', type=int, default=25, help='number of epochs for train [default: 25]')
parser.add_argument('-batch-size', type=int, default=50, help='batch size for training [default: 50]')
parser.add_argument('-word-batch-size', type=int, default=50, help='batch size for training [default: 50]')
parser.add_argument('-char-batch-size', type=int, default=50, help='batch size for training [default: 50]')
parser.add_argument('-log-interval', type=int, default=1,
help='how many steps to wait before logging training status [default: 1]')
parser.add_argument('-log-file', type=str, default=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + 'result.txt',
help='the name of the file to store results')
parser.add_argument('-verbose', action='store_true', default=False, help='logging verbose info of training process')
# parser.add_argument('-verbose-interval', type=int, default=5000, help='steps between two verbose logging')
parser.add_argument('-test-interval', type=int, default=500,
                    help='how many steps to wait before testing [default: 500]')
parser.add_argument('-eval-on-test', action='store_true', default=False, help='run evaluation on test data?')
parser.add_argument('-save-interval', type=int, default=5000, help='how many steps to wait before saving [default: 5000]')
parser.add_argument('-save-dir', type=str, default='snapshot', help='where to save the snapshot')
# data
parser.add_argument('-data-dir', type=str, default='./data/', help='directory containing data files')
parser.add_argument('-train-idx-file', type=str, default='wilkins.shuffled.30.indices', help='file containing dial,turn idxs corresponding to train-file entries, in order, in `data-dir`')
parser.add_argument('-test-idx-file', type=str, default='wilkins.shuffled.30.indices', help='file containing dial,turn idxs corresponding to test-file entries, if using fixed test set (xfolds=0), in order, in `data-dir`')
parser.add_argument('-full-test-dialogues', type=str, default='vp16-CS_remapped.fix.full.csv', help='file containing dial,turn idxs corresponding to test-file entries, if using fixed test set (xfolds=0), in order, in `data-dir`')
parser.add_argument('-two-ch', action='store_true', help='use two-channel boundary/phone model, when supplying appropriate data')
parser.add_argument('-char-train-file', type=str, default='wilkins.phone.shuffled.30.txt', help='file containing char data for training, in `data-dir`')
parser.add_argument('-word-train-file', type=str, default='wilkins.word.shuffled.30.txt', help='file containing word data for training, in `data-dir`')
parser.add_argument('-char-test-file', type=str, default=None, help='file containing char data for testing, in `data-dir`')
parser.add_argument('-word-test-file', type=str, default=None, help='file containing word data for testing, in `data-dir`')
parser.add_argument('-char-alt-file', type=str, default=None, help='file containing char example alternatives to be randomly sampled, in `data-dir`')
parser.add_argument('-word-alt-file', type=str, default=None, help='file containing word example alternatives to be randomly sampled, in `data-dir`')
parser.add_argument('-alt-prob', type=float, default=0.0, help='probability of choosing an alternative example, if alternatives are provided')
parser.add_argument('-shuffle', action='store_true', default=True, help='shuffle the data every epoch')
parser.add_argument('-train-file', type=str, default='wilkins_corrected.shuffled.51.txt', help='file containing word data for training, in `data-dir`')
parser.add_argument('-test-file', type=str, default=None, help='file containing word data for testing, in `data-dir`')
# model
parser.add_argument('-dropout', type=float, default=0.5, help='the probability for dropout [default: 0.5]')
parser.add_argument('-char-dropout', type=float, default=0.5, help='the probability for dropout [default: 0.5]')
parser.add_argument('-word-dropout', type=float, default=0.5, help='the probability for dropout [default: 0.5]')
parser.add_argument('-max-norm', type=float, default=3.0, help='l2 constraint of parameters [default: 3.0]') # 0.0
parser.add_argument('-word-max-norm', type=float, default=3.0, help='l2 constraint of parameters [default: 3.0]') # 0.0
parser.add_argument('-char-max-norm', type=float, default=3.0, help='l2 constraint of parameters [default: 3.0]') # 0.0
parser.add_argument('-char-embed-dim', type=int, default=16, help='number of char embedding dimension [default: 128]')
parser.add_argument('-word-embed-dim', type=int, default=300, help='number of word embedding dimension [default: 300]')
parser.add_argument('-kernel-num', type=int, default=100, help='number of each kind of kernel')
parser.add_argument('-word-kernel-num', type=int, default=300, help='number of each kind of kernel')
parser.add_argument('-char-kernel-num', type=int, default=400, help='number of each kind of kernel')
# parser.add_argument('-kernel-sizes', type=str, default='3,4,5', help='comma-separated kernel size to use for convolution')
parser.add_argument('-char-kernel-sizes', type=str, default='2,3,4,5,6', help='comma-separated kernel size to use for char convolution')
parser.add_argument('-word-kernel-sizes', type=str, default='3,4,5', help='comma-separated kernel size to use for word convolution')
parser.add_argument('-static', action='store_true', default=False, help='fix the embedding')
# device
parser.add_argument('-device', type=int, default=0, help='device to use for iterating over data, -1 means cpu [default: 0]')
parser.add_argument('-yes-cuda', action='store_true', default=True, help='enable the gpu')
# option
parser.add_argument('-snapshot', type=str, default=None, help='filename of model snapshot [default: None]')
parser.add_argument('-predict', type=str, default=None, help='predict the sentence given')
parser.add_argument('-test', action='store_true', default=False, help='train or test')
parser.add_argument('-xfolds', type=int, default=10, help='number of folds for cross-validation; if zero, do not split test set from training data')
parser.add_argument('-layer-num', type=int, default=2, help='the number of layers in the final MLP')
parser.add_argument('-word-vector', type=str, default='w2v',
help="use of vectors [default: w2v. options: 'glove' or 'w2v']")
parser.add_argument('-emb-path', type=str, default=os.getcwd(), help="the path to the w2v file")
parser.add_argument('-min-freq', type=int, default=1, help='minimal frequency to be added to vocab')
parser.add_argument('-optimizer', type=str, default='adadelta', help="optimizer for all the models [default: SGD. options: 'sgd' or 'adam' or 'adadelta]")
parser.add_argument('-word-optimizer', type=str, default='adadelta', help="optimizer for all the models [default: SGD. options: 'sgd' or 'adam' or 'adadelta]")
parser.add_argument('-char-optimizer', type=str, default='adadelta', help="optimizer for all the models [default: SGD. options: 'sgd' or 'adam' or 'adadelta]")
parser.add_argument('-fine-tune', action='store_true', default=False,
help='whether to fine tune the final ensembled model')
parser.add_argument('-ortho-init', action='store_true', default=False,
help='use orthogonalization to improve weight matrix random initialization')
parser.add_argument('-ensemble', type=str, default='poe',
help='ensemble methods [default: poe. options: poe, avg, vot]')
parser.add_argument('-num-experts', type=int, default=5, help='number of experts if poe is enabled [default: 5]')
parser.add_argument('-prediction-file-handle', type=str, default='predictions.txt', help='the file to output the test predictions')
parser.add_argument('-no-always-norm', action='store_true', default=False, help='do not always max-norm the weights')
parser.add_argument('-no-char', action='store_false', help='do NOT train character-based CNN')
parser.add_argument('-no-word', action='store_false', help='do NOT train word-based CNN')
args = parser.parse_args()
prediction_file_handle = open(args.prediction_file_handle, 'w')
print('dial_id,turn_id,predicted,correct,prob,entropy,confidence,ave_prob,ave_logporb,chatscript_prob,chatscript_rank', file=prediction_file_handle)
if args.word_vector == 'glove':
args.word_vector = 'glove.6B'
elif args.word_vector == 'w2v':
if args.word_embed_dim != 300:
raise Exception("w2v has no other kind of vectors than 300")
else:
args.word_vector = None
# TODO these separate functions should probably be handled separately;
# i.e. how many folds, and whether or not to split test out of the training set;
# Would require changes to vp(), etc. aes-20180827
no_test_split = False
if args.xfolds == 0:
no_test_split = True
args.xfolds = 1
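# With -xfolds 0 no test split is carved out of the training data: the whole
# training file is used for training and evaluation happens on the separate
# test file, so a single "fold" is run over all of the data.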
# load SST dataset
def sst(text_field, label_field, **kargs):
train_data, dev_data, test_data = datasets.SST.splits(text_field, label_field, fine_grained=True)
text_field.build_vocab(train_data, dev_data, test_data)
label_field.build_vocab(train_data, dev_data, test_data)
train_iter, dev_iter, test_iter = data.BucketIterator.splits(
(train_data, dev_data, test_data),
batch_sizes=(args.batch_size,
len(dev_data),
len(test_data)),
**kargs)
return train_iter, dev_iter, test_iter
# load MR dataset
def mr(text_field, label_field, **kargs):
train_data, dev_data = mydatasets.MR.splits(text_field, label_field)
text_field.build_vocab(train_data, dev_data)
label_field.build_vocab(train_data, dev_data)
train_iter, dev_iter = data.Iterator.splits(
(train_data, dev_data),
batch_sizes=(args.batch_size, len(dev_data)),
**kargs)
return train_iter, dev_iter
if no_test_split:
test_batch_size = args.batch_size
else:
test_batch_size = args.batch_size
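# Both branches currently assign the same test batch size; the split is
# presumably kept so the no-test-split case could use a different value later.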
# load VP dataset
def vp(text_field, label_field, foldid, test_batch_size, bound_field=None,
path=None, filename=None,
test_filename=None, label_filename=None, train_idxs=None,
alt_file=None, alt_p=0.0, num_experts=0, **kargs):
# print('num_experts', num_experts)
train_data, dev_data, test_data = vpdataset.VP.splits(text_field,
label_field,
bound_field=bound_field,
root=path,
filename=filename,
test_filename=test_filename,
label_filename=label_filename,
train_idxs=train_idxs,
alt_file=alt_file,
alt_p=alt_p,
foldid=foldid,
num_experts=num_experts)
alt_list = None
alt_dict = None
if num_experts > 0:
alt_dict = train_data[0].alt_dict
train_vocab = train_data[0]
dev_vocab = dev_data[0]
else:
alt_dict = train_data.alt_dict
train_vocab = train_data
dev_vocab = dev_data
if alt_dict is not None:
alt_list = [alt for key in alt_dict for alt in alt_dict[key]]
#print(alt_list[:10])
if bound_field is not None:
alt_list = [vpdataset.split_bounds(alt)[0] for alt in alt_list]
if alt_list is None:
text_field.build_vocab(train_vocab, dev_vocab, test_data, wv_type=kargs["wv_type"], wv_dim=kargs["wv_dim"],
wv_dir=kargs["wv_dir"], min_freq=kargs['min_freq'])
else:
text_field.build_vocab(train_vocab, dev_vocab, test_data, alt_list, wv_type=kargs["wv_type"], wv_dim=kargs["wv_dim"],
wv_dir=kargs["wv_dir"], min_freq=kargs['min_freq'])
if bound_field is not None:
bound_field.build_vocab(train_vocab, dev_vocab, test_data)
# label_field.build_vocab(train_data, dev_data, test_data)
kargs.pop('wv_type')
kargs.pop('wv_dim')
kargs.pop('wv_dir')
kargs.pop("min_freq")
# print(type(train_data), type(dev_data))
if num_experts > 0:
train_iter = []
dev_iter = []
for i in range(num_experts):
this_train_iter, this_dev_iter, test_iter = data.Iterator.splits((train_data[i], dev_data[i], test_data),
batch_sizes=(args.batch_size,
args.batch_size, #len(dev_data[i]),
test_batch_size), **kargs)
train_iter.append(this_train_iter)
dev_iter.append(this_dev_iter)
else:
train_iter, dev_iter, test_iter = data.Iterator.splits(
(train_data, dev_data, test_data),
batch_sizes=(args.batch_size,
args.batch_size, #len(dev_data),
test_batch_size),
**kargs)
return train_iter, dev_iter, test_iter
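# vp() builds the text (and optional bound) vocabularies, including any
# sampling alternatives from alt_file, and returns (train_iter, dev_iter,
# test_iter); with num_experts > 0 the train and dev iterators are lists with
# one entry per expert while the test iterator is shared.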
#def vp_enh(text_field, label_field, **kargs):
# print('num_experts', num_experts)
# enh_data = vpdataset.VP(text_field, label_field, path='data', filename='vp17-all.shuffled.69.lbl_in.txt')
# this is just being treated as a test set for now, so it doesn't matter how many
# experts there are, and we want to use the existing vocabularies from training for evaluation
# enh_iter = data.Iterator(enh_data, args.batch_size, train=False)
# return enh_iter
# TODO: parameterize this:
def char_tokenizer(mstring):
# return mstring.split()
return list(mstring)
def bound_tokenizer(mstring):
return mstring.split()
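# char_tokenizer splits an utterance into individual characters for the
# character-level CNN; bound_tokenizer splits on whitespace and feeds the
# optional boundary channel used when -two-ch is enabled.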
def check_vocab(field):
itos = field.vocab.itos
other_vocab = set()
filename = '../sent-conv-torch/custom_word_mapping.txt'
f = open(filename)
for line in f:
line = line.strip().split(" ")
other_vocab.add(line[0])
for word in itos:
if word not in other_vocab:
print(word)
print('------')
for word in other_vocab:
if word not in itos:
print(word)
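# check_vocab() above is a debugging helper: it prints the words that appear in
# the torchtext vocabulary but not in the external word-mapping file, and vice
# versa.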
print("Beginning {0}-fold cross-validation...".format(args.xfolds))
print("Logging the results in {}".format(args.log_file))
log_file_handle = open(args.log_file, 'w')
char_dev_fold_accuracies = []
word_dev_fold_accuracies = []
ensemble_dev_fold_accuracies = []
char_test_fold_accuracies = []
word_test_fold_accuracies = []
ensemble_test_fold_accuracies = []
orig_save_dir = args.save_dir
update_args = True
data_dir = args.data_dir
labels, inv_labels = read_in_labels('data/labels.txt')
word_file = args.word_train_file
phn_file = args.char_train_file
word_test_file = args.word_test_file
phn_test_file = args.char_test_file
# these get used for indexing alternatives if using sampling
train_dialogues = read_in_dial_turn_idxs(os.path.join(args.data_dir, args.train_idx_file))
# these get used for printing test features to pass to chooser
if no_test_split:
test_dialogues = read_in_dial_turn_idxs(os.path.join(args.data_dir, args.test_idx_file))
else:
test_dialogues = train_dialogues
len_all_test_data = len(test_dialogues)
# to index examples for printing features to pass to chooser for test predictions:
fold_indices = calc_fold_indices(args.xfolds, len_all_test_data)
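# fold_indices gives, for each cross-validation fold, the slice of test
# examples whose features are written out for the downstream chooser.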
full_dials = read_in_dialogues(os.path.join(args.data_dir, args.full_test_dialogues))
#enh_dial_idxs = read_in_dial_turn_idxs('data/vp17-all.shuffled.69.indices')
#full_enh_dials = read_in_dialogues('data/vp17-all.full.csv')
#chats = read_in_chat('data/stats.16mar2017.csv', dialogues)
#TODO FIXME
#this should not be hardcoded (missing plain phn_labels.txt option in current state)
#phn_labels = 'phn+bd_labels.txt' if args.two_ch else 'phn_labels.txt'
# and now this is another dumb temporary hack for a char run
phn_labels = 'labels.txt'
word_labels = 'labels.txt'
use_char = args.no_char
use_word = args.no_word
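# -no-char and -no-word are store_false flags, so use_char / use_word default
# to True and become False only when the corresponding flag is passed.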
for xfold in range(args.xfolds):
print("Fold {0}".format(xfold))
# load data
print("\nLoading data...")
tokenizer = data.Pipeline(vpdataset.clean_str)
text_field = data.Field(lower=True, tokenize=char_tokenizer)
word_field = data.Field(lower=True, tokenize=tokenizer)
label_field = data.Field(sequential=False, use_vocab=False, preprocessing=int)
if args.two_ch:
bound_field = data.Field(lower=True, tokenize=bound_tokenizer)
else:
bound_field = None
if use_char:
print(phn_file)
train_iter, dev_iter, test_iter = vp(text_field,
label_field,
bound_field=bound_field,
path=data_dir,
filename=phn_file,
test_filename=phn_test_file,
test_batch_size=test_batch_size,
label_filename=phn_labels,
train_idxs=train_dialogues,
alt_file=args.char_alt_file,
alt_p=args.alt_prob,
foldid=None if no_test_split else xfold,
num_experts=args.num_experts,
device=args.device,
repeat=False,
sort=False,
wv_type=None,
wv_dim=None,
wv_dir=None,
min_freq=1)
if use_word:
print(word_file)
train_iter_word, dev_iter_word, test_iter_word = vp(word_field,
label_field,
path=data_dir,
filename=word_file,
test_filename=word_test_file,
test_batch_size=test_batch_size,
label_filename=word_labels,
train_idxs=train_dialogues,
alt_file=args.word_alt_file,
alt_p=args.alt_prob,
foldid=None if no_test_split else xfold,
num_experts=args.num_experts,
device=args.device,
repeat=False,
sort=False,
wv_type=args.word_vector,
wv_dim=args.word_embed_dim,
wv_dir=args.emb_path,
min_freq=args.min_freq)
# check_vocab(word_field)
# print(label_field.vocab.itos)
#TODO make this dependent on size of labels.txt
args.class_num = 361
args.cuda = args.yes_cuda and torch.cuda.is_available() # ; del args.no_cuda
if update_args == True:
if isinstance(args.char_kernel_sizes,str):
args.char_kernel_sizes = [int(k) for k in args.char_kernel_sizes.split(',')]
args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), 'CHAR')
else:
args.save_dir = os.path.join(orig_save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), 'CHAR')
print("\nParameters:", file=log_file_handle)
for attr, value in sorted(args.__dict__.items()):
print("\t{}={}".format(attr.upper(), value), file=log_file_handle)
# char CNN training and dev
if use_char:
args.embed_num = len(text_field.vocab)
args.lr = args.char_lr
args.l2 = args.char_l2
args.epochs = args.char_epochs
args.batch_size = args.char_batch_size
args.dropout = args.char_dropout
args.max_norm = args.char_max_norm
args.kernel_num = args.char_kernel_num
args.optimizer = args.char_optimizer
if args.two_ch:
V_bd = len(bound_field.vocab)
else:
V_bd = 1
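        # V_bd above is the boundary-channel vocabulary size; a dummy size of 1
        # is used when the two-channel input (-two-ch) is disabled.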
print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
print(" {}={}".format(attr.upper(), value))
if args.snapshot is None and args.num_experts == 0:
char_cnn = model.CNN_Text(class_num=args.class_num,
kernel_num=args.char_kernel_num,
kernel_sizes=args.char_kernel_sizes,
embed_num=len(text_field.vocab),
embed2_num=V_bd,
embed_dim=args.char_embed_dim,
dropout=args.char_dropout,
conv_init='uniform',
fc_init='normal',
static=False,
two_ch=args.two_ch,
vectors=None)
elif args.snapshot is None and args.num_experts > 0:
char_cnn = [model.CNN_Text(class_num=args.class_num,
kernel_num=args.char_kernel_num,
kernel_sizes=args.char_kernel_sizes,
embed_num=len(text_field.vocab),
embed2_num=V_bd,
embed_dim=args.char_embed_dim,
dropout=args.char_dropout,
conv_init='uniform',
fc_init='normal',
static=False,
two_ch=args.two_ch,
vectors=None)
for i in range(args.num_experts)]
else:
print('\nLoading model from [%s]...' % args.snapshot)
try:
char_cnn = torch.load(args.snapshot)
except:
print("Sorry, This snapshot doesn't exist.");
exit()
if args.num_experts > 0:
acc, char_cnn = train.ensemble_train(train_iter, dev_iter, char_cnn, args, two_ch=args.two_ch,
log_file_handle=log_file_handle, always_norm=False)
else:
acc, char_cnn = train.train(train_iter, dev_iter, char_cnn, args, two_ch=args.two_ch, log_file_handle=log_file_handle)
char_dev_fold_accuracies.append(acc)
print("Completed fold {0}. Accuracy on Dev: {1} for CHAR".format(xfold, acc), file=log_file_handle)
print("Completed fold {0}. Mean accuracy on Dev: {1} for CHAR".format(xfold, np.mean(acc)), file=log_file_handle)
if args.eval_on_test:
if args.num_experts > 0:
result = train.ensemble_eval(test_iter, char_cnn, args, two_ch=args.two_ch, log_file_handle=log_file_handle)
else:
result = train.eval(test_iter, char_cnn, args, two_ch=args.two_ch, log_file_handle=log_file_handle)
char_test_fold_accuracies.append(result)
print("Completed fold {0}. Accuracy on Test: {1} for CHAR".format(xfold, result))
print("Completed fold {0}. Accuracy on Test: {1} for CHAR".format(xfold, result), file=log_file_handle)
log_file_handle.flush()
#continue
# Word CNN training and dev
if use_word:
args.embed_num = len(word_field.vocab)
args.lr = args.word_lr
args.l2 = args.word_l2
args.epochs = args.word_epochs
args.batch_size = args.word_batch_size
args.dropout = args.word_dropout
args.max_norm = args.word_max_norm
args.kernel_num = args.word_kernel_num
args.optimizer = args.word_optimizer
print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
print(" {}={}".format(attr.upper(), value))
if update_args == True:
# args.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]
args.word_kernel_sizes = [int(k) for k in args.word_kernel_sizes.split(',')]
args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), 'WORD')
else:
args.save_dir = os.path.join(orig_save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), 'WORD')
if args.snapshot is None and args.num_experts == 0:
word_cnn = model.CNN_Text(class_num=args.class_num,
kernel_num=args.word_kernel_num,
kernel_sizes=args.word_kernel_sizes,
embed_num=len(word_field.vocab),
embed_dim=args.word_embed_dim,
dropout=args.word_dropout,
conv_init='uniform',
fc_init='normal',
static=True,
vectors=word_field.vocab.vectors)
elif args.snapshot is None and args.num_experts > 0:
word_cnn = [model.CNN_Text(class_num=args.class_num,
kernel_num=args.word_kernel_num,
kernel_sizes=args.word_kernel_sizes,
embed_num=len(word_field.vocab),
embed_dim=args.word_embed_dim,
dropout=args.word_dropout,
conv_init='uniform',
fc_init='normal',
static=True,
vectors=word_field.vocab.vectors)
for i in range(args.num_experts)]
else:
print('\nLoading model from [%s]...' % args.snapshot)
try:
word_cnn = torch.load(args.snapshot)
except:
print("Sorry, This snapshot doesn't exist.");
exit()
if args.num_experts > 0:
acc, word_cnn = train.ensemble_train(train_iter_word, dev_iter_word, word_cnn, args,
log_file_handle=log_file_handle)
else:
acc, word_cnn = train.train(train_iter_word, dev_iter_word, word_cnn, args, log_file_handle=log_file_handle)
word_dev_fold_accuracies.append(acc)
print("Completed fold {0}. Accuracy on Dev: {1} for WORD".format(xfold, acc), file=log_file_handle)
if args.eval_on_test:
if args.num_experts > 0:
result = train.ensemble_eval(test_iter_word, word_cnn, args, log_file_handle=log_file_handle)
else:
result = train.eval(test_iter_word, word_cnn, args, log_file_handle=log_file_handle)
word_test_fold_accuracies.append(result)
print("Completed fold {0}. Accuracy on Test: {1} for WORD".format(xfold, result))
print("Completed fold {0}. Accuracy on Test: {1} for WORD".format(xfold, result), file=log_file_handle)
# Ensemble training and dev
if use_char and use_word:
if update_args == True:
args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), 'LOGIT')
else:
args.save_dir = os.path.join(orig_save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), 'LOGIT')
update_args = False
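        # update_args is switched off after the first fold so the kernel-size
        # strings are parsed into lists only once and later folds build their
        # save paths from orig_save_dir.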
#
if args.snapshot is None:
final_logit = model.StackingNet(args)
else:
print('\nLoading model from [%s]...' % args.snapshot)
try:
final_logit = torch.load(args.snapshot)
except:
print("Sorry, This snapshot doesn't exist.");
exit()
train_iter, dev_iter, test_iter = vp(text_field,
label_field,
bound_field=bound_field,
path=data_dir,
filename=phn_file,
test_filename=phn_test_file,
test_batch_size=test_batch_size,
label_filename=phn_labels,
train_idxs=train_dialogues,
alt_file=args.char_alt_file,
alt_p=args.alt_prob,
foldid=None if no_test_split else xfold,
device=args.device,
repeat=False,
shuffle=False,
sort=False,
wv_type=None,
wv_dim=None,
wv_dir=None,
min_freq=1)
train_iter_word, dev_iter_word, test_iter_word = vp(word_field,
label_field,
path=data_dir,
filename=word_file,
test_filename=word_test_file,
test_batch_size=test_batch_size,
label_filename=word_labels,
train_idxs=train_dialogues,
alt_file=args.word_alt_file,
alt_p=args.alt_prob,
foldid=None if no_test_split else xfold,
device=args.device,
repeat=False,
sort=False,
shuffle=False,
wv_type=args.word_vector,
wv_dim=args.word_embed_dim,
wv_dir=args.emb_path,
min_freq=args.min_freq)
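        # The char and word iterators are rebuilt here with shuffle=False,
        # presumably so the two views of each example stay aligned when the
        # stacking network combines the char- and word-CNN outputs.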
acc = train.train_final_ensemble(train_iter, dev_iter, train_iter_word, dev_iter_word, char_cnn, word_cnn, final_logit,
args, two_ch=args.two_ch, log_file_handle=log_file_handle)
ensemble_dev_fold_accuracies.append(acc)
print("Completed fold {0}. Accuracy on Dev: {1} for LOGIT".format(xfold, acc), file=log_file_handle)
if args.eval_on_test:
# if test_file is not None:
# result = train.eval_final_ensemble(test_iter, test_iter_word, char_cnn, word_cnn, final_logit, args,
# log_file_handle=log_file_handle, prediction_file_handle=prediction_file_handle,
# labels=labels, inv_labels=inv_labels, full_dials=full_enh_dials, dialogues=enh_dial_idxs, indices=indices, fold_id=xfold,
# test_batch_size=test_batch_size)
# else:
result = train.eval_final_ensemble(test_iter, test_iter_word, char_cnn, word_cnn, final_logit, args, two_ch=args.two_ch,
log_file_handle=log_file_handle, prediction_file_handle=prediction_file_handle,
labels=labels, inv_labels=inv_labels, full_dials=full_dials, dialogues=test_dialogues, indices=fold_indices, fold_id=xfold,
test_batch_size=test_batch_size)
# if args.eval_on_test:
# result = train.eval_final_ensemble(test_iter, test_iter_word, char_cnn, word_cnn, final_logit, args, two_ch=args.two_ch,
# log_file_handle=log_file_handle, prediction_file_handle=prediction_file_handle,
# labels=labels, chats=chats, dialogues=dialogues, indices=indices, fold_id=xfold)
ensemble_test_fold_accuracies.append(result)
print("Completed fold {0}. Accuracy on Test: {1} for LOGIT".format(xfold, result))
print("Completed fold {0}. Accuracy on Test: {1} for LOGIT".format(xfold, result), file=log_file_handle)
log_file_handle.flush()
#if False: #args.eval_enh:
# print("Begin evaluation of enhanced set")
# enh_prediction_file_handle = open('predict_enh.txt', 'w')
# enh_char = vp_enh(text_field, label_field)
# enh_word = vp_enh(word_field, label_field)
# result = train.eval_final_ensemble(enh_char, enh_word, char_cnn, word_cnn, final_logit, args,
# log_file_handle=log_file_handle, prediction_file_handle=enh_prediction_file_handle,
# labels=labels, inv_labels=inv_labels, full_dials=full_enh_dials, dialogues=enh_dial_idxs,
# indices=[(0,len(full_enh_dials))], fold_id=0)
# enh_prediction_file_handle.close()
print("CHAR mean accuracy is {}, std is {}".format(np.mean(char_dev_fold_accuracies), np.std(char_dev_fold_accuracies)))
print("WORD mean accuracy is {}, std is {}".format(np.mean(word_dev_fold_accuracies), np.std(word_dev_fold_accuracies)))
print("LOGIT mean accuracy is {}, std is {}".format(np.mean(ensemble_dev_fold_accuracies), np.std(ensemble_dev_fold_accuracies)))
print("CHAR mean accuracy is {}, std is {}".format(np.mean(char_dev_fold_accuracies), np.std(char_dev_fold_accuracies)), file=log_file_handle)
print("WORD mean accuracy is {}, std is {}".format(np.mean(word_dev_fold_accuracies), np.std(word_dev_fold_accuracies)),
file=log_file_handle)
print("LOGIT mean accuracy is {}, std is {}".format(np.mean(ensemble_dev_fold_accuracies), np.std(ensemble_dev_fold_accuracies)), file=log_file_handle)
if char_test_fold_accuracies or word_test_fold_accuracies:
print("CHAR mean accuracy is {}, std is {}".format(np.mean(char_test_fold_accuracies), np.std(char_test_fold_accuracies)))
print("WORD mean accuracy is {}, std is {}".format(np.mean(word_test_fold_accuracies),
np.std(word_test_fold_accuracies)))
print("LOGIT mean accuracy is {}, std is {}".format(np.mean(ensemble_test_fold_accuracies), np.std(ensemble_test_fold_accuracies)))
print("CHAR mean accuracy is {}, std is {}".format(np.mean(char_test_fold_accuracies), np.std(char_test_fold_accuracies)), file=log_file_handle)
print("WORD mean accuracy is {}, std is {}".format(np.mean(word_test_fold_accuracies),
np.std(word_test_fold_accuracies)), file=log_file_handle)
print("LOGIT mean accuracy is {}, std is {}".format(np.mean(ensemble_test_fold_accuracies), np.std(ensemble_test_fold_accuracies)), file=log_file_handle)
log_file_handle.close()
prediction_file_handle.close()
| 59.22395
| 229
| 0.582285
|
3bbed73390848681b812f36b27590c8e4274c11d
| 2,920
|
py
|
Python
|
docs/source/conf.py
|
galipremsagar/cuxfilter
|
17de7c0851ce4f7ce53a2ead428fedf6545268b9
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
galipremsagar/cuxfilter
|
17de7c0851ce4f7ce53a2ead428fedf6545268b9
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
galipremsagar/cuxfilter
|
17de7c0851ce4f7ce53a2ead428fedf6545268b9
|
[
"Apache-2.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('...'))
# -- Project information -----------------------------------------------------
project = 'cuxfilter'
copyright = '2019, NVIDIA'
author = 'NVIDIA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '22.04'
# The full version, including alpha/beta/rc tags
release = '22.04.00'
nbsphinx_allow_errors = True
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"numpydoc",
"sphinx_markdown_tables",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"nbsphinx",
"recommonmark",
"jupyter_sphinx"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
# only import and set the theme if we're building docs locally
# otherwise, readthedocs.org uses their theme by default,
# so no need to specify it
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
htmlhelp_basename = "cuxfilterdoc"
# cuxfilter.load_notebook_assets()
def setup(app):
app.add_css_file('custom.css')
| 31.73913
| 79
| 0.684247
|
e06717cf2cbe13f0d5ad516fc3fc825943ab9cd5
| 117
|
py
|
Python
|
src/comments.py
|
joseluisbn/PythonLearningProject
|
87f21d3971eb44f288bc18ebdbbf5a74e1aeabaf
|
[
"MIT"
] | null | null | null |
src/comments.py
|
joseluisbn/PythonLearningProject
|
87f21d3971eb44f288bc18ebdbbf5a74e1aeabaf
|
[
"MIT"
] | null | null | null |
src/comments.py
|
joseluisbn/PythonLearningProject
|
87f21d3971eb44f288bc18ebdbbf5a74e1aeabaf
|
[
"MIT"
] | null | null | null |
# This is a single line comment.
"""
This is
a multiline
comment
"""
print("Comments are ignored by the compiler")
| 11.7
| 45
| 0.709402
|
253536c2c9934ec6cb4d859133b5407fef41c9de
| 895
|
py
|
Python
|
traiders/backend/api/views/__init__.py
|
rdilruba/bounswe2019group2
|
b373908a4a8e92481f359297aba07245f0a23c1c
|
[
"Apache-2.0"
] | 11
|
2019-02-15T12:08:32.000Z
|
2019-11-14T19:25:09.000Z
|
traiders/backend/api/views/__init__.py
|
bounswe/bounswe2019group2
|
05d41cf7b6bc1b3f994e82495d2a885a6eaa7cf3
|
[
"Apache-2.0"
] | 279
|
2019-02-13T14:57:39.000Z
|
2022-03-12T00:02:30.000Z
|
traiders/backend/api/views/__init__.py
|
rdilruba/bounswe2019group2
|
b373908a4a8e92481f359297aba07245f0a23c1c
|
[
"Apache-2.0"
] | 13
|
2019-03-20T08:30:55.000Z
|
2021-01-31T16:49:14.000Z
|
from .users import UserViewSet, UserSuccessViewSet
from .token import TokenViewSet
from .article import ArticleViewSet
from .equipment import EquipmentViewSet
from .parity import ParityViewSet, ParityLatestViewSet
from .comment import ArticleCommentViewSet, EquipmentCommentViewSet
from .mobile_app import latest_mobile_app
from .likes import LikeViewSet
from .following import FollowingViewSet
from .event import EventViewSet
from .prediction import PredictionViewSet
from .investment import ManualInvestmentViewSet, AssetViewSet, OnlineInvestmentViewSet, ProfitLossViewSet
from .order import BuyOrderViewSet, StopLossOrderViewSet
from .verify_email import verify_email
from .portfolio import PortfolioViewSet, PortfolioItemViewSet
from .search import SearchViewSet
from .notification import NotificationViewSet
from .recommendation import RecommendationViewSet
from .alert import AlertViewSet
| 44.75
| 105
| 0.875978
|
d9a8d06d1ba4429f7f0955b10d24eda7fb24ec91
| 5,635
|
py
|
Python
|
reid/datasets/ilids.py
|
ZhaoChuyang/dgreid
|
ee1d7af74b796f2f194307ab023e43ecc3d3d525
|
[
"MIT"
] | null | null | null |
reid/datasets/ilids.py
|
ZhaoChuyang/dgreid
|
ee1d7af74b796f2f194307ab023e43ecc3d3d525
|
[
"MIT"
] | null | null | null |
reid/datasets/ilids.py
|
ZhaoChuyang/dgreid
|
ee1d7af74b796f2f194307ab023e43ecc3d3d525
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function, absolute_import
import copy
import glob
import random
import os.path as osp
from collections import defaultdict
from ..utils.tools import read_json, write_json
from ..utils.data import BaseImageDataset
class iLIDS(BaseImageDataset):
"""QMUL-iLIDS.
Reference:
Zheng et al. Associating Groups of People. BMVC 2009.
Dataset statistics:
- identities: 119.
- images: 476.
- cameras: 8 (not explicitly provided).
"""
dataset_dir = 'ilids'
dataset_url = 'http://www.eecs.qmul.ac.uk/~jason/data/i-LIDS_Pedestrian.tgz'
dataset_name = 'ilids'
def __init__(self, root='', split_id=0, verbose=True, combineall=False, **kwargs):
super(iLIDS, self).__init__()
self.root = osp.abspath(osp.expanduser(root))
self.dataset_dir = self.root
self.download_dataset(self.dataset_dir, self.dataset_url)
self.data_dir = osp.join(self.dataset_dir, 'i-LIDS_Pedestrian/Persons')
self.split_path = osp.join(self.dataset_dir, 'splits.json')
required_files = [self.dataset_dir, self.data_dir]
self.check_before_run(required_files)
self.prepare_split()
splits = read_json(self.split_path)
if split_id >= len(splits):
raise ValueError(
'split_id exceeds range, received {}, but '
'expected between 0 and {}'.format(split_id,
len(splits) - 1)
)
split = splits[split_id]
train, query, gallery = self.process_split(split)
self.train = train
self.query = query
self.gallery = gallery
if combineall:
self.train = self.combine_all(train, query, gallery)
if verbose:
print("=> iLIDS loaded")
self.print_dataset_statistics(self.train, query, gallery)
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def prepare_split(self):
if not osp.exists(self.split_path):
print('Creating splits ...')
paths = glob.glob(osp.join(self.data_dir, '*.jpg'))
img_names = [osp.basename(path) for path in paths]
num_imgs = len(img_names)
assert num_imgs == 476, 'There should be 476 images, but ' \
'got {}, please check the data'.format(num_imgs)
# store image names
# image naming format:
# the first four digits denote the person ID
# the last four digits denote the sequence index
pid_dict = defaultdict(list)
for img_name in img_names:
pid = int(img_name[:4])
pid_dict[pid].append(img_name)
pids = list(pid_dict.keys())
num_pids = len(pids)
assert num_pids == 119, 'There should be 119 identities, ' \
'but got {}, please check the data'.format(num_pids)
num_train_pids = int(num_pids * 0.5)
splits = []
for _ in range(10):
# randomly choose num_train_pids train IDs and the rest for test IDs
pids_copy = copy.deepcopy(pids)
random.shuffle(pids_copy)
train_pids = pids_copy[:num_train_pids]
test_pids = pids_copy[num_train_pids:]
train = []
query = []
gallery = []
# for train IDs, all images are used in the train set.
for pid in train_pids:
img_names = pid_dict[pid]
train.extend(img_names)
# for each test ID, randomly choose two images, one for
# query and the other one for gallery.
for pid in test_pids:
img_names = pid_dict[pid]
samples = random.sample(img_names, 2)
query.append(samples[0])
gallery.append(samples[1])
split = {'train': train, 'query': query, 'gallery': gallery}
splits.append(split)
print('Totally {} splits are created'.format(len(splits)))
write_json(splits, self.split_path)
print('Split file is saved to {}'.format(self.split_path))
def get_pid2label(self, img_names):
pid_container = set()
for img_name in img_names:
pid = int(img_name[:4])
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
return pid2label
def parse_img_names(self, img_names, pid2label=None):
data = []
for img_name in img_names:
pid = int(img_name[:4])
if pid2label is not None:
pid = pid2label[pid]
camid = int(img_name[4:7]) - 1 # 0-based
img_path = osp.join(self.data_dir, img_name)
data.append((img_path, pid, camid))
return data
def process_split(self, split):
train_pid2label = self.get_pid2label(split['train'])
train = self.parse_img_names(split['train'], train_pid2label)
query = self.parse_img_names(split['query'])
gallery = self.parse_img_names(split['gallery'])
return train, query, gallery
| 37.317881
| 115
| 0.583851
|
64464bb6ee987d529e1b49496a45698d917bc3d5
| 25,573
|
py
|
Python
|
tests/test_circuit.py
|
AGaliciaMartinez/qutip-qip
|
73411ae884d117f05ff0ecb920ca055940fc76dd
|
[
"BSD-3-Clause"
] | 36
|
2020-05-22T10:51:13.000Z
|
2022-03-07T05:41:08.000Z
|
tests/test_circuit.py
|
AGaliciaMartinez/qutip-qip
|
73411ae884d117f05ff0ecb920ca055940fc76dd
|
[
"BSD-3-Clause"
] | 73
|
2020-07-14T07:26:48.000Z
|
2022-03-25T08:00:43.000Z
|
tests/test_circuit.py
|
AGaliciaMartinez/qutip-qip
|
73411ae884d117f05ff0ecb920ca055940fc76dd
|
[
"BSD-3-Clause"
] | 24
|
2020-06-18T22:59:20.000Z
|
2022-03-12T05:11:59.000Z
|
import pytest
import numpy as np
from pathlib import Path
from qutip_qip.circuit import (
QubitCircuit, CircuitSimulator, Measurement)
from qutip import (tensor, Qobj, ptrace, rand_ket, fock_dm, basis,
rand_dm, bell_state, ket2dm, identity, sigmax)
from qutip_qip.qasm import read_qasm
from qutip_qip.operations import (
Gate, gates, gate_sequence_product,
_ctrl_gates, _single_qubit_gates, _swap_like, _toffoli_like, _fredkin_like,
_para_gates
)
from qutip_qip.decompose.decompose_single_qubit_gate import _ZYZ_rotation
import qutip as qp
def _op_dist(A, B):
return (A - B).norm()
def _teleportation_circuit():
teleportation = QubitCircuit(3, num_cbits=2,
input_states=["q0", "0", "0", "c0", "c1"])
teleportation.add_gate("SNOT", targets=[1])
teleportation.add_gate("CNOT", targets=[2], controls=[1])
teleportation.add_gate("CNOT", targets=[1], controls=[0])
teleportation.add_gate("SNOT", targets=[0])
teleportation.add_measurement("M0", targets=[0], classical_store=1)
teleportation.add_measurement("M1", targets=[1], classical_store=0)
teleportation.add_gate("X", targets=[2], classical_controls=[0])
teleportation.add_gate("Z", targets=[2], classical_controls=[1])
return teleportation
def _teleportation_circuit2():
teleportation = QubitCircuit(3, num_cbits=2,
input_states=["q0", "0", "0", "c0", "c1"])
teleportation.add_gate("SNOT", targets=[1])
teleportation.add_gate("CNOT", targets=[2], controls=[1])
teleportation.add_gate("CNOT", targets=[1], controls=[0])
teleportation.add_gate("SNOT", targets=[0])
teleportation.add_gate("CNOT", targets=[2], controls=[1])
teleportation.add_gate("CZ", targets=[2], controls=[0])
return teleportation
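# _teleportation_circuit2 is the measurement-free variant: the mid-circuit
# measurements and classical controls are replaced by CNOT and CZ gates, and it
# is used to cross-check the density-matrix run in
# test_runstatistics_teleportation.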
def _measurement_circuit():
qc = QubitCircuit(2, num_cbits=2)
qc.add_measurement("M0", targets=[0], classical_store=0)
qc.add_measurement("M1", targets=[1], classical_store=1)
return qc
def _simulators_sv(qc):
sim_sv_precompute = CircuitSimulator(qc, mode="state_vector_simulator",
precompute_unitary=True)
sim_sv = CircuitSimulator(qc, mode="state_vector_simulator")
return [sim_sv_precompute, sim_sv]
def _simulators_dm(qc):
sim_dm_precompute = CircuitSimulator(qc, mode="density_matrix_simulator",
precompute_unitary=True)
sim_dm = CircuitSimulator(qc, mode="density_matrix_simulator")
return [sim_dm_precompute, sim_dm]
class TestQubitCircuit:
"""
A test class for the QuTiP functions for Circuit resolution.
"""
@pytest.mark.parametrize(["gate_from", "gate_to", "targets", "controls"], [
pytest.param("SWAP", "CNOT",
[0, 1], None, id="SWAPtoCNOT"),
pytest.param("ISWAP", "CNOT",
[0, 1], None, id="ISWAPtoCNOT"),
pytest.param("CSIGN", "CNOT",
[1], [0], id="CSIGNtoCNOT"),
pytest.param("CNOT", "CSIGN",
[0], [1], id="CNOTtoCSIGN"),
pytest.param("CNOT", "SQRTSWAP",
[0], [1], id="CNOTtoSQRTSWAP"),
pytest.param("CNOT", "SQRTISWAP",
[0], [1], id="CNOTtoSQRTISWAP"),
pytest.param("CNOT", "ISWAP",
[0], [1], id="CNOTtoISWAP")])
def testresolve(self, gate_from, gate_to, targets, controls):
qc1 = QubitCircuit(2)
qc1.add_gate(gate_from, targets=targets, controls=controls)
U1 = gates.gate_sequence_product(qc1.propagators())
qc2 = qc1.resolve_gates(basis=gate_to)
U2 = gates.gate_sequence_product(qc2.propagators())
assert _op_dist(U1, U2) < 1e-12
def testSNOTdecompose(self):
"""
SNOT to rotation: compare unitary matrix for SNOT and product of
resolved matrices in terms of rotation gates.
"""
qc1 = QubitCircuit(1)
qc1.add_gate("SNOT", targets=0)
U1 = gates.gate_sequence_product(qc1.propagators())
qc2 = qc1.resolve_gates()
U2 = gates.gate_sequence_product(qc2.propagators())
assert _op_dist(U1, U2) < 1e-12
def testFREDKINdecompose(self):
"""
FREDKIN to rotation and CNOT: compare unitary matrix for FREDKIN and product of
resolved matrices in terms of rotation gates and CNOT.
"""
qc1 = QubitCircuit(3)
qc1.add_gate("FREDKIN", targets=[0, 1], controls=[2])
U1 = gates.gate_sequence_product(qc1.propagators())
qc2 = qc1.resolve_gates()
U2 = gates.gate_sequence_product(qc2.propagators())
assert _op_dist(U1, U2) < 1e-12
def testadjacentgates(self):
"""
Adjacent Gates: compare unitary matrix for ISWAP and product of
resolved matrices in terms of adjacent gates interaction.
"""
qc1 = QubitCircuit(3)
qc1.add_gate("ISWAP", targets=[0, 2])
U1 = gates.gate_sequence_product(qc1.propagators())
qc0 = qc1.adjacent_gates()
qc2 = qc0.resolve_gates(basis="ISWAP")
U2 = gates.gate_sequence_product(qc2.propagators())
assert _op_dist(U1, U2) < 1e-12
def test_add_gate(self):
"""
Addition of a gate object directly to a `QubitCircuit`
"""
qc = QubitCircuit(6)
qc.add_gate("CNOT", targets=[1], controls=[0])
test_gate = Gate("SWAP", targets=[1, 4])
qc.add_gate(test_gate)
qc.add_gate("TOFFOLI", controls=[0, 1], targets=[2])
qc.add_gate("SNOT", targets=[3])
qc.add_gate(test_gate, index=[3])
qc.add_1q_gate("RY", start=4, end=5, arg_value=1.570796)
# Test explicit gate addition
assert qc.gates[0].name == "CNOT"
assert qc.gates[0].targets == [1]
assert qc.gates[0].controls == [0]
# Test direct gate addition
assert qc.gates[1].name == test_gate.name
assert qc.gates[1].targets == test_gate.targets
# Test specified position gate addition
assert qc.gates[3].name == test_gate.name
assert qc.gates[3].targets == test_gate.targets
# Test adding 1 qubit gate on [start, end] qubits
assert qc.gates[5].name == "RY"
assert qc.gates[5].targets == [4]
assert qc.gates[5].arg_value == 1.570796
assert qc.gates[6].name == "RY"
assert qc.gates[6].targets == [5]
        assert qc.gates[6].arg_value == 1.570796
# Test Exceptions # Global phase is not included
for gate in _single_qubit_gates:
if gate not in _para_gates:
# No target
pytest.raises(ValueError, qc.add_gate, gate, None, None)
# Multiple targets
pytest.raises(ValueError, qc.add_gate, gate, [0, 1, 2], None)
# With control
pytest.raises(ValueError, qc.add_gate, gate, [0], [1])
else:
# No target
pytest.raises(ValueError, qc.add_gate, gate, None, None, 1)
# Multiple targets
pytest.raises(ValueError, qc.add_gate, gate, [0, 1, 2], None, 1)
# With control
pytest.raises(ValueError, qc.add_gate, gate, [0], [1], 1)
for gate in _ctrl_gates:
if gate not in _para_gates:
# No target
pytest.raises(ValueError, qc.add_gate, gate, None, [1])
# No control
pytest.raises(ValueError, qc.add_gate, gate, [0], None)
else:
# No target
pytest.raises(ValueError, qc.add_gate, gate, None, [1], 1)
# No control
pytest.raises(ValueError, qc.add_gate, gate, [0], None, 1)
for gate in _swap_like:
if gate not in _para_gates:
# Single target
pytest.raises(ValueError, qc.add_gate, gate, [0], None)
# With control
pytest.raises(ValueError, qc.add_gate, gate, [0, 1], [3])
else:
# Single target
pytest.raises(ValueError, qc.add_gate, gate, [0], None, 1)
# With control
pytest.raises(ValueError, qc.add_gate, gate, [0, 1], [3], 1)
for gate in _fredkin_like:
# Single target
pytest.raises(ValueError, qc.add_gate, gate, [0], [2])
# No control
pytest.raises(ValueError, qc.add_gate, gate, [0, 1], None)
for gate in _toffoli_like:
# No target
pytest.raises(ValueError, qc.add_gate, gate, None, [1, 2])
# Single control
pytest.raises(ValueError, qc.add_gate, gate, [0], [1])
def test_add_circuit(self):
"""
Addition of a circuit to a `QubitCircuit`
"""
qc = QubitCircuit(6)
qc.add_gate("CNOT", targets=[1], controls=[0])
test_gate = Gate("SWAP", targets=[1, 4])
qc.add_gate(test_gate)
qc.add_gate("TOFFOLI", controls=[0, 1], targets=[2])
qc.add_gate("SNOT", targets=[3])
qc.add_gate(test_gate, index=[3])
qc.add_measurement("M0", targets=[0], classical_store=[1])
qc.add_1q_gate("RY", start=4, end=5, arg_value=1.570796)
qc1 = QubitCircuit(6)
qc1.add_circuit(qc)
# Test if all gates and measurements are added
assert len(qc1.gates) == len(qc.gates)
for i in range(len(qc1.gates)):
assert (qc1.gates[i].name
== qc.gates[i].name)
assert (qc1.gates[i].targets
== qc.gates[i].targets)
if (isinstance(qc1.gates[i], Gate) and
isinstance(qc.gates[i], Gate)):
assert (qc1.gates[i].controls
== qc.gates[i].controls)
assert (qc1.gates[i].classical_controls
== qc.gates[i].classical_controls)
elif (isinstance(qc1.gates[i], Measurement) and
isinstance(qc.gates[i], Measurement)):
assert (qc1.gates[i].classical_store
== qc.gates[i].classical_store)
# Test exception when qubit out of range
pytest.raises(NotImplementedError, qc1.add_circuit, qc, start=4)
qc2 = QubitCircuit(8)
qc2.add_circuit(qc, start=2)
# Test if all gates are added
assert len(qc2.gates) == len(qc.gates)
# Test if the positions are correct
for i in range(len(qc2.gates)):
if qc.gates[i].targets is not None:
assert (qc2.gates[i].targets[0]
== qc.gates[i].targets[0]+2)
if (isinstance(qc.gates[i], Gate) and
qc.gates[i].controls is not None):
assert (qc2.gates[i].controls[0]
== qc.gates[i].controls[0]+2)
def test_add_state(self):
"""
Addition of input and output states to a circuit.
"""
qc = QubitCircuit(3)
qc.add_state("0", targets=[0])
qc.add_state("+", targets=[1], state_type="output")
qc.add_state("-", targets=[1])
assert qc.input_states[0] == "0"
assert qc.input_states[2] is None
assert qc.output_states[1] == "+"
qc1 = QubitCircuit(10)
qc1.add_state("0", targets=[2, 3, 5, 6])
qc1.add_state("+", targets=[1, 4, 9])
qc1.add_state("A", targets=[1, 4, 9], state_type="output")
qc1.add_state("A", targets=[1, 4, 9], state_type="output")
qc1.add_state("beta", targets=[0], state_type="output")
assert qc1.input_states[0] is None
assert qc1.input_states[2] == "0"
assert qc1.input_states[3] == "0"
assert qc1.input_states[6] == "0"
assert qc1.input_states[1] == "+"
assert qc1.input_states[4] == "+"
assert qc1.output_states[2] is None
assert qc1.output_states[1] == "A"
assert qc1.output_states[4] == "A"
assert qc1.output_states[9] == "A"
assert qc1.output_states[0] == "beta"
def test_add_measurement(self):
"""
Addition of Measurement Object to a circuit.
"""
qc = QubitCircuit(3, num_cbits=2)
qc.add_measurement("M0", targets=[0], classical_store=1)
qc.add_gate("CNOT", targets=[1], controls=[0])
qc.add_gate("TOFFOLI", controls=[0, 1], targets=[2])
qc.add_measurement("M1", targets=[2], classical_store=0)
qc.add_gate("SNOT", targets=[1], classical_controls=[0, 1])
qc.add_measurement("M2", targets=[1])
# checking correct addition of measurements
assert qc.gates[0].targets[0] == 0
assert qc.gates[0].classical_store == 1
assert qc.gates[3].name == "M1"
assert qc.gates[5].classical_store is None
# checking if gates are added correctly with measurements
assert qc.gates[2].name == "TOFFOLI"
assert qc.gates[4].classical_controls == [0, 1]
@pytest.mark.parametrize('gate', ['X', 'Y', 'Z', 'S', 'T'])
def test_exceptions(self, gate):
"""
        Test exceptions are thrown correctly for inadequate inputs
"""
qc = QubitCircuit(2)
pytest.raises(ValueError, qc.add_gate, gate, targets=[1], controls=[0])
@pytest.mark.parametrize('gate', ['CY', 'CZ', 'CS', 'CT'])
def test_exceptions_controlled(self, gate):
"""
        Test exceptions are thrown correctly for inadequate inputs
"""
qc = QubitCircuit(2)
'''
pytest.raises(ValueError, qc.add_gate, gate,
targets=[1], controls=[0])
'''
pytest.raises(ValueError, qc.add_gate, gate,
targets=[1])
pytest.raises(ValueError, qc.add_gate, gate)
def test_globalphase_gate_propagators(self):
qc = QubitCircuit(2)
qc.add_gate("GLOBALPHASE", arg_value=np.pi / 2)
[gate] = qc.gates
assert gate.name == "GLOBALPHASE"
assert gate.arg_value == np.pi / 2
[U_expanded] = qc.propagators()
assert U_expanded == 1j * qp.qeye([2, 2])
[U_unexpanded] = qc.propagators(expand=False)
assert U_unexpanded == 1j * qp.qeye([2, 2])
def test_single_qubit_gates(self):
"""
        Test single qubit gates are added correctly
"""
qc = QubitCircuit(3)
qc.add_gate("X", targets=[0])
qc.add_gate("CY", targets=[1], controls=[0])
qc.add_gate("Y", targets=[2])
qc.add_gate("CS", targets=[0], controls=[1])
qc.add_gate("Z", targets=[1])
qc.add_gate("CT", targets=[2], controls=[2])
qc.add_gate("CZ", targets=[0], controls=[0])
qc.add_gate("S", targets=[1])
qc.add_gate("T", targets=[2])
assert qc.gates[8].name == "T"
assert qc.gates[7].name == "S"
assert qc.gates[6].name == "CZ"
assert qc.gates[5].name == "CT"
assert qc.gates[4].name == "Z"
assert qc.gates[3].name == "CS"
assert qc.gates[2].name == "Y"
assert qc.gates[1].name == "CY"
assert qc.gates[0].name == "X"
assert qc.gates[8].targets == [2]
assert qc.gates[7].targets == [1]
assert qc.gates[6].targets == [0]
assert qc.gates[5].targets == [2]
assert qc.gates[4].targets == [1]
assert qc.gates[3].targets == [0]
assert qc.gates[2].targets == [2]
assert qc.gates[1].targets == [1]
assert qc.gates[0].targets == [0]
assert qc.gates[6].controls == [0]
assert qc.gates[5].controls == [2]
assert qc.gates[3].controls == [1]
assert qc.gates[1].controls == [0]
def test_reverse(self):
"""
Reverse a quantum circuit
"""
qc = QubitCircuit(3)
qc.add_gate("RX", targets=[0], arg_value=3.141,
arg_label=r"\pi/2")
qc.add_gate("CNOT", targets=[1], controls=[0])
qc.add_measurement("M1", targets=[1])
qc.add_gate("SNOT", targets=[2])
# Keep input output same
qc.add_state("0", targets=[0])
qc.add_state("+", targets=[1], state_type="output")
qc.add_state("-", targets=[1])
qc_rev = qc.reverse_circuit()
assert qc_rev.gates[0].name == "SNOT"
assert qc_rev.gates[1].name == "M1"
assert qc_rev.gates[2].name == "CNOT"
assert qc_rev.gates[3].name == "RX"
assert qc_rev.input_states[0] == "0"
assert qc_rev.input_states[2] is None
assert qc_rev.output_states[1] == "+"
def test_user_gate(self):
"""
User defined gate for QubitCircuit
"""
def customer_gate1(arg_values):
mat = np.zeros((4, 4), dtype=np.complex128)
mat[0, 0] = mat[1, 1] = 1.
mat[2:4, 2:4] = gates.rx(arg_values)
return Qobj(mat, dims=[[2, 2], [2, 2]])
def customer_gate2():
mat = np.array([[1., 0],
[0., 1.j]])
return Qobj(mat, dims=[[2], [2]])
qc = QubitCircuit(3)
qc.user_gates = {"CTRLRX": customer_gate1,
"T1": customer_gate2}
qc.add_gate("CTRLRX", targets=[1, 2], arg_value=np.pi/2)
qc.add_gate("T1", targets=[1])
props = qc.propagators()
result1 = tensor(identity(2), customer_gate1(np.pi/2))
np.testing.assert_allclose(props[0], result1)
result2 = tensor(identity(2), customer_gate2(), identity(2))
np.testing.assert_allclose(props[1], result2)
def test_N_level_system(self):
"""
Test for circuit with N-level system.
"""
mat3 = rand_dm(3, density=1.)
def controlled_mat3(arg_value):
"""
A qubit control an operator acting on a 3 level system
"""
control_value = arg_value
dim = mat3.dims[0][0]
return (tensor(fock_dm(2, control_value), mat3) +
tensor(fock_dm(2, 1 - control_value), identity(dim)))
qc = QubitCircuit(2, dims=[3, 2])
qc.user_gates = {"CTRLMAT3": controlled_mat3}
qc.add_gate("CTRLMAT3", targets=[1, 0], arg_value=1)
props = qc.propagators()
np.testing.assert_allclose(mat3, ptrace(props[0], 0) - 1)
@pytest.mark.repeat(10)
def test_run_teleportation(self):
"""
Test circuit run and mid-circuit measurement functionality
by repeating the teleportation circuit on multiple random kets
"""
teleportation = _teleportation_circuit()
state = tensor(rand_ket(2), basis(2, 0), basis(2, 0))
initial_measurement = Measurement("start", targets=[0])
_, initial_probabilities = initial_measurement.measurement_comp_basis(state)
teleportation_sim = CircuitSimulator(teleportation)
teleportation_sim_results = teleportation_sim.run(state)
state_final = teleportation_sim_results.get_final_states(0)
probability = teleportation_sim_results.get_probabilities(0)
final_measurement = Measurement("start", targets=[2])
_, final_probabilities = final_measurement.measurement_comp_basis(state_final)
np.testing.assert_allclose(initial_probabilities, final_probabilities)
def test_runstatistics_teleportation(self):
"""
Test circuit run_statistics on teleportation circuit
"""
teleportation = _teleportation_circuit()
final_measurement = Measurement("start", targets=[2])
initial_measurement = Measurement("start", targets=[0])
original_state = tensor(rand_ket(2), basis(2, 0), basis(2, 0))
_, initial_probabilities = initial_measurement.measurement_comp_basis(original_state)
teleportation_results = teleportation.run_statistics(original_state)
states = teleportation_results.get_final_states()
probabilities = teleportation_results.get_probabilities()
for i, state in enumerate(states):
state_final = state
prob = probabilities[i]
_, final_probabilities = final_measurement.measurement_comp_basis(state_final)
np.testing.assert_allclose(initial_probabilities,
final_probabilities)
assert prob == pytest.approx(0.25, abs=1e-7)
mixed_state = sum(p * ket2dm(s) for p, s in zip(probabilities, states))
dm_state = ket2dm(original_state)
teleportation2 = _teleportation_circuit2()
final_state = teleportation2.run(dm_state)
_, probs1 = final_measurement.measurement_comp_basis(final_state)
_, probs2 = final_measurement.measurement_comp_basis(mixed_state)
np.testing.assert_allclose(probs1, probs2)
def test_measurement_circuit(self):
qc = _measurement_circuit()
simulators = _simulators_sv(qc)
labels = ["00", "01", "10", "11"]
for label in labels:
state = bell_state(label)
for i, simulator in enumerate(simulators):
simulator.run(state)
if label[0] == "0":
assert simulator.cbits[0] == simulator.cbits[1]
else:
assert simulator.cbits[0] != simulator.cbits[1]
def test_gate_product(self):
filename = "qft.qasm"
filepath = Path(__file__).parent / 'qasm_files' / filename
qc = read_qasm(filepath)
U_list_expanded = qc.propagators()
U_list = qc.propagators(expand=False)
inds_list = []
for gate in qc.gates:
if isinstance(gate, Measurement):
continue
else:
inds_list.append(gate.get_all_qubits())
U_1, _ = gate_sequence_product(U_list,
inds_list=inds_list,
expand=True)
U_2 = gate_sequence_product(U_list_expanded, left_to_right=True,
expand=False)
np.testing.assert_allclose(U_1, U_2)
def test_wstate(self):
filename = "w-state.qasm"
filepath = Path(__file__).parent / 'qasm_files' / filename
qc = read_qasm(filepath)
rand_state = rand_ket(2)
wstate = (tensor(basis(2, 0), basis(2, 0), basis(2, 1))
+ tensor(basis(2, 0), basis(2, 1), basis(2, 0))
+ tensor(basis(2, 1), basis(2, 0), basis(2, 0))).unit()
state = tensor(tensor(basis(2, 0), basis(2, 0), basis(2, 0)),
rand_state)
fourth = Measurement("test_rand", targets=[3])
_, probs_initial = fourth.measurement_comp_basis(state)
simulators = _simulators_sv(qc)
for simulator in simulators:
result = simulator.run_statistics(state)
final_states = result.get_final_states()
result_cbits = result.get_cbits()
for i, final_state in enumerate(final_states):
_, probs_final = fourth.measurement_comp_basis(final_state)
np.testing.assert_allclose(probs_initial, probs_final)
assert sum(result_cbits[i]) == 1
def test_latex_code_teleportation_circuit(self):
qc = _teleportation_circuit()
latex = qc.latex_code()
assert latex == "\n".join([
r" & \lstick{c1} & \qw & \qw & \qw & \qw"
r" & \qw \cwx[4] & \qw & \qw & \ctrl{2} & \qw \\ ",
r" & \lstick{c0} & \qw & \qw & \qw & \qw"
r" & \qw & \qw \cwx[2] & \ctrl{1} & \qw & \qw \\ ",
r" & \lstick{\ket{0}} & \qw & \targ & \qw & \qw"
r" & \qw & \qw & \gate{X} & \gate{Z} & \qw \\ ",
r" & \lstick{\ket{0}} & \gate{{\rm H}} & \ctrl{-1} &"
r" \targ & \qw & \qw & \meter & \qw & \qw & \qw \\ ",
r" & \lstick{\ket{q0}} & \qw & \qw & \ctrl{-1} &"
r" \gate{{\rm H}} & \meter & \qw & \qw & \qw & \qw \\ ",
"",
])
H = Qobj([[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)]])
H_zyz_gates = _ZYZ_rotation(H)
H_zyz_quantum_circuit = QubitCircuit(1)
H_zyz_quantum_circuit.add_gates(H_zyz_gates)
sigmax_zyz_gates = _ZYZ_rotation(sigmax())
sigmax_zyz_quantum_circuit = QubitCircuit(1)
sigmax_zyz_quantum_circuit.add_gates(sigmax_zyz_gates)
@pytest.mark.parametrize(
"valid_input, correct_result",
[(H_zyz_gates, H),
(sigmax_zyz_gates, sigmax())]
)
def test_add_gates(self, valid_input, correct_result):
circuit = QubitCircuit(1)
circuit.add_gates(valid_input)
result = gate_sequence_product(circuit.propagators())
assert(result == correct_result)
@pytest.mark.parametrize(
"valid_input, correct_result",
[(H_zyz_quantum_circuit, H),
(sigmax_zyz_quantum_circuit, sigmax())]
)
def test_compute_unitary(
self, valid_input, correct_result):
final_output = valid_input.compute_unitary()
assert(isinstance(final_output, Qobj))
assert(final_output == correct_result)
| 37.885926
| 93
| 0.569859
|
5cfd413ab88fae00a3f94a1bc7b3026ae541cb60
| 1,001
|
py
|
Python
|
NPC.py
|
bhuvan21/TBGC
|
7cb26d64b61fa0347199ff4ca15830516395bd2f
|
[
"MIT"
] | null | null | null |
NPC.py
|
bhuvan21/TBGC
|
7cb26d64b61fa0347199ff4ca15830516395bd2f
|
[
"MIT"
] | null | null | null |
NPC.py
|
bhuvan21/TBGC
|
7cb26d64b61fa0347199ff4ca15830516395bd2f
|
[
"MIT"
] | null | null | null |
from utils import numbered_choice
import Location
from Conversation import Conversation
# NPC class, instantiated for every NPC in the game
class NPC:
def __init__(self, obj):
attribs = obj.attrib
        # set required attributes
self.name = attribs["name"]
self.intro = attribs["intro"]
self.children = []
self.children_names = []
# instantiate children conversations
for element in obj:
if element.tag == "Conversation":
self.children.append(Conversation(element))
self.children_names.append(element.attrib["name"])
# initiate talking with an npc
def visit(self):
# TODO MAKE THE REPROMPT SPECIFIABLE
choice = self.children[numbered_choice(self.children_names, self.intro, "Eh?")]
if type(choice) == Conversation:
choice.start()
self.visit()
elif type(choice) == Location.Location:
choice.goto()
| 32.290323
| 87
| 0.621379
|
647b010e1ddad0e13fff033d06c3ac4f14b895cf
| 23,613
|
py
|
Python
|
service/generated_flatbuffers/tflite/Operator.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 213
|
2021-06-11T01:15:16.000Z
|
2022-02-25T16:18:57.000Z
|
service/generated_flatbuffers/tflite/Operator.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 32
|
2021-06-17T17:58:54.000Z
|
2022-02-02T05:58:10.000Z
|
service/generated_flatbuffers/tflite/Operator.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 28
|
2021-06-17T17:34:21.000Z
|
2022-03-24T14:05:20.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Operator(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsOperator(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Operator()
x.Init(buf, n + offset)
return x
@classmethod
def OperatorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# Operator
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Operator
def OpcodeIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Operator
def Inputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# Operator
def InputsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# Operator
def InputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Operator
def InputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# Operator
def Outputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# Operator
def OutputsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# Operator
def OutputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Operator
def OutputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
# Operator
def BuiltinOptionsType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Operator
def BuiltinOptions(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Operator
def CustomOptions(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Operator
def CustomOptionsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Operator
def CustomOptionsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Operator
def CustomOptionsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
return o == 0
# Operator
def CustomOptionsFormat(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Operator
def MutatingVariableInputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Operator
def MutatingVariableInputsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
return 0
# Operator
def MutatingVariableInputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Operator
def MutatingVariableInputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
return o == 0
# Operator
def Intermediates(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# Operator
def IntermediatesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# Operator
def IntermediatesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Operator
def IntermediatesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
return o == 0
def OperatorStart(builder): builder.StartObject(9)
def OperatorAddOpcodeIndex(builder, opcodeIndex): builder.PrependUint32Slot(0, opcodeIndex, 0)
def OperatorAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
def OperatorStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def OperatorAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
def OperatorStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def OperatorAddBuiltinOptionsType(builder, builtinOptionsType): builder.PrependUint8Slot(3, builtinOptionsType, 0)
def OperatorAddBuiltinOptions(builder, builtinOptions): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions), 0)
def OperatorAddCustomOptions(builder, customOptions): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(customOptions), 0)
def OperatorStartCustomOptionsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def OperatorAddCustomOptionsFormat(builder, customOptionsFormat): builder.PrependInt8Slot(6, customOptionsFormat, 0)
def OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(mutatingVariableInputs), 0)
def OperatorStartMutatingVariableInputsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def OperatorAddIntermediates(builder, intermediates): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(intermediates), 0)
def OperatorStartIntermediatesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def OperatorEnd(builder): return builder.EndObject()
import tflite.AbsOptions
import tflite.AddNOptions
import tflite.AddOptions
import tflite.ArgMaxOptions
import tflite.ArgMinOptions
import tflite.BatchMatMulOptions
import tflite.BatchToSpaceNDOptions
import tflite.BidirectionalSequenceLSTMOptions
import tflite.BidirectionalSequenceRNNOptions
import tflite.BroadcastToOptions
import tflite.BuiltinOptions
import tflite.CallOnceOptions
import tflite.CallOptions
import tflite.CastOptions
import tflite.ConcatEmbeddingsOptions
import tflite.ConcatenationOptions
import tflite.Conv2DOptions
import tflite.Conv3DOptions
import tflite.CosOptions
import tflite.CumsumOptions
import tflite.DensifyOptions
import tflite.DepthToSpaceOptions
import tflite.DepthwiseConv2DOptions
import tflite.DequantizeOptions
import tflite.DivOptions
import tflite.EmbeddingLookupSparseOptions
import tflite.EqualOptions
import tflite.ExpOptions
import tflite.ExpandDimsOptions
import tflite.FakeQuantOptions
import tflite.FillOptions
import tflite.FloorDivOptions
import tflite.FloorModOptions
import tflite.FullyConnectedOptions
import tflite.GatherNdOptions
import tflite.GatherOptions
import tflite.GreaterEqualOptions
import tflite.GreaterOptions
import tflite.HardSwishOptions
import tflite.HashtableFindOptions
import tflite.HashtableImportOptions
import tflite.HashtableOptions
import tflite.HashtableSizeOptions
import tflite.IfOptions
import tflite.L2NormOptions
import tflite.LSHProjectionOptions
import tflite.LSTMOptions
import tflite.LeakyReluOptions
import tflite.LessEqualOptions
import tflite.LessOptions
import tflite.LocalResponseNormalizationOptions
import tflite.LogSoftmaxOptions
import tflite.LogicalAndOptions
import tflite.LogicalNotOptions
import tflite.LogicalOrOptions
import tflite.MatrixDiagOptions
import tflite.MatrixSetDiagOptions
import tflite.MaximumMinimumOptions
import tflite.MirrorPadOptions
import tflite.MulOptions
import tflite.NegOptions
import tflite.NonMaxSuppressionV4Options
import tflite.NonMaxSuppressionV5Options
import tflite.NotEqualOptions
import tflite.OneHotOptions
import tflite.PackOptions
import tflite.PadOptions
import tflite.PadV2Options
import tflite.Pool2DOptions
import tflite.PowOptions
import tflite.QuantizeOptions
import tflite.RNNOptions
import tflite.RangeOptions
import tflite.RankOptions
import tflite.ReducerOptions
import tflite.ReshapeOptions
import tflite.ResizeBilinearOptions
import tflite.ResizeNearestNeighborOptions
import tflite.ReverseSequenceOptions
import tflite.ReverseV2Options
import tflite.Rfft2dOptions
import tflite.SVDFOptions
import tflite.ScatterNdOptions
import tflite.SegmentSumOptions
import tflite.SelectOptions
import tflite.SelectV2Options
import tflite.SequenceRNNOptions
import tflite.ShapeOptions
import tflite.SkipGramOptions
import tflite.SliceOptions
import tflite.SoftmaxOptions
import tflite.SpaceToBatchNDOptions
import tflite.SpaceToDepthOptions
import tflite.SparseToDenseOptions
import tflite.SplitOptions
import tflite.SplitVOptions
import tflite.SquareOptions
import tflite.SquaredDifferenceOptions
import tflite.SqueezeOptions
import tflite.StridedSliceOptions
import tflite.SubOptions
import tflite.TileOptions
import tflite.TopKV2Options
import tflite.TransposeConvOptions
import tflite.TransposeOptions
import tflite.UnidirectionalSequenceLSTMOptions
import tflite.UniqueOptions
import tflite.UnpackOptions
import tflite.WhereOptions
import tflite.WhileOptions
import tflite.ZerosLikeOptions
try:
from typing import List, Union
except:
pass
class OperatorT(object):
# OperatorT
def __init__(self):
self.opcodeIndex = 0 # type: int
self.inputs = None # type: List[int]
self.outputs = None # type: List[int]
self.builtinOptionsType = 0 # type: int
        self.builtinOptions = None  # type: Union[None, tflite.Conv2DOptions.Conv2DOptionsT, tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptionsT, tflite.ConcatEmbeddingsOptions.ConcatEmbeddingsOptionsT, tflite.LSHProjectionOptions.LSHProjectionOptionsT, tflite.Pool2DOptions.Pool2DOptionsT, tflite.SVDFOptions.SVDFOptionsT, tflite.RNNOptions.RNNOptionsT, tflite.FullyConnectedOptions.FullyConnectedOptionsT, tflite.SoftmaxOptions.SoftmaxOptionsT, tflite.ConcatenationOptions.ConcatenationOptionsT, tflite.AddOptions.AddOptionsT, tflite.L2NormOptions.L2NormOptionsT, tflite.LocalResponseNormalizationOptions.LocalResponseNormalizationOptionsT, tflite.LSTMOptions.LSTMOptionsT, tflite.ResizeBilinearOptions.ResizeBilinearOptionsT, tflite.CallOptions.CallOptionsT, tflite.ReshapeOptions.ReshapeOptionsT, tflite.SkipGramOptions.SkipGramOptionsT, tflite.SpaceToDepthOptions.SpaceToDepthOptionsT, tflite.EmbeddingLookupSparseOptions.EmbeddingLookupSparseOptionsT, tflite.MulOptions.MulOptionsT, tflite.PadOptions.PadOptionsT, tflite.GatherOptions.GatherOptionsT, tflite.BatchToSpaceNDOptions.BatchToSpaceNDOptionsT, tflite.SpaceToBatchNDOptions.SpaceToBatchNDOptionsT, tflite.TransposeOptions.TransposeOptionsT, tflite.ReducerOptions.ReducerOptionsT, tflite.SubOptions.SubOptionsT, tflite.DivOptions.DivOptionsT, tflite.SqueezeOptions.SqueezeOptionsT, tflite.SequenceRNNOptions.SequenceRNNOptionsT, tflite.StridedSliceOptions.StridedSliceOptionsT, tflite.ExpOptions.ExpOptionsT, tflite.TopKV2Options.TopKV2OptionsT, tflite.SplitOptions.SplitOptionsT, tflite.LogSoftmaxOptions.LogSoftmaxOptionsT, tflite.CastOptions.CastOptionsT, tflite.DequantizeOptions.DequantizeOptionsT, tflite.MaximumMinimumOptions.MaximumMinimumOptionsT, tflite.ArgMaxOptions.ArgMaxOptionsT, tflite.LessOptions.LessOptionsT, tflite.NegOptions.NegOptionsT, tflite.PadV2Options.PadV2OptionsT, tflite.GreaterOptions.GreaterOptionsT, tflite.GreaterEqualOptions.GreaterEqualOptionsT, tflite.LessEqualOptions.LessEqualOptionsT, tflite.SelectOptions.SelectOptionsT, tflite.SliceOptions.SliceOptionsT, tflite.TransposeConvOptions.TransposeConvOptionsT, tflite.SparseToDenseOptions.SparseToDenseOptionsT, tflite.TileOptions.TileOptionsT, tflite.ExpandDimsOptions.ExpandDimsOptionsT, tflite.EqualOptions.EqualOptionsT, tflite.NotEqualOptions.NotEqualOptionsT, tflite.ShapeOptions.ShapeOptionsT, tflite.PowOptions.PowOptionsT, tflite.ArgMinOptions.ArgMinOptionsT, tflite.FakeQuantOptions.FakeQuantOptionsT, tflite.PackOptions.PackOptionsT, tflite.LogicalOrOptions.LogicalOrOptionsT, tflite.OneHotOptions.OneHotOptionsT, tflite.LogicalAndOptions.LogicalAndOptionsT, tflite.LogicalNotOptions.LogicalNotOptionsT, tflite.UnpackOptions.UnpackOptionsT, tflite.FloorDivOptions.FloorDivOptionsT, tflite.SquareOptions.SquareOptionsT, tflite.ZerosLikeOptions.ZerosLikeOptionsT, tflite.FillOptions.FillOptionsT, tflite.BidirectionalSequenceLSTMOptions.BidirectionalSequenceLSTMOptionsT, tflite.BidirectionalSequenceRNNOptions.BidirectionalSequenceRNNOptionsT, tflite.UnidirectionalSequenceLSTMOptions.UnidirectionalSequenceLSTMOptionsT, tflite.FloorModOptions.FloorModOptionsT, tflite.RangeOptions.RangeOptionsT, tflite.ResizeNearestNeighborOptions.ResizeNearestNeighborOptionsT, tflite.LeakyReluOptions.LeakyReluOptionsT, tflite.SquaredDifferenceOptions.SquaredDifferenceOptionsT, tflite.MirrorPadOptions.MirrorPadOptionsT, tflite.AbsOptions.AbsOptionsT, tflite.SplitVOptions.SplitVOptionsT, tflite.UniqueOptions.UniqueOptionsT, tflite.ReverseV2Options.ReverseV2OptionsT, tflite.AddNOptions.AddNOptionsT, tflite.GatherNdOptions.GatherNdOptionsT, tflite.CosOptions.CosOptionsT, tflite.WhereOptions.WhereOptionsT, tflite.RankOptions.RankOptionsT, tflite.ReverseSequenceOptions.ReverseSequenceOptionsT, tflite.MatrixDiagOptions.MatrixDiagOptionsT, tflite.QuantizeOptions.QuantizeOptionsT, tflite.MatrixSetDiagOptions.MatrixSetDiagOptionsT, tflite.HardSwishOptions.HardSwishOptionsT, tflite.IfOptions.IfOptionsT, tflite.WhileOptions.WhileOptionsT, tflite.DepthToSpaceOptions.DepthToSpaceOptionsT, tflite.NonMaxSuppressionV4Options.NonMaxSuppressionV4OptionsT, tflite.NonMaxSuppressionV5Options.NonMaxSuppressionV5OptionsT, tflite.ScatterNdOptions.ScatterNdOptionsT, tflite.SelectV2Options.SelectV2OptionsT, tflite.DensifyOptions.DensifyOptionsT, tflite.SegmentSumOptions.SegmentSumOptionsT, tflite.BatchMatMulOptions.BatchMatMulOptionsT, tflite.CumsumOptions.CumsumOptionsT, tflite.CallOnceOptions.CallOnceOptionsT, tflite.BroadcastToOptions.BroadcastToOptionsT, tflite.Rfft2dOptions.Rfft2dOptionsT, tflite.Conv3DOptions.Conv3DOptionsT, tflite.HashtableOptions.HashtableOptionsT, tflite.HashtableFindOptions.HashtableFindOptionsT, tflite.HashtableImportOptions.HashtableImportOptionsT, tflite.HashtableSizeOptions.HashtableSizeOptionsT]
self.customOptions = None # type: List[int]
self.customOptionsFormat = 0 # type: int
self.mutatingVariableInputs = None # type: List[bool]
self.intermediates = None # type: List[int]
@classmethod
def InitFromBuf(cls, buf, pos):
operator = Operator()
operator.Init(buf, pos)
return cls.InitFromObj(operator)
@classmethod
def InitFromObj(cls, operator):
x = OperatorT()
x._UnPack(operator)
return x
# OperatorT
def _UnPack(self, operator):
if operator is None:
return
self.opcodeIndex = operator.OpcodeIndex()
if not operator.InputsIsNone():
if np is None:
self.inputs = []
for i in range(operator.InputsLength()):
self.inputs.append(operator.Inputs(i))
else:
self.inputs = operator.InputsAsNumpy()
if not operator.OutputsIsNone():
if np is None:
self.outputs = []
for i in range(operator.OutputsLength()):
self.outputs.append(operator.Outputs(i))
else:
self.outputs = operator.OutputsAsNumpy()
self.builtinOptionsType = operator.BuiltinOptionsType()
self.builtinOptions = tflite.BuiltinOptions.BuiltinOptionsCreator(self.builtinOptionsType, operator.BuiltinOptions())
if not operator.CustomOptionsIsNone():
if np is None:
self.customOptions = []
for i in range(operator.CustomOptionsLength()):
self.customOptions.append(operator.CustomOptions(i))
else:
self.customOptions = operator.CustomOptionsAsNumpy()
self.customOptionsFormat = operator.CustomOptionsFormat()
if not operator.MutatingVariableInputsIsNone():
if np is None:
self.mutatingVariableInputs = []
for i in range(operator.MutatingVariableInputsLength()):
self.mutatingVariableInputs.append(operator.MutatingVariableInputs(i))
else:
self.mutatingVariableInputs = operator.MutatingVariableInputsAsNumpy()
if not operator.IntermediatesIsNone():
if np is None:
self.intermediates = []
for i in range(operator.IntermediatesLength()):
self.intermediates.append(operator.Intermediates(i))
else:
self.intermediates = operator.IntermediatesAsNumpy()
# OperatorT
def Pack(self, builder):
if self.inputs is not None:
if np is not None and type(self.inputs) is np.ndarray:
inputs = builder.CreateNumpyVector(self.inputs)
else:
OperatorStartInputsVector(builder, len(self.inputs))
for i in reversed(range(len(self.inputs))):
builder.PrependInt32(self.inputs[i])
inputs = builder.EndVector(len(self.inputs))
if self.outputs is not None:
if np is not None and type(self.outputs) is np.ndarray:
outputs = builder.CreateNumpyVector(self.outputs)
else:
OperatorStartOutputsVector(builder, len(self.outputs))
for i in reversed(range(len(self.outputs))):
builder.PrependInt32(self.outputs[i])
outputs = builder.EndVector(len(self.outputs))
if self.builtinOptions is not None:
builtinOptions = self.builtinOptions.Pack(builder)
if self.customOptions is not None:
if np is not None and type(self.customOptions) is np.ndarray:
customOptions = builder.CreateNumpyVector(self.customOptions)
else:
OperatorStartCustomOptionsVector(builder, len(self.customOptions))
for i in reversed(range(len(self.customOptions))):
builder.PrependUint8(self.customOptions[i])
customOptions = builder.EndVector(len(self.customOptions))
if self.mutatingVariableInputs is not None:
if np is not None and type(self.mutatingVariableInputs) is np.ndarray:
mutatingVariableInputs = builder.CreateNumpyVector(self.mutatingVariableInputs)
else:
OperatorStartMutatingVariableInputsVector(builder, len(self.mutatingVariableInputs))
for i in reversed(range(len(self.mutatingVariableInputs))):
builder.PrependBool(self.mutatingVariableInputs[i])
mutatingVariableInputs = builder.EndVector(len(self.mutatingVariableInputs))
if self.intermediates is not None:
if np is not None and type(self.intermediates) is np.ndarray:
intermediates = builder.CreateNumpyVector(self.intermediates)
else:
OperatorStartIntermediatesVector(builder, len(self.intermediates))
for i in reversed(range(len(self.intermediates))):
builder.PrependInt32(self.intermediates[i])
intermediates = builder.EndVector(len(self.intermediates))
OperatorStart(builder)
OperatorAddOpcodeIndex(builder, self.opcodeIndex)
if self.inputs is not None:
OperatorAddInputs(builder, inputs)
if self.outputs is not None:
OperatorAddOutputs(builder, outputs)
OperatorAddBuiltinOptionsType(builder, self.builtinOptionsType)
if self.builtinOptions is not None:
OperatorAddBuiltinOptions(builder, builtinOptions)
if self.customOptions is not None:
OperatorAddCustomOptions(builder, customOptions)
OperatorAddCustomOptionsFormat(builder, self.customOptionsFormat)
if self.mutatingVariableInputs is not None:
OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs)
if self.intermediates is not None:
OperatorAddIntermediates(builder, intermediates)
operator = OperatorEnd(builder)
return operator
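# Editor's note: a hedged usage sketch, not part of the generated schema. It
# round-trips an Operator through the object API defined above; the opcode
# index (3) and input indices ([0, 1]) are arbitrary placeholder values.
if __name__ == '__main__':
    import flatbuffers
    from flatbuffers import encode, packer
    # Build a small Operator table with the module-level builder helpers.
    b = flatbuffers.Builder(0)
    OperatorStartInputsVector(b, 2)
    b.PrependInt32(1)
    b.PrependInt32(0)
    inputs = b.EndVector(2)
    OperatorStart(b)
    OperatorAddOpcodeIndex(b, 3)
    OperatorAddInputs(b, inputs)
    b.Finish(OperatorEnd(b))
    buf = b.Output()
    # Locate the root table and unpack it into a plain OperatorT object.
    root = encode.Get(packer.uoffset, buf, 0)
    op_t = OperatorT.InitFromBuf(buf, root)
    print(op_t.opcodeIndex, list(op_t.inputs))  # -> 3 [0, 1]
    # Re-serialize the object into a fresh flatbuffer.
    b2 = flatbuffers.Builder(0)
    b2.Finish(op_t.Pack(b2))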
| 50.027542
| 4,779
| 0.749291
|
a30dcdbe128f284b336e14c56761f0b68915085d
| 6,719
|
py
|
Python
|
ML/detection/ssd_layers.py
|
PepSalehi/algorithms
|
1c20f57185e6324aa840ccff98e69764b4213131
|
[
"MIT"
] | 13
|
2018-08-14T08:56:59.000Z
|
2021-05-04T12:42:49.000Z
|
ML/detection/ssd_layers.py
|
PepSalehi/algorithms
|
1c20f57185e6324aa840ccff98e69764b4213131
|
[
"MIT"
] | 17
|
2019-02-22T14:43:58.000Z
|
2020-04-06T14:22:49.000Z
|
ML/detection/ssd_layers.py
|
PepSalehi/algorithms
|
1c20f57185e6324aa840ccff98e69764b4213131
|
[
"MIT"
] | 6
|
2019-02-19T02:29:39.000Z
|
2021-05-04T12:51:13.000Z
|
"""Some special pupropse layers for SSD."""
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
import numpy as np
import tensorflow as tf
class Normalize(Layer):
"""Normalization layer as described in ParseNet paper.
# Arguments
scale: Default feature scale.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
Same as input
# References
http://cs.unc.edu/~wliu/papers/parsenet.pdf
#TODO
Add possibility to have one scale for all features.
"""
def __init__(self, scale, **kwargs):
if K.image_dim_ordering() == 'tf':
self.axis = 3
else:
self.axis = 1
self.scale = scale
super(Normalize, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
init_gamma = self.scale * np.ones(shape)
self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))
self.trainable_weights = [self.gamma]
def call(self, x, mask=None):
output = K.l2_normalize(x, self.axis)
output *= self.gamma
return output
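# Editor's note: a hedged reference implementation, not used by the SSD model.
# It spells out what Normalize.call computes for channels-last input: each
# spatial position's channel vector is L2-normalized and then scaled by the
# learned per-channel gamma (K.l2_normalize applies its own epsilon; the small
# constant below is only a stand-in for that guard).
def _normalize_reference(x, gamma):
    norm = np.sqrt(np.sum(x ** 2, axis=-1, keepdims=True)) + 1e-12
    return (x / norm) * gamma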
class PriorBox(Layer):
"""Generate the prior boxes of designated sizes and aspect ratios.
# Arguments
img_size: Size of the input image as tuple (w, h).
min_size: Minimum box size in pixels.
max_size: Maximum box size in pixels.
aspect_ratios: List of aspect ratios of boxes.
flip: Whether to consider reverse aspect ratios.
variances: List of variances for x, y, w, h.
clip: Whether to clip the prior's coordinates
such that they are within [0, 1].
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
3D tensor with shape:
(samples, num_boxes, 8)
# References
https://arxiv.org/abs/1512.02325
#TODO
Add possibility not to have variances.
Add Theano support
"""
def __init__(self, img_size, min_size, max_size=None, aspect_ratios=None,
flip=True, variances=[0.1], clip=True, **kwargs):
if K.image_dim_ordering() == 'tf':
self.waxis = 2
self.haxis = 1
else:
self.waxis = 3
self.haxis = 2
self.img_size = img_size
if min_size <= 0:
raise Exception('min_size must be positive.')
self.min_size = min_size
self.max_size = max_size
self.aspect_ratios = [1.0]
if max_size:
if max_size < min_size:
raise Exception('max_size must be greater than min_size.')
self.aspect_ratios.append(1.0)
if aspect_ratios:
for ar in aspect_ratios:
if ar in self.aspect_ratios:
continue
self.aspect_ratios.append(ar)
if flip:
self.aspect_ratios.append(1.0 / ar)
self.variances = np.array(variances)
        self.clip = clip
super(PriorBox, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
num_priors_ = len(self.aspect_ratios)
layer_width = input_shape[self.waxis]
layer_height = input_shape[self.haxis]
num_boxes = num_priors_ * layer_width * layer_height
return (input_shape[0], num_boxes, 8)
def call(self, x, mask=None):
if hasattr(x, '_keras_shape'):
input_shape = x._keras_shape
elif hasattr(K, 'int_shape'):
input_shape = K.int_shape(x)
layer_width = input_shape[self.waxis]
layer_height = input_shape[self.haxis]
img_width = self.img_size[0]
img_height = self.img_size[1]
# define prior boxes shapes
box_widths = []
box_heights = []
for ar in self.aspect_ratios:
if ar == 1 and len(box_widths) == 0:
box_widths.append(self.min_size)
box_heights.append(self.min_size)
elif ar == 1 and len(box_widths) > 0:
box_widths.append(np.sqrt(self.min_size * self.max_size))
box_heights.append(np.sqrt(self.min_size * self.max_size))
elif ar != 1:
box_widths.append(self.min_size * np.sqrt(ar))
box_heights.append(self.min_size / np.sqrt(ar))
box_widths = 0.5 * np.array(box_widths)
box_heights = 0.5 * np.array(box_heights)
# define centers of prior boxes
step_x = img_width / layer_width
step_y = img_height / layer_height
linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x,
layer_width)
liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,
layer_height)
centers_x, centers_y = np.meshgrid(linx, liny)
centers_x = centers_x.reshape(-1, 1)
centers_y = centers_y.reshape(-1, 1)
# define xmin, ymin, xmax, ymax of prior boxes
num_priors_ = len(self.aspect_ratios)
prior_boxes = np.concatenate((centers_x, centers_y), axis=1)
prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_))
prior_boxes[:, ::4] -= box_widths
prior_boxes[:, 1::4] -= box_heights
prior_boxes[:, 2::4] += box_widths
prior_boxes[:, 3::4] += box_heights
prior_boxes[:, ::2] /= img_width
prior_boxes[:, 1::2] /= img_height
prior_boxes = prior_boxes.reshape(-1, 4)
if self.clip:
prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)
# define variances
num_boxes = len(prior_boxes)
if len(self.variances) == 1:
variances = np.ones((num_boxes, 4)) * self.variances[0]
elif len(self.variances) == 4:
variances = np.tile(self.variances, (num_boxes, 1))
else:
raise Exception('Must provide one or four variances.')
prior_boxes = np.concatenate((prior_boxes, variances), axis=1)
prior_boxes_tensor = K.expand_dims(K.variable(prior_boxes), 0)
if K.backend() == 'tensorflow':
pattern = [tf.shape(x)[0], 1, 1]
prior_boxes_tensor = tf.tile(prior_boxes_tensor, pattern)
elif K.backend() == 'theano':
#TODO
pass
return prior_boxes_tensor
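# Editor's note: a hedged usage sketch showing how PriorBox is typically
# attached to a feature map in an SSD-style model. The 300x300 image size, the
# 38x38 feature map and the box sizes below are illustrative values, not taken
# from this file.
if __name__ == '__main__':
    from keras.layers import Input
    feature_map = Input(shape=(38, 38, 512))
    prior_box = PriorBox((300, 300), 30.0, max_size=60.0, aspect_ratios=[2.0],
                         variances=[0.1, 0.1, 0.2, 0.2])
    priors = prior_box(feature_map)
    # Four priors per cell (1:1 for min_size, 1:1 for sqrt(min*max), 2:1, 1:2),
    # so the expected output shape is (None, 38 * 38 * 4, 8).
    print(prior_box.compute_output_shape((None, 38, 38, 512)))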
| 36.917582
| 78
| 0.591011
|
35073b9bb12ad83fe3d3742e5d6ac66ba65ebdcc
| 13,366
|
py
|
Python
|
src/onelogin/saml2/logout_request.py
|
amrfayad/python3-saml
|
8be65c7b5c5f53ed8e64bb2c1ccf3a447fcc7f95
|
[
"MIT"
] | null | null | null |
src/onelogin/saml2/logout_request.py
|
amrfayad/python3-saml
|
8be65c7b5c5f53ed8e64bb2c1ccf3a447fcc7f95
|
[
"MIT"
] | null | null | null |
src/onelogin/saml2/logout_request.py
|
amrfayad/python3-saml
|
8be65c7b5c5f53ed8e64bb2c1ccf3a447fcc7f95
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" OneLogin_Saml2_Logout_Request class
Copyright (c) 2010-2018 OneLogin, Inc.
MIT License
Logout Request class of OneLogin's Python Toolkit.
"""
from onelogin.saml2 import compat
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.utils import OneLogin_Saml2_Utils, OneLogin_Saml2_Error, OneLogin_Saml2_ValidationError
from onelogin.saml2.xml_templates import OneLogin_Saml2_Templates
from onelogin.saml2.xml_utils import OneLogin_Saml2_XML
class OneLogin_Saml2_Logout_Request(object):
"""
This class handles a Logout Request.
    Builds a Logout Request object and validates it.
"""
def __init__(self, settings, request=None, name_id=None, session_index=None, nq=None, name_id_format=None):
"""
Constructs the Logout Request object.
:param settings: Setting data
:type settings: OneLogin_Saml2_Settings
        :param request: Optional. A LogoutRequest to be loaded instead of building one.
:type request: string
:param name_id: The NameID that will be set in the LogoutRequest.
:type name_id: string
:param session_index: SessionIndex that identifies the session of the user.
:type session_index: string
:param nq: IDP Name Qualifier
:type: string
:param name_id_format: The NameID Format that will be set in the LogoutRequest.
:type: string
"""
self.__settings = settings
self.__error = None
self.id = None
if request is None:
sp_data = self.__settings.get_sp_data()
idp_data = self.__settings.get_idp_data()
security = self.__settings.get_security_data()
uid = OneLogin_Saml2_Utils.generate_unique_id()
self.id = uid
issue_instant = OneLogin_Saml2_Utils.parse_time_to_SAML(OneLogin_Saml2_Utils.now())
cert = None
if security['nameIdEncrypted']:
exists_multix509enc = 'x509certMulti' in idp_data and \
'encryption' in idp_data['x509certMulti'] and \
idp_data['x509certMulti']['encryption']
if exists_multix509enc:
cert = idp_data['x509certMulti']['encryption'][0]
else:
cert = idp_data['x509cert']
if name_id is not None:
if not name_id_format and sp_data['NameIDFormat'] != OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED:
name_id_format = sp_data['NameIDFormat']
else:
name_id_format = OneLogin_Saml2_Constants.NAMEID_ENTITY
sp_name_qualifier = None
if name_id_format == OneLogin_Saml2_Constants.NAMEID_ENTITY:
name_id = idp_data['entityId']
nq = None
elif nq is not None:
                # Only include SPNameQualifier if a NameQualifier is provided
sp_name_qualifier = sp_data['entityId']
name_id_obj = OneLogin_Saml2_Utils.generate_name_id(
name_id,
sp_name_qualifier,
name_id_format,
cert,
False,
nq
)
if session_index:
session_index_str = '<saml2p:SessionIndex>%s</saml2p:SessionIndex>' % session_index
else:
session_index_str = ''
logout_request = OneLogin_Saml2_Templates.LOGOUT_REQUEST % \
{
'id': uid,
'issue_instant': issue_instant,
'single_logout_url': idp_data['singleLogoutService']['url'],
'entity_id': sp_data['entityId'],
'name_id': name_id_obj,
'session_index': session_index_str,
}
else:
logout_request = OneLogin_Saml2_Utils.decode_base64_and_inflate(request, ignore_zip=True)
self.id = self.get_id(logout_request)
self.__logout_request = compat.to_string(logout_request)
def get_request(self, deflate=True):
"""
Returns the Logout Request deflated, base64encoded
        :param deflate: Whether to apply DEFLATE compression before base64 encoding
        :type: bool
        :return: Logout Request, optionally deflated, base64 encoded
:rtype: str object
"""
if deflate:
request = OneLogin_Saml2_Utils.deflate_and_base64_encode(self.__logout_request)
else:
request = OneLogin_Saml2_Utils.b64encode(self.__logout_request)
return request
def get_xml(self):
"""
Returns the XML that will be sent as part of the request
or that was received at the SP
:return: XML request body
:rtype: string
"""
return self.__logout_request
@staticmethod
def get_id(request):
"""
Returns the ID of the Logout Request
:param request: Logout Request Message
:type request: string|DOMDocument
:return: string ID
:rtype: str object
"""
elem = OneLogin_Saml2_XML.to_etree(request)
return elem.get('ID', None)
@staticmethod
def get_nameid_data(request, key=None):
"""
        Gets the NameID Data of the Logout Request
:param request: Logout Request Message
:type request: string|DOMDocument
:param key: The SP key
:type key: string
:return: Name ID Data (Value, Format, NameQualifier, SPNameQualifier)
:rtype: dict
"""
elem = OneLogin_Saml2_XML.to_etree(request)
name_id = None
encrypted_entries = OneLogin_Saml2_XML.query(elem, '/saml2p:LogoutRequest/saml2:EncryptedID')
if len(encrypted_entries) == 1:
if key is None:
raise OneLogin_Saml2_Error(
'Private Key is required in order to decrypt the NameID, check settings',
OneLogin_Saml2_Error.PRIVATE_KEY_NOT_FOUND
)
encrypted_data_nodes = OneLogin_Saml2_XML.query(elem, '/saml2p:LogoutRequest/saml2:EncryptedID/xenc:EncryptedData')
if len(encrypted_data_nodes) == 1:
encrypted_data = encrypted_data_nodes[0]
name_id = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key)
else:
entries = OneLogin_Saml2_XML.query(elem, '/saml2p:LogoutRequest/saml2:NameID')
if len(entries) == 1:
name_id = entries[0]
if name_id is None:
raise OneLogin_Saml2_ValidationError(
'NameID not found in the Logout Request',
OneLogin_Saml2_ValidationError.NO_NAMEID
)
name_id_data = {
'Value': OneLogin_Saml2_XML.element_text(name_id)
}
for attr in ['Format', 'SPNameQualifier', 'NameQualifier']:
if attr in name_id.attrib:
name_id_data[attr] = name_id.attrib[attr]
return name_id_data
@staticmethod
def get_nameid(request, key=None):
"""
Gets the NameID of the Logout Request Message
:param request: Logout Request Message
:type request: string|DOMDocument
:param key: The SP key
:type key: string
:return: Name ID Value
:rtype: string
"""
name_id = OneLogin_Saml2_Logout_Request.get_nameid_data(request, key)
return name_id['Value']
@staticmethod
def get_nameid_format(request, key=None):
"""
Gets the NameID Format of the Logout Request Message
:param request: Logout Request Message
:type request: string|DOMDocument
:param key: The SP key
:type key: string
:return: Name ID Format
:rtype: string
"""
name_id_format = None
name_id_data = OneLogin_Saml2_Logout_Request.get_nameid_data(request, key)
if name_id_data and 'Format' in name_id_data.keys():
name_id_format = name_id_data['Format']
return name_id_format
@staticmethod
def get_issuer(request):
"""
Gets the Issuer of the Logout Request Message
:param request: Logout Request Message
:type request: string|DOMDocument
:return: The Issuer
:rtype: string
"""
elem = OneLogin_Saml2_XML.to_etree(request)
issuer = None
issuer_nodes = OneLogin_Saml2_XML.query(elem, '/saml2p:LogoutRequest/saml2:Issuer')
if len(issuer_nodes) == 1:
issuer = OneLogin_Saml2_XML.element_text(issuer_nodes[0])
return issuer
@staticmethod
def get_session_indexes(request):
"""
Gets the SessionIndexes from the Logout Request
:param request: Logout Request Message
:type request: string|DOMDocument
:return: The SessionIndex value
:rtype: list
"""
elem = OneLogin_Saml2_XML.to_etree(request)
session_indexes = []
session_index_nodes = OneLogin_Saml2_XML.query(elem, '/saml2p:LogoutRequest/saml2p:SessionIndex')
for session_index_node in session_index_nodes:
session_indexes.append(OneLogin_Saml2_XML.element_text(session_index_node))
return session_indexes
def is_valid(self, request_data, raise_exceptions=False):
"""
Checks if the Logout Request received is valid
:param request_data: Request Data
:type request_data: dict
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
:return: If the Logout Request is or not valid
:rtype: boolean
"""
self.__error = None
try:
root = OneLogin_Saml2_XML.to_etree(self.__logout_request)
idp_data = self.__settings.get_idp_data()
idp_entity_id = idp_data['entityId']
get_data = ('get_data' in request_data and request_data['get_data']) or dict()
if self.__settings.is_strict():
res = OneLogin_Saml2_XML.validate_xml(root, 'saml-schema-protocol-2.0.xsd', self.__settings.is_debug_active())
if isinstance(res, str):
raise OneLogin_Saml2_ValidationError(
'Invalid SAML Logout Request. Not match the saml-schema-protocol-2.0.xsd',
OneLogin_Saml2_ValidationError.INVALID_XML_FORMAT
)
security = self.__settings.get_security_data()
current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data)
# Check NotOnOrAfter
if root.get('NotOnOrAfter', None):
na = OneLogin_Saml2_Utils.parse_SAML_to_time(root.get('NotOnOrAfter'))
if na <= OneLogin_Saml2_Utils.now():
raise OneLogin_Saml2_ValidationError(
                            'Could not validate timestamp: expired. Check system clock.',
OneLogin_Saml2_ValidationError.RESPONSE_EXPIRED
)
# Check destination
if root.get('Destination', None):
destination = root.get('Destination')
if destination != '':
if current_url not in destination:
raise OneLogin_Saml2_ValidationError(
'The LogoutRequest was received at '
'%(currentURL)s instead of %(destination)s' %
{
'currentURL': current_url,
'destination': destination,
},
OneLogin_Saml2_ValidationError.WRONG_DESTINATION
)
# Check issuer
issuer = OneLogin_Saml2_Logout_Request.get_issuer(root)
if issuer is not None and issuer != idp_entity_id:
raise OneLogin_Saml2_ValidationError(
'Invalid issuer in the Logout Request (expected %(idpEntityId)s, got %(issuer)s)' %
{
'idpEntityId': idp_entity_id,
'issuer': issuer
},
OneLogin_Saml2_ValidationError.WRONG_ISSUER
)
if security['wantMessagesSigned']:
if 'Signature' not in get_data:
raise OneLogin_Saml2_ValidationError(
'The Message of the Logout Request is not signed and the SP require it',
OneLogin_Saml2_ValidationError.NO_SIGNED_MESSAGE
)
return True
except Exception as err:
# pylint: disable=R0801
self.__error = str(err)
debug = self.__settings.is_debug_active()
if debug:
print(err)
if raise_exceptions:
raise
return False
def get_error(self):
"""
After executing a validation process, if it fails this method returns the cause
"""
return self.__error
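if __name__ == '__main__':
    # Editor's note: a hedged sketch of the static helpers above applied to a
    # raw LogoutRequest document. The XML below is a minimal illustrative
    # message (no signature, NotOnOrAfter or Destination) with placeholder
    # identifiers, not something produced by this toolkit.
    SAMPLE_LOGOUT_REQUEST = (
        '<samlp:LogoutRequest xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" '
        'xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" '
        'ID="_example_id" Version="2.0" IssueInstant="2018-01-01T00:00:00Z">'
        '<saml:Issuer>https://idp.example.com/metadata</saml:Issuer>'
        '<saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">'
        'https://idp.example.com/metadata</saml:NameID>'
        '<samlp:SessionIndex>_example_session</samlp:SessionIndex>'
        '</samlp:LogoutRequest>'
    )
    print(OneLogin_Saml2_Logout_Request.get_id(SAMPLE_LOGOUT_REQUEST))
    print(OneLogin_Saml2_Logout_Request.get_issuer(SAMPLE_LOGOUT_REQUEST))
    print(OneLogin_Saml2_Logout_Request.get_nameid(SAMPLE_LOGOUT_REQUEST))
    print(OneLogin_Saml2_Logout_Request.get_session_indexes(SAMPLE_LOGOUT_REQUEST))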
| 37.544944
| 127
| 0.590453
|
2edc498e5b11bee7a9c18d1fdec86d201e5dcfa3
| 363
|
py
|
Python
|
items.py
|
MihaiLai/spider
|
4a3727a722feee1c38e98dc0dab65b92fe971a7f
|
[
"MIT"
] | null | null | null |
items.py
|
MihaiLai/spider
|
4a3727a722feee1c38e98dc0dab65b92fe971a7f
|
[
"MIT"
] | null | null | null |
items.py
|
MihaiLai/spider
|
4a3727a722feee1c38e98dc0dab65b92fe971a7f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CarItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
brand = scrapy.Field()
car_title = scrapy.Field()
car_content = scrapy.Field()
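if __name__ == '__main__':
    # Editor's note: a hedged sketch; the field values are placeholders that
    # would normally be filled in by a spider's parse() callback.
    item = CarItem(brand='ExampleBrand', car_title='Example title', car_content='...')
    print(dict(item))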
| 22.6875
| 51
| 0.677686
|
3902fd6e360491e321eecdea8a92e7b88c680769
| 788
|
py
|
Python
|
setup.py
|
HDKidd/hdk-pkg-cri
|
a0370aad531ad47b36f0a0fd91c543b9d499bacd
|
[
"MIT"
] | null | null | null |
setup.py
|
HDKidd/hdk-pkg-cri
|
a0370aad531ad47b36f0a0fd91c543b9d499bacd
|
[
"MIT"
] | null | null | null |
setup.py
|
HDKidd/hdk-pkg-cri
|
a0370aad531ad47b36f0a0fd91c543b9d499bacd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: He Dekun
"""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name = "hdk-pkg-cri", # Replace with your own username
version = "0.0.8",
author = "He Dekun",
author_email = "hede0001@e.ntu.edu.sg",
description = "A small example package for CRI PMT test.",
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/HDKidd/hdk-pkg-cri",
packages = setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 28.142857
| 62
| 0.637056
|
7c4c824f9577d4be4885c4a74836f1f40eb527c5
| 3,271
|
py
|
Python
|
config/settings.py
|
iqbalcrat/django-location-based-homepage
|
eabcc736931e1be7d2eb20aa2cc1c13ef88be78c
|
[
"MIT"
] | null | null | null |
config/settings.py
|
iqbalcrat/django-location-based-homepage
|
eabcc736931e1be7d2eb20aa2cc1c13ef88be78c
|
[
"MIT"
] | null | null | null |
config/settings.py
|
iqbalcrat/django-location-based-homepage
|
eabcc736931e1be7d2eb20aa2cc1c13ef88be78c
|
[
"MIT"
] | null | null | null |
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-^1uc&l6p0eg_i&=u(iplgn6rzogt3hqm1r!nsto9a!vpzvppw-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.home.apps.HomeConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.755906
| 91
| 0.702537
|
390c5a41d61e91aec335278fa9dade79519d794c
| 113
|
py
|
Python
|
user/admin.py
|
ssoumyajit/imgapi2
|
b2129f1d35d55e093a3d96272686ac25ea2cf7bb
|
[
"MIT"
] | null | null | null |
user/admin.py
|
ssoumyajit/imgapi2
|
b2129f1d35d55e093a3d96272686ac25ea2cf7bb
|
[
"MIT"
] | null | null | null |
user/admin.py
|
ssoumyajit/imgapi2
|
b2129f1d35d55e093a3d96272686ac25ea2cf7bb
|
[
"MIT"
] | 1
|
2021-06-26T21:16:56.000Z
|
2021-06-26T21:16:56.000Z
|
from django.contrib import admin
from .models import User
# Register your models here.
admin.site.register(User)
| 22.6
| 32
| 0.80531
|
bc30878909df9dc33f9d019ade4a4a180bf67c62
| 3,981
|
py
|
Python
|
fawkes/align_face.py
|
baajur/fawkes
|
2b08b3ec8ec98e9f0f13aa9c345f48f24ad40deb
|
[
"BSD-3-Clause"
] | 4
|
2021-01-26T09:21:12.000Z
|
2021-02-15T15:59:38.000Z
|
fawkes/align_face.py
|
tweetz0r/fawkes
|
a0708ca9c8fa4ac3acca6ae5e8ff6219da33d5c6
|
[
"BSD-3-Clause"
] | null | null | null |
fawkes/align_face.py
|
tweetz0r/fawkes
|
a0708ca9c8fa4ac3acca6ae5e8ff6219da33d5c6
|
[
"BSD-3-Clause"
] | 1
|
2020-08-04T19:25:53.000Z
|
2020-08-04T19:25:53.000Z
|
"""Performs face alignment and stores face thumbnails in the output directory."""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Tensorflow implementation of the face detection / alignment algorithm found at
https://github.com/kpzhang93/MTCNN_face_detection_alignment
"""
import numpy as np
from fawkes import create_mtcnn, run_detect_face
np_load_old = np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
def to_rgb(img):
w, h = img.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
return ret
def aligner(sess):
pnet, rnet, onet = create_mtcnn(sess, None)
return [pnet, rnet, onet]
def align(orig_img, aligner, margin=0.8, detect_multiple_faces=True):
pnet, rnet, onet = aligner
minsize = 25 # minimum size of face
    threshold = [0.85, 0.85, 0.85]  # thresholds for the three detection stages
factor = 0.709 # scale factor
if orig_img.ndim < 2:
return None
if orig_img.ndim == 2:
orig_img = to_rgb(orig_img)
orig_img = orig_img[:, :, 0:3]
bounding_boxes, _ = run_detect_face(orig_img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
if nrof_faces > 0:
det = bounding_boxes[:, 0:4]
det_arr = []
img_size = np.asarray(orig_img.shape)[0:2]
if nrof_faces > 1:
margin = margin / 1.5
if detect_multiple_faces:
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
else:
bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
img_center = img_size / 2
offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
(det[:, 1] + det[:, 3]) / 2 - img_center[0]])
offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
index = np.argmax(bounding_box_size - offset_dist_squared * 2.0) # some extra weight on the centering
det_arr.append(det[index, :])
else:
det_arr.append(np.squeeze(det))
cropped_arr = []
bounding_boxes_arr = []
for i, det in enumerate(det_arr):
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
side_1 = int((det[2] - det[0]) * margin)
side_2 = int((det[3] - det[1]) * margin)
bb[0] = np.maximum(det[0] - side_1 / 2, 0)
bb[1] = np.maximum(det[1] - side_1 / 2, 0)
bb[2] = np.minimum(det[2] + side_2 / 2, img_size[1])
bb[3] = np.minimum(det[3] + side_2 / 2, img_size[0])
cropped = orig_img[bb[1]:bb[3], bb[0]:bb[2], :]
cropped_arr.append(cropped)
bounding_boxes_arr.append([bb[0], bb[1], bb[2], bb[3]])
return cropped_arr, bounding_boxes_arr
else:
return None
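if __name__ == '__main__':
    # Editor's note: a hedged driver sketch, not part of fawkes. It assumes a
    # TF1-style session (tf.compat.v1 on newer TensorFlow), Pillow for image
    # loading, and a placeholder path 'face.png'.
    import tensorflow as tf
    from PIL import Image
    sess = tf.compat.v1.Session()
    mtcnn = aligner(sess)
    img = np.asarray(Image.open('face.png').convert('RGB'))
    result = align(img, mtcnn, margin=0.8, detect_multiple_faces=True)
    if result is not None:
        cropped_faces, boxes = result
        print('found %d face(s): %s' % (len(cropped_faces), boxes))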
| 40.212121
| 118
| 0.622205
|
4ab5d5e33606a9d30097e947f57497f50566b802
| 414
|
py
|
Python
|
{{cookiecutter.project_slug}}/backend/app/db/tipo_acordo/schemas.py
|
souzjfe/conectar
|
0603e955394765f3fc1a01bbd902be695bc44cba
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/backend/app/db/tipo_acordo/schemas.py
|
souzjfe/conectar
|
0603e955394765f3fc1a01bbd902be695bc44cba
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/backend/app/db/tipo_acordo/schemas.py
|
souzjfe/conectar
|
0603e955394765f3fc1a01bbd902be695bc44cba
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
import typing as t
class TipoAcordoBase(BaseModel):
descricao: str
class TipoAcordoOut(TipoAcordoBase):
pass
class TipoAcordoCreate(TipoAcordoBase):
pass
class TipoAcordoEdit(TipoAcordoBase):
descricao: t.Optional[str] = None
class Config:
orm_mode = True
class TipoAcordo(TipoAcordoBase):
id: int
class Config:
orm_mode = True
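if __name__ == '__main__':
    # Editor's note: a hedged sketch of the schemas above; the description text
    # and id are placeholder values.
    payload = TipoAcordoCreate(descricao='acordo de exemplo')
    stored = TipoAcordo(id=1, descricao=payload.descricao)
    print(stored.dict())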
| 14.785714
| 39
| 0.71256
|
07c70758bd09e7aec410833fec6782f32bd6f902
| 13,970
|
py
|
Python
|
crystal4D/layers/convQuad.py
|
AI-ML-4DSTEM/crystal4D
|
03eed92c16d8e96625f1be71d4f81397ae474b66
|
[
"MIT"
] | null | null | null |
crystal4D/layers/convQuad.py
|
AI-ML-4DSTEM/crystal4D
|
03eed92c16d8e96625f1be71d4f81397ae474b66
|
[
"MIT"
] | null | null | null |
crystal4D/layers/convQuad.py
|
AI-ML-4DSTEM/crystal4D
|
03eed92c16d8e96625f1be71d4f81397ae474b66
|
[
"MIT"
] | null | null | null |
import functools
import six
from tensorflow import reshape
from tensorflow import einsum
from tensorflow import convert_to_tensor
from tensorflow.math import reduce_sum
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# imports for backwards namespace compatibility
# pylint: disable=unused-import
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-classes-have-attributes
from tensorflow.image import extract_patches
class ConvQuad2D(Layer):
def __init__(self,
rank,
filters,
kernel_size,
strides=(1,1),
padding='valid',
data_format=None,
dilation_rate=1,
groups=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
conv_op=None,
**kwargs):
super(ConvQuad2D, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = 2
if isinstance(filters, float):
filters = int(filters)
self.filters = filters
self.groups = groups or 1
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=self.rank + 2)
self._validate_init()
self._is_causal = self.padding == 'causal'
self._channels_first = self.data_format == 'channels_first'
self._tf_data_format = conv_utils.convert_data_format(
self.data_format, self.rank + 2)
        # Quadratic CNN initialization
if isinstance(kernel_size, int):
self.quad_kernel_size = conv_utils.normalize_tuple(
(kernel_size**2), rank, 'quad_kernel_size')
else:
self.quad_kernel_size = conv_utils.normalize_tuple(
(kernel_size[0]**2,kernel_size[1]**2), rank, 'quad_kernel_size')
#Quadratic CNN using Volterra kernel theory
def _volterra_conv(self, inputs, W, input_dim, quad_ksize, quad_strides, padding):
input_patches = extract_patches(inputs,
sizes= quad_ksize,
strides=quad_strides,
rates=quad_strides,
padding=padding)
        # NOTE: patches are reshaped against a hard-coded 250x250 spatial grid,
        # so this op assumes feature maps of exactly that size.
        input_patches_shape = (-1, 250, 250, self.kernel_size[0]*self.kernel_size[1], input_dim)
#print(input_patches_shape)
input_patches = array_ops.reshape(input_patches, input_patches_shape)
V = einsum('abcid,abcjd,dijo->abcdo', input_patches, input_patches, W)
return reduce_sum(V, 3)
##############################################
def _validate_init(self):
if self.filters is not None and self.filters % self.groups != 0:
raise ValueError(
'The number of filters must be evenly divisible by the number of '
'groups. Received: groups={}, filters={}'.format(
self.groups, self.filters))
if not all(self.kernel_size):
raise ValueError('The argument `kernel_size` cannot contain 0(s). '
'Received: %s' % (self.kernel_size,))
if (self.padding == 'causal' and not isinstance(self,(Conv1D, SeparableConv1D))):
raise ValueError('Causal padding is only supported for `Conv1D`'
'and `SeparableConv1D`.')
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
if input_channel % self.groups != 0:
raise ValueError(
'The number of input channels must be evenly divisible by the number '
'of groups. Received groups={}, but the input has {} channels '
'(full input shape is {}).'.format(self.groups, input_channel,
input_shape))
kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
#Volterra kernel initialize
self.quad_kernel_shape = (input_channel // self.groups,) + self.quad_kernel_size + (self.filters,)
self.quad_kernel = self.add_weight(
name='quad_kernel',
shape=self.quad_kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
channel_axis = self._get_channel_axis()
self.input_spec = InputSpec(min_ndim=self.rank + 2,
axes={channel_axis: input_channel})
# Convert Keras formats to TF native formats.
if self.padding == 'causal':
tf_padding = 'VALID' # Causal padding handled in `call`.
elif isinstance(self.padding, six.string_types):
tf_padding = self.padding.upper()
else:
tf_padding = self.padding
tf_dilations = list(self.dilation_rate)
tf_strides = list(self.strides)
tf_op_name = self.__class__.__name__
if tf_op_name == 'Conv1D':
tf_op_name = 'conv1d' # Backwards compat.
self._convolution_op = functools.partial(
nn_ops.convolution_v2,
strides=tf_strides,
padding=tf_padding,
dilations=tf_dilations,
data_format=self._tf_data_format,
name=tf_op_name)
#Volterra quad CNN
tf_quad_strides = list((1,) + self.strides + (1,))
tf_quad_padding = tf_padding
tf_quad_ksize = list((1,) + self.kernel_size + (1,))
self._quad_convolution_op = functools.partial(
self._volterra_conv,
input_dim = input_channel,
quad_ksize = tf_quad_ksize,
quad_strides=tf_quad_strides,
padding=tf_quad_padding)
self.built = True
def call(self, inputs):
if self._is_causal: # Apply causal padding to inputs for Conv1D.
inputs = array_ops.pad(inputs, self._compute_causal_padding(inputs))
outputs = self._quad_convolution_op(inputs, self.quad_kernel) + self._convolution_op(inputs, self.kernel)
if self.use_bias:
output_rank = outputs.shape.rank
if self.rank == 1 and self._channels_first:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
# Handle multiple batch dimensions.
if output_rank is not None and output_rank > 2 + self.rank:
def _apply_fn(o):
return nn.bias_add(o, self.bias, data_format=self._tf_data_format)
outputs = nn_ops.squeeze_batch_dims(outputs, _apply_fn, inner_rank=self.rank + 1)
else:
outputs = nn.bias_add(outputs, self.bias, data_format=self._tf_data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def _spatial_output_shape(self, spatial_input_shape):
return [
conv_utils.conv_output_length(
length,
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
for i, length in enumerate(spatial_input_shape)
]
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
batch_rank = len(input_shape) - self.rank - 1
if self.data_format == 'channels_last':
return tensor_shape.TensorShape(
input_shape[:batch_rank]
+ self._spatial_output_shape(input_shape[batch_rank:-1])
+ [self.filters])
else:
return tensor_shape.TensorShape(
input_shape[:batch_rank] + [self.filters] +
self._spatial_output_shape(input_shape[batch_rank + 1:]))
def _recreate_conv_op(self, inputs): # pylint: disable=unused-argument
return False
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'quad_kernel_size':
self.quad_kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'dilation_rate':
self.dilation_rate,
'groups':
self.groups,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(ConvQuad2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self, inputs):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if getattr(inputs.shape, 'ndims', None) is None:
batch_rank = 1
else:
batch_rank = len(inputs.shape) - 2
if self.data_format == 'channels_last':
causal_padding = [[0, 0]] * batch_rank + [[left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0]] * batch_rank + [[0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return -1 - self.rank
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
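if __name__ == '__main__':
    # Editor's note: a hedged usage sketch. _volterra_conv above reshapes its
    # patches against a hard-coded 250x250 grid, so the input size is chosen to
    # match it; filter count and kernel size are illustrative. It also assumes
    # a TensorFlow release where tf.keras is still backed by
    # tensorflow.python.keras, as the imports above imply.
    import tensorflow as tf
    inputs = tf.keras.Input(shape=(250, 250, 1))
    outputs = ConvQuad2D(rank=2, filters=8, kernel_size=3, padding='same')(inputs)
    model = tf.keras.Model(inputs, outputs)
    model.summary()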
| 40.492754
| 113
| 0.589549
|
94274f5554c762abe4309070d37cd14fb47e8b46
| 4,127
|
py
|
Python
|
scripts/update_dreqs/update_dreqs_0270.py
|
jonseddon/primavera-dmt
|
1239044e37f070b925a3d06db68351f285df780c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/update_dreqs/update_dreqs_0270.py
|
jonseddon/primavera-dmt
|
1239044e37f070b925a3d06db68351f285df780c
|
[
"BSD-3-Clause"
] | 49
|
2018-11-14T17:00:03.000Z
|
2021-12-20T11:04:22.000Z
|
scripts/update_dreqs/update_dreqs_0270.py
|
jonseddon/primavera-dmt
|
1239044e37f070b925a3d06db68351f285df780c
|
[
"BSD-3-Clause"
] | 2
|
2018-07-04T10:58:43.000Z
|
2018-09-29T14:55:08.000Z
|
#!/usr/bin/env python
"""
update_dreqs_0270.py
From the Cylc db identify jobs that failed due to running out of time. For
these, delete the data from disk and create a retrieval request to restore it.
"""
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import argparse
import datetime
import logging.config
import sqlite3
import sys
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.models import DataRequest, RetrievalRequest
from pdata_app.utils.common import delete_files
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
    parser = argparse.ArgumentParser(description='Create retrieval requests for crepp jobs that ran out of time')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
    parser.add_argument('-c', '--create', help='Create the retrieval request '
                                               'rather than just displaying '
                                               'the data volumes',
                        action='store_true')
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
Example task_name:
crepp_submission_HadGEM3-GC31-LL_hist-1950_r1i7p1f1_Omon_so
"""
if args.create:
start_year = 1948
end_year = 2051
jon = User.objects.get(username='jseddon')
rr = RetrievalRequest.objects.create(requester=jon, start_year=start_year,
end_year=end_year)
time_zone = datetime.timezone(datetime.timedelta())
rr.date_created = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=time_zone)
rr.save()
conn = sqlite3.connect('/home/users/jseddon/cylc-run/db_u-bs020.db')
c = conn.cursor()
for task_name in c.execute('SELECT "name" FROM "task_jobs" WHERE '
'"name" LIKE "crepp_submission_%" AND '
'"run_signal" IS "SIGUSR2";'):
model, expt, var_label, table, var_name = task_name[0].split('_')[2:]
dreq = DataRequest.objects.get(
climate_model__short_name=model,
experiment__short_name=expt,
rip_code=var_label,
variable_request__table_name=table,
variable_request__cmor_name=var_name
)
logger.debug(f'{task_name[0]} '
f'{dreq.datafile_set.filter(online=True).count()}')
if args.create:
try:
delete_files(dreq.datafile_set.all(),
'/gws/nopw/j04/primavera5/stream1')
except Exception as exc:
logger.error(str(exc))
rr.data_request.add(dreq)
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
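# Illustrative only (not part of the script): how the docstring's example
# task_name splits into the attributes used to look up the DataRequest above.
_example_task = 'crepp_submission_HadGEM3-GC31-LL_hist-1950_r1i7p1f1_Omon_so'
assert _example_task.split('_')[2:] == [
    'HadGEM3-GC31-LL', 'hist-1950', 'r1i7p1f1', 'Omon', 'so'
]  # -> model, expt, var_label, table, var_name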
| 30.798507
| 82
| 0.578386
|
d3d87b11d43903546c263b237a849f0727cf1098
| 4,601
|
py
|
Python
|
truecase/Trainer.py
|
Brucewuzhang/truecase
|
dd377de45764321072e4b7789d67a14ecf037742
|
[
"Apache-2.0"
] | null | null | null |
truecase/Trainer.py
|
Brucewuzhang/truecase
|
dd377de45764321072e4b7789d67a14ecf037742
|
[
"Apache-2.0"
] | null | null | null |
truecase/Trainer.py
|
Brucewuzhang/truecase
|
dd377de45764321072e4b7789d67a14ecf037742
|
[
"Apache-2.0"
] | null | null | null |
import pickle
import nltk
class Trainer:
def __init__(self):
self.uni_dist = nltk.FreqDist()
self.backward_bi_dist = nltk.FreqDist()
self.forward_bi_dist = nltk.FreqDist()
self.trigram_dist = nltk.FreqDist()
self.word_casing_lookup = {}
def __function_one(self, sentence, word, word_idx, word_lower):
try:
if (word_lower in self.word_casing_lookup
and len(self.word_casing_lookup[word_lower]) >= 2):
# Only if there are multiple options
prev_word = sentence[word_idx - 1]
self.backward_bi_dist[prev_word + "_" + word] += 1
next_word = sentence[word_idx + 1].lower()
self.forward_bi_dist[word + "_" + next_word] += 1
except IndexError:
pass
def __function_two(self, sentence, word, word_idx):
try:
if word_idx - 1 < 0:
return
prev_word = sentence[word_idx - 1]
cur_word = sentence[word_idx]
cur_word_lower = word.lower()
next_word_lower = sentence[word_idx + 1].lower()
if (cur_word_lower in self.word_casing_lookup
and len(self.word_casing_lookup[cur_word_lower]) >= 2):
# Only if there are multiple options
self.trigram_dist[prev_word + "_" + cur_word + "_" +
next_word_lower] += 1
except IndexError:
pass
def get_unigram_casing_stats(self, corpus):
for sentence in corpus:
if not self.check_sentence_sanity(sentence):
continue
for _, word in enumerate(sentence):
self.uni_dist[word] += 1
word_lower = word.lower()
if word_lower not in self.word_casing_lookup:
self.word_casing_lookup[word_lower] = set()
self.word_casing_lookup[word_lower].add(word)
def train(self, corpus):
# first pass to get unigram and casing statistics
self.get_unigram_casing_stats(corpus)
# second pass to get bi-gram tri-gram statistics
for sentence in corpus:
if not self.check_sentence_sanity(sentence) or len(sentence) == 1:
continue
if self.get_casing(sentence[0]) == "initialUpper" and sentence[1] not in '°#$%&' and \
self.get_casing(sentence[1]) not in ["allUpper", "numeric", "initialUpper"]:
# first word and initialUpper not ner, count only unigram
sentence = sentence[1:]
for word_idx, word in enumerate(sentence):
word_lower = word.lower()
# todo: count bigram and trigram
self.__function_one(sentence, word, word_idx, word_lower)
self.__function_two(sentence, word, word_idx)
def save_to_file(self, file_path):
pickle_dict = {
"uni_dist": self.uni_dist,
"backward_bi_dist": self.backward_bi_dist,
"forward_bi_dist": self.forward_bi_dist,
"trigram_dist": self.trigram_dist,
"word_casing_lookup": self.word_casing_lookup,
}
with open(file_path, "wb") as fp:
pickle.dump(pickle_dict, fp)
print("Model saved to " + file_path)
@staticmethod
def get_casing(word):
""" Returns the casing of a word """
if len(word) == 0:
return "other"
elif word.isdigit(): # Is a digit
return "numeric"
elif word.islower(): # All lower case
return "allLower"
elif word.isupper(): # All upper case
return "allUpper"
# is a title, initial char upper, then all lower
elif word[0].isupper():
return "initialUpper"
return "other"
def check_sentence_sanity(self, sentence):
""" Checks the sanity of the sentence.
If the sentence is for example all uppercase, it is rejected """
case_dist = nltk.FreqDist()
for token in sentence:
case_dist[self.get_casing(token)] += 1
if case_dist.most_common(1)[0][0] != "allLower":
return False
return True
if __name__ == "__main__":
corpus = (nltk.corpus.brown.sents() + nltk.corpus.reuters.sents() +
nltk.corpus.semcor.sents() + nltk.corpus.conll2000.sents() +
nltk.corpus.state_union.sents())
trainer = Trainer()
trainer.train(corpus)
trainer.save_to_file("data/english.dist")
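# Illustrative check (not part of the original module): the casing labels that
# check_sentence_sanity and train() rely on, as returned by get_casing above.
if __name__ == "__main__":
    assert Trainer.get_casing("hello") == "allLower"
    assert Trainer.get_casing("HELLO") == "allUpper"
    assert Trainer.get_casing("Hello") == "initialUpper"
    assert Trainer.get_casing("42") == "numeric"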
| 34.856061
| 98
| 0.575962
|
c7340c9b05dc42b36c69f5e6dbe799a01745cad3
| 8,873
|
py
|
Python
|
fastmri/pl_modules/mri_module.py
|
kapoor1992/fastMRI
|
6b0af94663faa55a2dd901a6a5cbb7d7b5f4cf6d
|
[
"MIT"
] | null | null | null |
fastmri/pl_modules/mri_module.py
|
kapoor1992/fastMRI
|
6b0af94663faa55a2dd901a6a5cbb7d7b5f4cf6d
|
[
"MIT"
] | null | null | null |
fastmri/pl_modules/mri_module.py
|
kapoor1992/fastMRI
|
6b0af94663faa55a2dd901a6a5cbb7d7b5f4cf6d
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
from argparse import ArgumentParser
from collections import defaultdict
import fastmri
import numpy as np
import pytorch_lightning as pl
import torch
from fastmri import evaluate
class DistributedMetricSum(pl.metrics.Metric):
def __init__(self, dist_sync_on_step=True):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("quantity", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, batch: torch.Tensor): # type: ignore
self.quantity += batch
def compute(self):
return self.quantity
class MriModule(pl.LightningModule):
"""
    Abstract superclass for deep learning reconstruction models.
This is a subclass of the LightningModule class from pytorch_lightning,
with some additional functionality specific to fastMRI:
- Evaluating reconstructions
- Visualization
To implement a new reconstruction model, inherit from this class and
implement the following methods:
- training_step, validation_step, test_step:
Define what happens in one step of training, validation, and
testing, respectively
- configure_optimizers:
Create and return the optimizers
Other methods from LightningModule can be overridden as needed.
"""
def __init__(self, num_log_images: int = 16):
"""
Args:
num_log_images: Number of images to log. Defaults to 16.
"""
super().__init__()
self.num_log_images = num_log_images
self.val_log_indices = None
self.NMSE = DistributedMetricSum()
self.SSIM = DistributedMetricSum()
self.PSNR = DistributedMetricSum()
self.ValLoss = DistributedMetricSum()
self.TotExamples = DistributedMetricSum()
self.TotSliceExamples = DistributedMetricSum()
def validation_step_end(self, val_logs):
# check inputs
for k in (
"batch_idx",
"fname",
"slice_num",
"max_value",
"output",
"target",
"val_loss",
):
if k not in val_logs.keys():
raise RuntimeError(
f"Expected key {k} in dict returned by validation_step."
)
if val_logs["output"].ndim == 2:
val_logs["output"] = val_logs["output"].unsqueeze(0)
elif val_logs["output"].ndim != 3:
raise RuntimeError("Unexpected output size from validation_step.")
if val_logs["target"].ndim == 2:
val_logs["target"] = val_logs["target"].unsqueeze(0)
elif val_logs["target"].ndim != 3:
raise RuntimeError("Unexpected output size from validation_step.")
# pick a set of images to log if we don't have one already
if self.val_log_indices is None:
self.val_log_indices = list(
np.random.permutation(len(self.trainer.val_dataloaders[0]))[
: self.num_log_images
]
)
# log images to tensorboard
if isinstance(val_logs["batch_idx"], int):
batch_indices = [val_logs["batch_idx"]]
else:
batch_indices = val_logs["batch_idx"]
for i, batch_idx in enumerate(batch_indices):
if batch_idx in self.val_log_indices:
key = f"val_images_idx_{batch_idx}"
target = val_logs["target"][i].unsqueeze(0)
output = val_logs["output"][i].unsqueeze(0)
error = torch.abs(target - output)
output = output / output.max()
target = target / target.max()
error = error / error.max()
self.logger.experiment.add_image(
f"{key}/target", target, global_step=self.global_step
)
self.logger.experiment.add_image(
f"{key}/reconstruction", output, global_step=self.global_step
)
self.logger.experiment.add_image(
f"{key}/error", error, global_step=self.global_step
)
# compute evaluation metrics
nmse_vals = defaultdict(dict)
ssim_vals = defaultdict(dict)
psnr_vals = defaultdict(dict)
for i, fname in enumerate(val_logs["fname"]):
slice_num = int(val_logs["slice_num"][i].cpu())
maxval = val_logs["max_value"][i].cpu().numpy()
output = val_logs["output"][i].cpu().numpy()
target = val_logs["target"][i].cpu().numpy()
nmse_vals[fname][slice_num] = torch.tensor(
evaluate.nmse(target, output)
).view(1)
ssim_vals[fname][slice_num] = torch.tensor(
evaluate.ssim(target, output, maxval=maxval)
).view(1)
psnr_vals[fname][slice_num] = torch.tensor(
evaluate.psnr(target, output)
).view(1)
return {
"val_loss": val_logs["val_loss"],
"nmse_vals": nmse_vals,
"ssim_vals": ssim_vals,
"psnr_vals": psnr_vals,
}
def validation_epoch_end(self, val_logs):
# aggregate losses
losses = []
nmse_vals = defaultdict(dict)
ssim_vals = defaultdict(dict)
psnr_vals = defaultdict(dict)
# use dict updates to handle duplicate slices
for val_log in val_logs:
losses.append(val_log["val_loss"].view(-1))
for k in val_log["nmse_vals"].keys():
nmse_vals[k].update(val_log["nmse_vals"][k])
for k in val_log["ssim_vals"].keys():
ssim_vals[k].update(val_log["ssim_vals"][k])
for k in val_log["psnr_vals"].keys():
psnr_vals[k].update(val_log["psnr_vals"][k])
# check to make sure we have all files in all metrics
assert nmse_vals.keys() == ssim_vals.keys() == psnr_vals.keys()
# apply means across image volumes
metrics = {"nmse": 0, "ssim": 0, "psnr": 0}
local_examples = 0
for fname in nmse_vals.keys():
local_examples = local_examples + 1
metrics["nmse"] = metrics["nmse"] + torch.mean(
torch.cat([v.view(-1) for _, v in nmse_vals[fname].items()])
)
metrics["ssim"] = metrics["ssim"] + torch.mean(
torch.cat([v.view(-1) for _, v in ssim_vals[fname].items()])
)
metrics["psnr"] = metrics["psnr"] + torch.mean(
torch.cat([v.view(-1) for _, v in psnr_vals[fname].items()])
)
# reduce across ddp via sum
metrics["nmse"] = self.NMSE(metrics["nmse"])
metrics["ssim"] = self.SSIM(metrics["ssim"])
metrics["psnr"] = self.PSNR(metrics["psnr"])
tot_examples = self.TotExamples(torch.tensor(local_examples))
val_loss = self.ValLoss(torch.sum(torch.cat(losses)))
tot_slice_examples = self.TotSliceExamples(
torch.tensor(len(losses), dtype=torch.float)
)
self.log("val_loss", val_loss / tot_slice_examples, prog_bar=True)
for metric, value in metrics.items():
self.log(f"val_metrics/{metric}", value / tot_examples)
def test_epoch_end(self, test_logs):
outputs = defaultdict(dict)
# use dicts for aggregation to handle duplicate slices in ddp mode
for log in test_logs:
for i, (fname, slice_num) in enumerate(zip(log["fname"], log["slice"])):
outputs[fname][int(slice_num.cpu())] = log["output"][i]
# stack all the slices for each file
for fname in outputs:
outputs[fname] = np.stack(
[out for _, out in sorted(outputs[fname].items())]
)
# pull the default_root_dir if we have a trainer, otherwise save to cwd
if hasattr(self, "trainer"):
save_path = pathlib.Path(self.trainer.default_root_dir) / "reconstructions"
else:
save_path = pathlib.Path.cwd() / "reconstructions"
self.print(f"Saving reconstructions to {save_path}")
fastmri.save_reconstructions(outputs, save_path)
@staticmethod
def add_model_specific_args(parent_parser): # pragma: no-cover
"""
Define parameters that only apply to this model
"""
parser = ArgumentParser(parents=[parent_parser], add_help=False)
# logging params
parser.add_argument(
"--num_log_images",
default=16,
type=int,
help="Number of images to log to Tensorboard",
)
return parser
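# Minimal sketch (illustrative, not part of fastMRI): the surface a concrete
# reconstruction model is expected to provide, per the MriModule docstring
# above. The network, batch handling and optimizer here are placeholders.
class _ExampleModule(MriModule):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Identity()  # placeholder; a real model goes here
    def training_step(self, batch, batch_idx):
        raise NotImplementedError  # compute and return the training loss
    def validation_step(self, batch, batch_idx):
        raise NotImplementedError  # return the dict expected by validation_step_end
    def test_step(self, batch, batch_idx):
        raise NotImplementedError  # return fname, slice and output for test_epoch_end
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)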
| 36.817427
| 87
| 0.588414
|
2e66464683eb0b4c91165a4b34f5924f90e384d6
| 5,398
|
py
|
Python
|
bddrest/tests/test_call.py
|
pyfather/bddrest
|
dc8df8c52aaed1fd56a7d5c3a2bc3450432c7395
|
[
"MIT"
] | null | null | null |
bddrest/tests/test_call.py
|
pyfather/bddrest
|
dc8df8c52aaed1fd56a7d5c3a2bc3450432c7395
|
[
"MIT"
] | null | null | null |
bddrest/tests/test_call.py
|
pyfather/bddrest
|
dc8df8c52aaed1fd56a7d5c3a2bc3450432c7395
|
[
"MIT"
] | null | null | null |
import cgi
import functools
import json
import unittest
import pytest
from bddrest import CallVerifyError, FirstCall, AlteredCall
def wsgi_application(environ, start_response):
form = cgi.FieldStorage(
fp=environ['wsgi.input'],
environ=environ,
strict_parsing=False,
keep_blank_values=True
)
start_response(
'200 OK',
[('Content-Type', 'application/json;charset=utf-8')]
)
result = dict(
query=environ.get('QUERY_STRING'),
url=environ['PATH_INFO']
)
if form and isinstance(form, dict):
result.update(form)
yield json.dumps(result).encode()
def test_call_constructor():
    call = FirstCall('Testing Call constructor', url='/id: 1')
assert call.url == '/:id'
assert call.url_parameters == dict(id='1')
call = FirstCall(
        'Testing Call constructor',
url='/id: 1/:name',
url_parameters=dict(name='foo', id=2)
)
call.validate()
assert call.url == '/:id/:name'
assert call.url_parameters == dict(id='2', name='foo')
call.conclude(wsgi_application)
assert '/2/foo' == call.response.json['url']
def test_call_invoke():
    call = FirstCall('Testing Call constructor', url='/id: 1')
call.conclude(wsgi_application)
assert call.response is not None
def test_call_response():
    call = FirstCall('Testing Call constructor', url='/id: 1', query='a=1')
call.conclude(wsgi_application)
assert call.response is not None
assert call.response.body is not None
assert call.response.status == '200 OK'
assert call.response.status == 200
assert call.response.encoding == 'utf-8'
assert call.response.content_type == 'application/json'
assert call.response.text is not None
assert call.response.json == {'query': 'a=1', 'url': '/1'}
assert call.response.headers == [
('Content-Type', 'application/json;charset=utf-8')
]
def test_call_to_dict():
call = FirstCall('Testing Call to_dict', url='/id: 1', query='a=1')
call.conclude(wsgi_application)
call_dict = call.to_dict()
assert call_dict == dict(
title='Testing Call to_dict',
query=dict(a='1'),
url='/:id',
url_parameters={'id': '1'},
verb='GET',
response=dict(
json={'query': 'a=1', 'url': '/1'},
headers=['Content-Type: application/json;charset=utf-8'],
status='200 OK',
)
)
def test_altered_call():
call = FirstCall(
        'Testing AlteredCall constructor',
url='/id: 1',
query=dict(a=1)
)
altered_call = AlteredCall(
call,
'Altering a call',
query=dict(b=2)
)
altered_call.conclude(wsgi_application)
assert altered_call.to_dict() == dict(
title='Altering a call',
query=dict(b=2),
response=dict(
status='200 OK',
headers=['Content-Type: application/json;charset=utf-8'],
json={'query': 'b=2', 'url': '/1'}
)
)
def test_alteredcall_setters_deleters():
basecall = FirstCall(
'Base call for testing When class',
url='/apiv1/devices/id: 1',
)
when = AlteredCall(
basecall,
title='Testing the When class',
url='/apiv1/books/isbn: abc/pages/page: 3?highlight=false',
verb='POST',
form=dict(a='b'),
headers=['A: B'],
content_type='text/plain',
as_='Admin',
extra_environ=dict(A='B')
)
assert '/apiv1/books/:isbn/pages/:page' == when.url
assert dict(isbn='abc', page='3') == when.url_parameters
assert dict(highlight='false') == when.query
assert dict(a='b') == when.form
assert 'POST' == when.verb
assert 'A' in when.headers
assert 'text/plain' == when.content_type
assert 'Admin' == when.as_
del when.url_parameters
del when.verb
del when.headers
del when.query
del when.content_type
del when.as_
del when.extra_environ
del when.form
assert dict(id='1') == when.url_parameters
assert 'GET' == when.verb
assert when.headers is None
assert when.query is None
assert when.form is None
assert when.content_type is None
assert when.as_ is None
assert when.extra_environ is None
def test_call_verify():
call = FirstCall(
        'Testing FirstCall constructor',
url='/id: 1',
query=dict(a=1)
)
call.conclude(wsgi_application)
call.verify(wsgi_application)
altered_call = AlteredCall(
call,
'Altering a call',
query=dict(b=2)
)
altered_call.conclude(wsgi_application)
altered_call.verify(wsgi_application)
altered_call.response.body = '{"a": 1}'
with pytest.raises(CallVerifyError):
altered_call.verify(wsgi_application)
altered_call.response.status = '400 Bad Request'
with pytest.raises(CallVerifyError):
altered_call.verify(wsgi_application)
def test_querystring_parser():
call = FirstCall('Testing querystring parsing', url='/id: 1?a=1')
assert '/:id' == call.url
assert dict(a='1') == call.query
call = FirstCall('Testing querystring parsing', url='/id: 1?a=1&a=2')
assert dict(a=['1','2']) == call.query
def test_form_parser():
    payload = dict(a=1, b=2)
    call = FirstCall('Testing form parsing', form=payload)
    assert call.form == payload
| 27.401015
| 74
| 0.618933
|
0d2df0ea8f8741d5e45a833fde75593273e8f313
| 2,196
|
py
|
Python
|
users/migrations/0001_squashed.py
|
Kadantte/MangAdventure
|
0646617cb6376eaeb3142cbd1f266267518a149f
|
[
"MIT"
] | 58
|
2019-03-04T09:22:42.000Z
|
2022-02-18T09:11:57.000Z
|
users/migrations/0001_squashed.py
|
Kadantte/MangAdventure
|
0646617cb6376eaeb3142cbd1f266267518a149f
|
[
"MIT"
] | 21
|
2019-03-07T19:34:53.000Z
|
2021-12-19T12:46:40.000Z
|
users/migrations/0001_squashed.py
|
Kadantte/MangAdventure
|
0646617cb6376eaeb3142cbd1f266267518a149f
|
[
"MIT"
] | 14
|
2019-06-06T09:53:13.000Z
|
2021-12-17T14:34:13.000Z
|
from django.conf import settings
from django.db import migrations, models
from MangAdventure import storage, validators
from users.models import _avatar_uploader
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reader', '0001_squashed'),
('socialaccount', '0003_extra_data_default_dict'),
]
operations = [
migrations.CreateModel(
name='Bookmark',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True,
serialize=False, verbose_name='ID'
)),
('series', models.ForeignKey(
on_delete=models.deletion.CASCADE, to='reader.Series'
)),
('user', models.ForeignKey(
on_delete=models.deletion.CASCADE,
related_name='bookmarks', to=settings.AUTH_USER_MODEL
)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True,
serialize=False, verbose_name='ID'
)),
('bio', models.TextField(
blank=True, verbose_name='biography',
help_text="The user's biography."
)),
('avatar', models.ImageField(
blank=True, help_text=(
"The user's avatar image. Must be up to 2 MBs."
), upload_to=_avatar_uploader,
storage=storage.CDNStorage((150, 150)),
validators=(validators.FileSizeValidator(2),)
)),
('user', models.OneToOneField(
on_delete=models.deletion.CASCADE,
related_name='profile', to=settings.AUTH_USER_MODEL
)),
],
),
migrations.AlterUniqueTogether(
name='bookmark',
unique_together={('series', 'user')},
),
]
| 34.3125
| 73
| 0.508197
|
94580662fda6dca6ce54922905a086f1469b404d
| 2,148
|
py
|
Python
|
tools/generate_conf.py
|
jichenjc/python-zvm-sdk
|
c081805c6079107b4823af898babdf92cf5577ee
|
[
"Apache-2.0"
] | null | null | null |
tools/generate_conf.py
|
jichenjc/python-zvm-sdk
|
c081805c6079107b4823af898babdf92cf5577ee
|
[
"Apache-2.0"
] | null | null | null |
tools/generate_conf.py
|
jichenjc/python-zvm-sdk
|
c081805c6079107b4823af898babdf92cf5577ee
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from zvmsdk import config
CONF = config.ConfigOpts()
def _print_one_line(f):
f.write('\n')
def _print_with_comment(f, v, key):
string = v.split('\n')
for i in string:
if key:
f.write('#')
else:
f.write('# ')
f.write(i)
f.write('\n')
def _print_one_section(f, section, data):
f.write('[')
f.write(section)
f.write(']')
_print_one_line(f)
for k,v in data.items():
_print_one_line(f)
if 'help' in v and len(v['help']) != 0:
_print_with_comment(f, v['help'], False)
if 'required' in v:
if v['required']:
required = 'This param is required'
else:
required = 'This param is optional'
_print_with_comment(f, required, False)
if 'default' in v:
setting = '%s=%s' % (k, v['default'])
else:
setting = '%s=' % k
_print_with_comment(f, setting, True)
_print_one_line(f)
def generate(f):
dicts = CONF.get_config_dicts_default(config.zvm_opts)
for data in sorted(dicts):
# bypass test section on purpose
if (data == 'tests'):
continue
# xcat is only used for internal test purpose
if (data == 'xcat'):
continue
_print_one_section(f, data, dicts[data])
_print_one_line(f)
def main(args=None):
doc_file = './configuration.ini'
with open(doc_file, 'w') as f:
generate(f)
main()
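# Illustrative only (not part of the tool): render a single hypothetical option
# into an in-memory buffer to show the comment/default layout produced above.
import io
_buf = io.StringIO()
_print_one_section(_buf, 'demo', {'opt': {'help': 'Example option',
                                          'required': False,
                                          'default': 1}})
# _buf.getvalue() now reads:
#   [demo]
#
#   # Example option
#   # This param is optional
#   #opt=1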
| 24.409091
| 78
| 0.586592
|
59ecb1c98523f61433c38b51f73074a7f3d8a305
| 1,237
|
py
|
Python
|
backlog/models/star.py
|
ryo8000/backlog-api4py
|
a58106bb559d0d6d39dfeb54bd2cbd2a16fb490a
|
[
"Apache-2.0"
] | null | null | null |
backlog/models/star.py
|
ryo8000/backlog-api4py
|
a58106bb559d0d6d39dfeb54bd2cbd2a16fb490a
|
[
"Apache-2.0"
] | null | null | null |
backlog/models/star.py
|
ryo8000/backlog-api4py
|
a58106bb559d0d6d39dfeb54bd2cbd2a16fb490a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Ryo H
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Star module."""
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from .base import Base
from .user import User
@dataclass
class Star(Base):
"""Star class."""
id: int
comment: Optional[str]
url: str
title: str
presenter: User
created: datetime
@classmethod
def from_dict(cls, data: dict):
return cls(
id=data["id"],
comment=data["comment"],
url=data["url"],
title=data["title"],
presenter=User.from_dict(data["presenter"]),
created=datetime.strptime(data["created"], cls._DATETIME_FORMAT),
)
| 26.891304
| 77
| 0.675829
|
2dc2f365160a03844bef7d786da04636f54c4c42
| 10,704
|
py
|
Python
|
webpath/test/test_runner.py
|
simplefin/webpath
|
2958a36a16aaa0647d51e8b1be39d65826a9592c
|
[
"Apache-2.0"
] | null | null | null |
webpath/test/test_runner.py
|
simplefin/webpath
|
2958a36a16aaa0647d51e8b1be39d65826a9592c
|
[
"Apache-2.0"
] | null | null | null |
webpath/test/test_runner.py
|
simplefin/webpath
|
2958a36a16aaa0647d51e8b1be39d65826a9592c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) The SimpleFIN Team
# See LICENSE for details.
from twisted.trial.unittest import TestCase
from twisted.internet import defer
from webpath.runner import Runner, Context, basicRunner, interpolate
class RunnerTest(TestCase):
@defer.inlineCallbacks
def test_registerHandler(self):
"""
You can register a function to be run for a particular action.
"""
called = []
def handle(params, context):
called.append(params)
called.append(context)
return 'something'
runner = Runner()
runner.registerHandler('smirk', handle)
context = Context()
params = {'foo': 'bar'}
result = yield runner.runSingleAction('smirk', params, context)
self.assertEqual(called, [params, context], "Should have run the "
"function.")
self.assertEqual(result, 'something', "Should return the result of the"
" last function.")
@defer.inlineCallbacks
def test_registerHandlers(self):
"""
You can register more than one handler at a time.
"""
runner = Runner()
runner.registerHandlers({
'func': lambda *a,**kw: 'func',
})
result = yield runner.runSingleAction('func', {}, Context())
self.assertEqual(result, 'func')
@defer.inlineCallbacks
def test_runSingleAction(self):
"""
Running a single action should store the result in the context.
"""
runner = Runner({
'open': lambda *a,**kw: 'hello',
})
context = Context()
result = yield runner.runSingleAction('open', {}, context)
self.assertEqual(context.runner, runner, "Should set the runner")
self.assertEqual(context.variables['_'], 'hello')
self.assertEqual(context.results, ['hello'])
self.assertEqual(result, 'hello')
@defer.inlineCallbacks
def test_runActions(self):
"""
You can run a set of actions.
"""
runner = Runner({
'speak': lambda params,context: 'hi',
'emphasize': lambda params,context: context.variables['_'] + '!',
'yell': lambda params,context: context.variables['_'].upper(),
})
context = Context()
result = yield runner.runActions([
{'action': 'speak'},
{'action': 'yell'},
{'action': 'emphasize'},
], context)
self.assertEqual(result, 'HI!')
self.assertEqual(context.variables['_'], result)
self.assertEqual(context.results, ['hi', 'HI', 'HI!'])
def test_runActions_Deferred(self):
"""
Functions can return deferred results but don't have to.
"""
d = defer.Deferred()
runner = Runner({
'speak': lambda params,context: 'hi',
'gummy': lambda params,context: d,
'yell': lambda params,context: defer.succeed(context.variables['_'].upper()),
})
context = Context()
result = runner.runActions([
{'action': 'speak'},
{'action': 'gummy'},
{'action': 'yell'},
], context)
self.assertFalse(result.called, "Should not have finished yet")
d.callback('gummy bear')
result = self.successResultOf(result)
self.assertEqual(result, 'GUMMY BEAR')
self.assertEqual(context.variables['_'], result)
self.assertEqual(context.results, ['hi', 'gummy bear', 'GUMMY BEAR'])
@defer.inlineCallbacks
def test_runActions_namedResults(self):
"""
You can access results by their names.
"""
runner = Runner({
'speak': lambda params,context: params['word']
})
context = Context()
result = yield runner.runActions([
{'action': 'speak', 'word': 'Hi', 'name': 'first command'},
{'action': 'speak', 'word': '$_R["first command"]'},
], context)
self.assertEqual(result, 'Hi', "Should have access to all results")
class basicRunnerTest(TestCase):
@defer.inlineCallbacks
def test_loop(self):
"""
You can loop over a list of things
"""
called = []
def func(params, context):
called.append(params['arg'])
return 'hey, ' + params['arg']
runner = basicRunner({'func': func})
context = Context()
result = yield runner.runSingleAction('loop', {
'action': 'loop',
'iterable': ['jim', 'john', 'joe'],
'actions': [
{'action': 'func', 'arg': '$item'},
],
}, context)
self.assertEqual(result, 'hey, joe', "Should return last result")
self.assertEqual(called, ['jim', 'john', 'joe'],
"Should replace $item with the item")
@defer.inlineCallbacks
def test_set(self):
"""
You can save the last result in a variable.
"""
runner = basicRunner({
'func': lambda *a: 'hello',
})
context = Context()
result = yield runner.runActions([
{'action': 'func'},
{'action': 'set', 'key': 'something', 'value': '$_'},
], context)
self.assertEqual(result, 'hello', "save should return the previous "
"result")
self.assertEqual(context.variables['_'], 'hello')
self.assertEqual(context.variables['something'], 'hello')
@defer.inlineCallbacks
def test_append(self):
"""
You can append an item to a list, creating it in the process.
"""
runner = basicRunner()
context = Context()
result = yield runner.runActions([
{'action': 'append', 'key': 'foo', 'value': 'apple'},
{'action': 'append', 'key': 'foo', 'value': 'something'},
], context)
self.assertEqual(result, ['apple', 'something'])
self.assertEqual(context.variables['foo'], ['apple', 'something'])
@defer.inlineCallbacks
def test_ask(self):
"""
You can ask the calling system for some information.
"""
runner = basicRunner()
context = Context(lambda key, prompt, kw: 'A robot %s' % (key,))
result = yield runner.runSingleAction('ask', {
'action': 'ask',
'key': 'THE KEY',
'prompt': 'Who are you?',
}, context)
self.assertEqual(result, 'A robot THE KEY')
self.assertEqual(context.variables['THE KEY'], 'A robot THE KEY')
@defer.inlineCallbacks
def test_ask_kwargs(self):
"""
You can send additional data to the prompting service.
"""
runner = basicRunner()
context = Context(lambda key, prompt, kw: 'A robot %s' % (kw['foo'],))
result = yield runner.runSingleAction('ask', {
'action': 'ask',
'key': 'THE KEY',
'prompt': 'Who are you?',
'kwargs': {'foo': 'heyo'},
}, context)
self.assertEqual(result, 'A robot heyo')
self.assertEqual(context.variables['THE KEY'], 'A robot heyo')
@defer.inlineCallbacks
def test_dump(self):
"""
You can dump a subset of the available data.
"""
runner = basicRunner()
context = Context()
result = yield runner.runActions([
{'action': 'set', 'key': 'foo', 'value': 'foo value'},
{'action': 'set', 'key': 'bar', 'value': 'bar value'},
{'action': 'dump', 'keys': ['bar']},
], context)
self.assertEqual(result, {'bar': 'bar value'})
class ContextTest(TestCase):
def test_variables(self):
context = Context()
context.variables['foo'] = 'bar'
def test_getUserInput(self):
"""
A context can be made to request user input.
"""
d = defer.Deferred()
called = []
def getUserInput(key, prompt, kwargs):
called.append(key)
called.append(prompt)
called.append(kwargs)
return d
context = Context(getUserInput)
result = context.getUserInput('id', 'What is your id?', {})
self.assertFalse(result.called)
self.assertEqual(called, ['id', 'What is your id?', {}])
d.callback('foo')
self.assertEqual(self.successResultOf(result), 'foo')
def test_requests(self):
"""
It should use a requests session by default.
"""
import requests
c = Context()
self.assertTrue(isinstance(c.requests, requests.Session),
"Should have a .requests attr that is a Session")
class interpolateTest(TestCase):
def test_basic(self):
"""
You can replace $vars with values from a dict
"""
variables = {'foo': [1, 2, 'foo']}
original = {'hey': '$foo'}
result = interpolate(original, variables)
self.assertEqual(result, {'hey': [1, 2, 'foo']})
self.assertEqual(original, {'hey': '$foo'})
def test_nonVars(self):
"""
Non variables should be ignored.
"""
variables = {'foo': 'foo value'}
original = {'hey': 5}
result = interpolate(original, variables)
self.assertEqual(result, {'hey': 5})
def test_deep_dict(self):
"""
All depths of dictionary should be traversed.
"""
variables = {'foo': 'foo value'}
original = {'hey': {'something': '$foo'}}
result = interpolate(original, variables)
self.assertEqual(result, {'hey': {'something': 'foo value'}})
def test_deep_list(self):
"""
All depths of lists should be traversed.
"""
variables = {'foo': 'foo value'}
original = {'hey': ['this', '$foo', 'thing']}
result = interpolate(original, variables)
self.assertEqual(result, {'hey': ['this', 'foo value', 'thing']})
def test_attributes(self):
"""
You can do attribute access.
"""
class Foo:
name = 'something'
variables = {'foo': Foo()}
original = {'foo': '$foo.name'}
result = interpolate(original, variables)
self.assertEqual(result, {'foo': 'something'})
def test_array(self):
"""
You can do index-based access
"""
variables = {'foo': [1, 'apple', 'cannon']}
original = {'foo': '$foo[1]'}
result = interpolate(original, variables)
self.assertEqual(result, {'foo': 'apple'})
| 30.495726
| 89
| 0.540452
|
96c49d83ef49cdf7ecb3d0021f41bbf665e50ba5
| 140
|
py
|
Python
|
HelloScrapy/HelloScrapy/items/__init__.py
|
callmejacob/spider
|
a3b8127d96547a8abc9ee7530de9586c98a09874
|
[
"BSD-2-Clause"
] | null | null | null |
HelloScrapy/HelloScrapy/items/__init__.py
|
callmejacob/spider
|
a3b8127d96547a8abc9ee7530de9586c98a09874
|
[
"BSD-2-Clause"
] | null | null | null |
HelloScrapy/HelloScrapy/items/__init__.py
|
callmejacob/spider
|
a3b8127d96547a8abc9ee7530de9586c98a09874
|
[
"BSD-2-Clause"
] | null | null | null |
# -- coding: utf-8 --
from item_author import AuthorItem
from item_article import ArticleItem
from item_article_desc import ArticleDescItem
| 28
| 45
| 0.828571
|
11e5b619a91b2bc8bcb796d1acc6a44677cc10e0
| 1,825
|
py
|
Python
|
TurtleDemo/Main.py
|
Alexiuce/ServerStudy
|
e004a2af4d9e3c96a07b7df6c4cb91e195efc352
|
[
"MIT"
] | 1
|
2019-01-09T07:58:32.000Z
|
2019-01-09T07:58:32.000Z
|
TurtleDemo/Main.py
|
Alexiuce/PythonRepo
|
e004a2af4d9e3c96a07b7df6c4cb91e195efc352
|
[
"MIT"
] | null | null | null |
TurtleDemo/Main.py
|
Alexiuce/PythonRepo
|
e004a2af4d9e3c96a07b7df6c4cb91e195efc352
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import turtle
import time
def draw():
window = turtle.Screen()
badge = turtle.Turtle()
badge.color('green','black')
badge.left(90)
badge.forward(100)
badge.right(90)
badge.color('black','black')
badge.begin_fill()
badge.circle(10)
badge.end_fill()
for i in range(1,24):
badge.left(15)
badge.forward(50)
badge.left(157)
badge.forward(50)
window.exitonclick()
boxsize = 200
caught = False
score = 0
window = turtle.Screen()
mouse = turtle.Turtle()
cat = turtle.Turtle()
def up():
mouse.forward(10)
checkbound()
def left():
mouse.left(45)
def right():
mouse.right(45)
def back():
mouse.backward(10)
checkbound()
def qiutTurles():
window.bye()
def checkbound():
global boxsize
if mouse.xcor() > boxsize:
mouse.goto(boxsize, mouse.ycor())
if mouse.xcor() < - boxsize:
mouse.goto(-boxsize, mouse.ycor())
if mouse.ycor() > boxsize:
mouse.goto(mouse.xcor(), boxsize)
if mouse.ycor() < -boxsize:
mouse.goto(mouse.xcor(), -boxsize)
mouse.penup()
cat.penup()
mouse.goto(100, 100)
window.onkeypress(up, 'Up')
window.onkeypress(left, 'Left')
window.onkeypress(right, 'Right')
window.onkeypress(back, 'Down')
window.onkeypress(qiutTurles, 'Escape')
difficulty = window.numinput('Difficulty', 'Enter a difficulty from 1 (easy) to 5 (hard)', minval=1, maxval=5)
window.listen()
while not caught:
cat.setheading(cat.towards(mouse))
cat.forward(8 + difficulty)
score += 1
if cat.distance(mouse) < 5:
caught = True
time.sleep(0.2 - (0.01 * difficulty))
window.textinput('Game Over', 'Well done, you scored: ' + str(score * difficulty))
window.bye()
if __name__ == '__main__':
pass
| 18.069307
| 107
| 0.629041
|
8bc544c583a2508bb477131e1021108d794b22a4
| 863
|
py
|
Python
|
coding_interviews/elements_of_programming_interview/base_conversion.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 205
|
2018-12-01T17:49:49.000Z
|
2021-12-22T07:02:27.000Z
|
coding_interviews/elements_of_programming_interview/base_conversion.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 2
|
2020-01-01T16:34:29.000Z
|
2020-04-26T19:11:13.000Z
|
coding_interviews/elements_of_programming_interview/base_conversion.py
|
LeandroTk/Algorithms
|
569ed68eba3eeff902f8078992099c28ce4d7cd6
|
[
"MIT"
] | 50
|
2018-11-28T20:51:36.000Z
|
2021-11-29T04:08:25.000Z
|
'''
"615"
"5" --> 5 --> 5 * (7 ** 0)
"1" --> 1 --> 1 * (7 ** 1)
"6" --> 6 --> 6 * (7 ** 2)
=> 306
306 % 13 = 7
306 / 13 = 23
23 % 13 = 10
23 / 13 = 1
1 % 13 = 1
'''
num_representation = {
10: 'A',
11: 'B',
12: 'C',
13: 'D',
14: 'E',
15: 'F',
}
def to_num_representation(num):
if num < 10:
return str(num)
return num_representation[num]
def base_conversion(string_num, base1, base2):
decimal = 0
reversed_string_num = string_num[::-1]
for index in range(len(reversed_string_num)):
char_num = reversed_string_num[index]
decimal += int(char_num) * (base1 ** index)
digits = []
while decimal:
modulo = decimal % base2
decimal = decimal // base2
digits.append(to_num_representation(modulo))
return ''.join(digits)[::-1]
print(base_conversion("615", 7, 13))
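# Sanity check (illustrative): the worked example in the header gives digits
# 1, 10 ('A') and 7, so "615" in base 7 is "1A7" in base 13.
assert base_conversion("615", 7, 13) == "1A7"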
| 16.283019
| 52
| 0.542294
|
3f9a64e618c10b58e94b2089fd6483522790200b
| 12,269
|
bzl
|
Python
|
bazel/ray_deps_setup.bzl
|
Phirefly9/ray
|
bbfb86c5130a1a6a11ba3cd6f928a7c4078788e1
|
[
"Apache-2.0"
] | null | null | null |
bazel/ray_deps_setup.bzl
|
Phirefly9/ray
|
bbfb86c5130a1a6a11ba3cd6f928a7c4078788e1
|
[
"Apache-2.0"
] | 26
|
2021-09-18T07:09:04.000Z
|
2022-03-26T07:07:35.000Z
|
bazel/ray_deps_setup.bzl
|
Phirefly9/ray
|
bbfb86c5130a1a6a11ba3cd6f928a7c4078788e1
|
[
"Apache-2.0"
] | null | null | null |
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository", "new_git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
def urlsplit(url):
""" Splits a URL like "https://example.com/a/b?c=d&e#f" into a tuple:
("https", ["example", "com"], ["a", "b"], ["c=d", "e"], "f")
A trailing slash will result in a correspondingly empty final path component.
"""
split_on_anchor = url.split("#", 1)
split_on_query = split_on_anchor[0].split("?", 1)
split_on_scheme = split_on_query[0].split("://", 1)
if len(split_on_scheme) <= 1: # Scheme is optional
split_on_scheme = [None] + split_on_scheme[:1]
split_on_path = split_on_scheme[1].split("/")
return {
"scheme": split_on_scheme[0],
"netloc": split_on_path[0].split("."),
"path": split_on_path[1:],
"query": split_on_query[1].split("&") if len(split_on_query) > 1 else None,
"fragment": split_on_anchor[1] if len(split_on_anchor) > 1 else None,
}
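# Worked example (illustrative, matching the docstring above):
#   urlsplit("https://example.com/a/b?c=d&e#f") == {
#       "scheme": "https",
#       "netloc": ["example", "com"],
#       "path": ["a", "b"],
#       "query": ["c=d", "e"],
#       "fragment": "f",
#   }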
def auto_http_archive(
*,
name = None,
url = None,
urls = True,
build_file = None,
build_file_content = None,
strip_prefix = True,
**kwargs):
""" Intelligently choose mirrors based on the given URL for the download.
Either url or urls is required.
If name == None , it is auto-deduced, but this is NOT recommended.
If urls == True , mirrors are automatically chosen.
If build_file == True , it is auto-deduced.
If strip_prefix == True , it is auto-deduced.
"""
DOUBLE_SUFFIXES_LOWERCASE = [("tar", "bz2"), ("tar", "gz"), ("tar", "xz")]
mirror_prefixes = ["https://mirror.bazel.build/"]
canonical_url = url if url != None else urls[0]
url_parts = urlsplit(canonical_url)
url_except_scheme = (canonical_url.replace(url_parts["scheme"] + "://", "") if url_parts["scheme"] != None else canonical_url)
url_path_parts = url_parts["path"]
url_filename = url_path_parts[-1]
url_filename_parts = (url_filename.rsplit(".", 2) if (tuple(url_filename.lower().rsplit(".", 2)[-2:]) in
DOUBLE_SUFFIXES_LOWERCASE) else url_filename.rsplit(".", 1))
is_github = url_parts["netloc"] == ["github", "com"]
if name == None: # Deduce "com_github_user_project_name" from "https://github.com/user/project-name/..."
name = "_".join(url_parts["netloc"][::-1] + url_path_parts[:2]).replace("-", "_")
if build_file == True:
build_file = "@//%s:%s" % ("bazel", "BUILD." + name)
if urls == True:
prefer_url_over_mirrors = is_github
urls = [
mirror_prefix + url_except_scheme
for mirror_prefix in mirror_prefixes
if not canonical_url.startswith(mirror_prefix)
]
urls.insert(0 if prefer_url_over_mirrors else len(urls), canonical_url)
else:
print("No implicit mirrors used because urls were explicitly provided")
if strip_prefix == True:
prefix_without_v = url_filename_parts[0]
if prefix_without_v.startswith("v") and prefix_without_v[1:2].isdigit():
# GitHub automatically strips a leading 'v' in version numbers
prefix_without_v = prefix_without_v[1:]
strip_prefix = (url_path_parts[1] + "-" + prefix_without_v if is_github and url_path_parts[2:3] == ["archive"] else url_filename_parts[0])
return http_archive(
name = name,
url = url,
urls = urls,
build_file = build_file,
build_file_content = build_file_content,
strip_prefix = strip_prefix,
**kwargs
)
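# Worked example (illustrative): with name = None, urls = True,
# build_file = True, strip_prefix = True and
#   url = "https://github.com/user/project-name/archive/v1.0.tar.gz"
# the values deduced above would be
#   name         = "com_github_user_project_name"
#   strip_prefix = "project-name-1.0"  (the leading "v" is dropped)
#   build_file   = "@//bazel:BUILD.com_github_user_project_name"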
def ray_deps_setup():
# Explicitly bring in protobuf dependency to work around
# https://github.com/ray-project/ray/issues/14117
http_archive(
name = "com_google_protobuf",
strip_prefix = "protobuf-3.16.0",
urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.16.0.tar.gz"],
sha256 = "7892a35d979304a404400a101c46ce90e85ec9e2a766a86041bb361f626247f5",
)
auto_http_archive(
name = "com_github_antirez_redis",
build_file = "//bazel:BUILD.redis",
url = "https://github.com/redis/redis/archive/6.0.10.tar.gz",
sha256 = "900cb82227bac58242c9b7668e7113cd952253b256fe04bbdab1b78979cf255a",
patches = [
"//thirdparty/patches:redis-quiet.patch",
],
)
auto_http_archive(
name = "com_github_redis_hiredis",
build_file = "//bazel:BUILD.hiredis",
url = "https://github.com/redis/hiredis/archive/392de5d7f97353485df1237872cb682842e8d83f.tar.gz",
sha256 = "2101650d39a8f13293f263e9da242d2c6dee0cda08d343b2939ffe3d95cf3b8b",
patches = [
"//thirdparty/patches:hiredis-windows-msvc.patch",
],
)
auto_http_archive(
name = "com_github_spdlog",
build_file = "//bazel:BUILD.spdlog",
urls = ["https://github.com/gabime/spdlog/archive/v1.7.0.zip"],
sha256 = "c8f1e1103e0b148eb8832275d8e68036f2fdd3975a1199af0e844908c56f6ea5",
)
auto_http_archive(
name = "com_github_tporadowski_redis_bin",
build_file = "//bazel:BUILD.redis",
strip_prefix = None,
url = "https://github.com/tporadowski/redis/releases/download/v5.0.9/Redis-x64-5.0.9.zip",
sha256 = "b09565b22b50c505a5faa86a7e40b6683afb22f3c17c5e6a5e35fc9b7c03f4c2",
)
auto_http_archive(
name = "rules_jvm_external",
url = "https://github.com/bazelbuild/rules_jvm_external/archive/2.10.tar.gz",
sha256 = "5c1b22eab26807d5286ada7392d796cbc8425d3ef9a57d114b79c5f8ef8aca7c",
)
auto_http_archive(
name = "bazel_common",
url = "https://github.com/google/bazel-common/archive/084aadd3b854cad5d5e754a7e7d958ac531e6801.tar.gz",
sha256 = "a6e372118bc961b182a3a86344c0385b6b509882929c6b12dc03bb5084c775d5",
)
auto_http_archive(
name = "bazel_skylib",
strip_prefix = None,
url = "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44",
)
auto_http_archive(
# This rule is used by @com_github_nelhage_rules_boost and
# declaring it here allows us to avoid patching the latter.
name = "boost",
build_file = "@com_github_nelhage_rules_boost//:BUILD.boost",
sha256 = "d73a8da01e8bf8c7eda40b4c84915071a8c8a0df4a6734537ddde4a8580524ee",
url = "https://boostorg.jfrog.io/artifactory/main/release/1.71.0/source/boost_1_71_0.tar.bz2",
patches = [
"//thirdparty/patches:boost-exception-no_warn_typeid_evaluated.patch",
],
)
auto_http_archive(
name = "com_github_nelhage_rules_boost",
# If you update the Boost version, remember to update the 'boost' rule.
url = "https://github.com/nelhage/rules_boost/archive/2613d04ab3d22dfc4543ea0a083d9adeaa0daf09.tar.gz",
sha256 = "512f913240e026099d4ca4a98b1ce8048c99de77fdc8e8584e9e2539ee119ca2",
patches = [
"//thirdparty/patches:rules_boost-undefine-boost_fallthrough.patch",
"//thirdparty/patches:rules_boost-windows-linkopts.patch",
],
)
auto_http_archive(
name = "com_github_google_flatbuffers",
url = "https://github.com/google/flatbuffers/archive/63d51afd1196336a7d1f56a988091ef05deb1c62.tar.gz",
sha256 = "3f469032571d324eabea88d7014c05fec8565a5877dbe49b2a52d8d1a0f18e63",
)
auto_http_archive(
name = "com_google_googletest",
url = "https://github.com/google/googletest/archive/refs/tags/release-1.11.0.tar.gz",
sha256 = "b4870bf121ff7795ba20d20bcdd8627b8e088f2d1dab299a031c1034eddc93d5",
)
auto_http_archive(
name = "com_github_gflags_gflags",
url = "https://github.com/gflags/gflags/archive/e171aa2d15ed9eb17054558e0b3a6a413bb01067.tar.gz",
sha256 = "b20f58e7f210ceb0e768eb1476073d0748af9b19dfbbf53f4fd16e3fb49c5ac8",
)
auto_http_archive(
name = "cython",
build_file = True,
url = "https://github.com/cython/cython/archive/26cb654dcf4ed1b1858daf16b39fd13406b1ac64.tar.gz",
sha256 = "d21e155ac9a455831f81608bb06620e4a1d75012a630faf11f4c25ad10cfc9bb",
)
auto_http_archive(
name = "io_opencensus_cpp",
url = "https://github.com/census-instrumentation/opencensus-cpp/archive/b14a5c0dcc2da8a7fc438fab637845c73438b703.zip",
sha256 = "6592e07672e7f7980687f6c1abda81974d8d379e273fea3b54b6c4d855489b9d",
patches = [
"//thirdparty/patches:opencensus-cpp-harvest-interval.patch",
"//thirdparty/patches:opencensus-cpp-shutdown-api.patch",
],
)
# OpenCensus depends on Abseil so we have to explicitly pull it in.
# This is how diamond dependencies are prevented.
auto_http_archive(
name = "com_google_absl",
url = "https://github.com/abseil/abseil-cpp/archive/refs/tags/20210324.2.tar.gz",
sha256 = "59b862f50e710277f8ede96f083a5bb8d7c9595376146838b9580be90374ee1f",
)
# OpenCensus depends on jupp0r/prometheus-cpp
auto_http_archive(
name = "com_github_jupp0r_prometheus_cpp",
url = "https://github.com/jupp0r/prometheus-cpp/archive/60eaa4ea47b16751a8e8740b05fe70914c68a480.tar.gz",
sha256 = "ec825b802487ac18b0d98e2e8b7961487b12562f8f82e424521d0a891d9e1373",
patches = [
"//thirdparty/patches:prometheus-windows-headers.patch",
# https://github.com/jupp0r/prometheus-cpp/pull/225
"//thirdparty/patches:prometheus-windows-zlib.patch",
"//thirdparty/patches:prometheus-windows-pollfd.patch",
],
)
auto_http_archive(
name = "com_github_grpc_grpc",
# NOTE: If you update this, also update @boringssl's hash.
url = "https://github.com/grpc/grpc/archive/refs/tags/v1.38.1.tar.gz",
sha256 = "f60e5b112913bf776a22c16a3053cc02cf55e60bf27a959fd54d7aaf8e2da6e8",
patches = [
"//thirdparty/patches:grpc-cython-copts.patch",
"//thirdparty/patches:grpc-python.patch",
"//thirdparty/patches:grpc-windows-python-header-path.patch",
],
)
auto_http_archive(
# This rule is used by @com_github_grpc_grpc, and using a GitHub mirror
# provides a deterministic archive hash for caching. Explanation here:
# https://github.com/grpc/grpc/blob/4790ab6d97e634a1ede983be393f3bb3c132b2f7/bazel/grpc_deps.bzl#L102
name = "boringssl",
# Ensure this matches the commit used by grpc's bazel/grpc_deps.bzl
url = "https://github.com/google/boringssl/archive/688fc5cf5428868679d2ae1072cad81055752068.tar.gz",
sha256 = "f8616dff15cb8aad6705af53c7caf7a5f1103b6aaf59c76b55995e179d47f89c",
)
auto_http_archive(
name = "rules_proto_grpc",
url = "https://github.com/rules-proto-grpc/rules_proto_grpc/archive/a74fef39c5fe636580083545f76d1eab74f6450d.tar.gz",
sha256 = "2f6606151ec042e23396f07de9e7dcf6ca9a5db1d2b09f0cc93a7fc7f4008d1b",
)
auto_http_archive(
name = "msgpack",
build_file = True,
url = "https://github.com/msgpack/msgpack-c/archive/8085ab8721090a447cf98bb802d1406ad7afe420.tar.gz",
sha256 = "83c37c9ad926bbee68d564d9f53c6cbb057c1f755c264043ddd87d89e36d15bb",
patches = [
"//thirdparty/patches:msgpack-windows-iovec.patch",
],
)
http_archive(
name = "io_opencensus_proto",
strip_prefix = "opencensus-proto-0.3.0/src",
urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v0.3.0.tar.gz"],
sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0",
)
http_archive(
name = "nlohmann_json",
strip_prefix = "json-3.9.1",
urls = ["https://github.com/nlohmann/json/archive/v3.9.1.tar.gz"],
sha256 = "4cf0df69731494668bdd6460ed8cb269b68de9c19ad8c27abc24cd72605b2d5b",
build_file = "@com_github_ray_project_ray//bazel:BUILD.nlohmann_json",
)
| 43.661922
| 146
| 0.669492
|
9f3b258c5e59b58097c72e040ef24de85c824a5d
| 135
|
py
|
Python
|
ckOLDab/python_CGI_tests/cgi_error_handling_using_cgitb_module_crude_approach.py
|
stephaneAG/Python_tests
|
dc0a8819b4f49f50f17b3ffcf009c082535e1dbe
|
[
"MIT"
] | null | null | null |
ckOLDab/python_CGI_tests/cgi_error_handling_using_cgitb_module_crude_approach.py
|
stephaneAG/Python_tests
|
dc0a8819b4f49f50f17b3ffcf009c082535e1dbe
|
[
"MIT"
] | null | null | null |
ckOLDab/python_CGI_tests/cgi_error_handling_using_cgitb_module_crude_approach.py
|
stephaneAG/Python_tests
|
dc0a8819b4f49f50f17b3ffcf009c082535e1dbe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
print "Content-Type: text/plain"
print
import sys
sys.stderr = sys.stdout
f = open('non-existent-file.txt', 'r')
| 19.285714
| 38
| 0.711111
|
0bacb76f19ceabdb1ad3dc497ff8fd6341b8e625
| 3,792
|
py
|
Python
|
Rationale_Analysis/models/classifiers/base_model.py
|
CMSC35100-JET/FRESH
|
ea2b23386f8411da7127ec84ff6dd6e684b1dced
|
[
"MIT"
] | 30
|
2020-05-15T02:24:54.000Z
|
2022-03-14T21:52:48.000Z
|
Rationale_Analysis/models/classifiers/base_model.py
|
CMSC35100-JET/FRESH
|
ea2b23386f8411da7127ec84ff6dd6e684b1dced
|
[
"MIT"
] | 5
|
2020-05-04T13:43:14.000Z
|
2022-02-14T19:37:01.000Z
|
Rationale_Analysis/models/classifiers/base_model.py
|
CMSC35100-JET/FRESH
|
ea2b23386f8411da7127ec84ff6dd6e684b1dced
|
[
"MIT"
] | 6
|
2020-10-12T21:09:57.000Z
|
2022-01-12T00:48:42.000Z
|
from typing import Optional, Dict
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import FBetaMeasure, CategoricalAccuracy
class RationaleBaseModel(Model):
def __init__(
self,
vocab: Vocabulary,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
):
super(RationaleBaseModel, self).__init__(vocab, regularizer)
self._vocabulary = vocab
self._f1_metric = FBetaMeasure()
self._accuracy = CategoricalAccuracy()
self.prediction_mode = False
initializer(self)
def forward(self, document, query=None, labels=None, metadata=None, **kwargs):
# pylint: disable=arguments-differ
raise NotImplementedError
def make_output_human_readable(self, output_dict):
output_dict = self._decode(output_dict)
output_labels = self._vocabulary.get_index_to_token_vocabulary("labels")
predicted_labels, gold_labels = [], []
for p, g in zip(output_dict["predicted_label"], output_dict["label"]):
predicted_labels.append(output_labels[int(p)])
gold_labels.append(output_labels[int(g)])
output_dict["predicted_label"] = predicted_labels
output_dict["label"] = gold_labels
output_dict["annotation_id"] = [d['annotation_id'] for d in output_dict['metadata']]
del output_dict['metadata']
return output_dict
def _call_metrics(self, output_dict):
self._f1_metric(output_dict["probs"], output_dict["gold_labels"])
self._accuracy(output_dict["probs"], output_dict["gold_labels"])
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = self._f1_metric.get_metric(reset)
macro_avg = {'macro_' + k: sum(v) / len(v) for k, v in metrics.items()}
output_labels = self._vocabulary.get_index_to_token_vocabulary("labels")
output_labels = [output_labels[i] for i in range(len(output_labels))]
class_metrics = {}
for k, v in metrics.items():
assert len(v) == len(output_labels)
class_nums = dict(zip(output_labels, v))
class_metrics.update({k + "_" + str(kc): x for kc, x in class_nums.items()})
class_metrics.update({"accuracy": self._accuracy.get_metric(reset)})
class_metrics.update(macro_avg)
modified_class_metrics = {}
for k, v in class_metrics.items():
if k in ["accuracy", "macro_fscore"]:
modified_class_metrics[k] = v
else:
modified_class_metrics["_" + k] = v
modified_class_metrics["validation_metric"] = class_metrics["macro_fscore"]
return modified_class_metrics
def normalize_attentions(self, output_dict):
"""
        In case attention is over subtokens rather than at the token level,
        combine subtoken attention into token attention.
"""
return output_dict
def combine_document_query(self, document, query):
reader = document[0]["reader_object"]
device = next(self.parameters()).device
return {
k: ({x: y.to(device) for x, y in v.items()} if type(v) == dict else v.to(device))
for k, v in reader.combine_document_query(document, query, self._vocabulary).items()
}
    # Because AllenNLP loads models with strict=True, but encoder_generator
    # type models require a rationale extractor without keepsake params.
    # The reader need not worry.
    def load_state_dict(self, state_dict, strict=True):
super().load_state_dict(state_dict, strict=False)
| 37.92
| 96
| 0.66693
|
f5545e8372c1d41bef3640a96cc4eb34d8914df2
| 4,205
|
py
|
Python
|
src/robot/htmldata/jsonwriter.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-03-10T11:10:20.000Z
|
2018-03-10T11:10:20.000Z
|
src/robot/htmldata/jsonwriter.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/htmldata/jsonwriter.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import PY2
class JsonWriter(object):
def __init__(self, output, separator=''):
self._writer = JsonDumper(output)
self._separator = separator
def write_json(self, prefix, data, postfix=';\n', mapping=None,
separator=True):
self._writer.write(prefix)
self._writer.dump(data, mapping)
self._writer.write(postfix)
self._write_separator(separator)
def write(self, string, postfix=';\n', separator=True):
self._writer.write(string + postfix)
self._write_separator(separator)
def _write_separator(self, separator):
if separator and self._separator:
self._writer.write(self._separator)
class JsonDumper(object):
def __init__(self, output):
self._output = output
self._dumpers = (MappingDumper(self),
IntegerDumper(self),
TupleListDumper(self),
StringDumper(self),
NoneDumper(self),
DictDumper(self))
def dump(self, data, mapping=None):
for dumper in self._dumpers:
if dumper.handles(data, mapping):
dumper.dump(data, mapping)
return
raise ValueError('Dumping %s not supported.' % type(data))
def write(self, data):
self._output.write(data)
class _Dumper(object):
_handled_types = None
def __init__(self, jsondumper):
self._dump = jsondumper.dump
self._write = jsondumper.write
def handles(self, data, mapping):
return isinstance(data, self._handled_types)
def dump(self, data, mapping):
raise NotImplementedError
class StringDumper(_Dumper):
_handled_types = (str, unicode) if PY2 else str
_search_and_replace = [('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'),
('\n', '\\n'), ('\r', '\\r'), ('</', '\\x3c/')]
def dump(self, data, mapping):
self._write('"%s"' % (self._escape(data) if data else ''))
def _escape(self, string):
for search, replace in self._search_and_replace:
if search in string:
string = string.replace(search, replace)
return string
class IntegerDumper(_Dumper):
# Handles also bool
_handled_types = (int, long) if PY2 else int
def dump(self, data, mapping):
self._write(str(data).lower())
class DictDumper(_Dumper):
_handled_types = dict
def dump(self, data, mapping):
self._write('{')
last_index = len(data) - 1
for index, key in enumerate(sorted(data)):
self._dump(key, mapping)
self._write(':')
self._dump(data[key], mapping)
if index < last_index:
self._write(',')
self._write('}')
class TupleListDumper(_Dumper):
_handled_types = (tuple, list)
def dump(self, data, mapping):
self._write('[')
last_index = len(data) - 1
for index, item in enumerate(data):
self._dump(item, mapping)
if index < last_index:
self._write(',')
self._write(']')
class MappingDumper(_Dumper):
def handles(self, data, mapping):
try:
return mapping and data in mapping
except TypeError:
return False
def dump(self, data, mapping):
self._write(mapping[data])
class NoneDumper(_Dumper):
def handles(self, data, mapping):
return data is None
def dump(self, data, mapping):
self._write('null')
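# Illustrative usage (not part of the module): dump a small structure into an
# in-memory buffer; booleans go through IntegerDumper and strings are quoted
# and escaped by StringDumper.
if __name__ == '__main__':
    from io import StringIO
    buf = StringIO()
    JsonWriter(buf).write_json('window.data = ', {'name': 'suite', 'ok': True})
    print(buf.getvalue())  # window.data = {"name":"suite","ok":true};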
| 28.80137
| 75
| 0.600238
|
8ad35576e11e1348586a5044303d64a7a7a6b936
| 942
|
py
|
Python
|
character.py
|
jonh1036/AbusiveLangDic
|
c005beba3dfc7a3bf94d66a96a8cb20753ce9faf
|
[
"MIT"
] | 1
|
2020-10-18T20:39:05.000Z
|
2020-10-18T20:39:05.000Z
|
character.py
|
jonh1036/AbusiveLangDic
|
c005beba3dfc7a3bf94d66a96a8cb20753ce9faf
|
[
"MIT"
] | null | null | null |
character.py
|
jonh1036/AbusiveLangDic
|
c005beba3dfc7a3bf94d66a96a8cb20753ce9faf
|
[
"MIT"
] | null | null | null |
#def n_grams(self, twittes, n_grams):
teste = ["cbie informática educação tecnologia teste mais novo"]
n = 2
def ngrams(n, texto):
    '''n -> number of tokens per gram
    texto -> string (treated as a single token) or list of tokens'''
    if type(texto) == str:
        teste = [texto]
    else:
        # accept a ready-made token list (e.g. the output of char_ngrams)
        teste = texto
    textoN_Grams = []
    for index in range(len(teste)):
        textoTemp = ""
        if (index + n - 1) < len(teste):
            for index in range(index, index + n):
                textoTemp += teste[index] + " "
            textoN_Grams += [textoTemp.strip()]
    return textoN_Grams
def char_ngrams(n, texto):
    '''n -> number of characters per gram
    texto -> string'''
    teste = [texto]
    textoN_Grams = []
    for item in teste:
        for index in range(len(item)):
            if (index + n - 1) < len(item):
                textoN_Grams += [item[index:index + n]]
    return textoN_Grams
ngrams(2,char_ngrams(4,"cbie informática educação tecnologia teste mais novo"))
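# Quick sanity check (an illustrative addition, not in the original script):
# the two helpers compose as "character n-grams, then bigrams over those grams".
assert char_ngrams(4, "cbie") == ["cbie"]
assert ngrams(2, char_ngrams(3, "cbie")) == ["cbi bie"]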
| 23.55
| 79
| 0.550955
|
394f1aa71d59227360e297b9ac53e8aef5ba45cb
| 225,273
|
py
|
Python
|
dependency/pefile.py
|
marche147/pepatch
|
9e06402008ea5d847ce0adb130137381e4477a4a
|
[
"MIT"
] | 29
|
2018-01-01T04:59:44.000Z
|
2021-09-15T21:03:12.000Z
|
dependency/pefile.py
|
marche147/pepatch
|
9e06402008ea5d847ce0adb130137381e4477a4a
|
[
"MIT"
] | null | null | null |
dependency/pefile.py
|
marche147/pepatch
|
9e06402008ea5d847ce0adb130137381e4477a4a
|
[
"MIT"
] | 3
|
2018-01-02T09:31:42.000Z
|
2018-07-08T18:20:23.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""pefile, Portable Executable reader module
All the PE file basic structures are available with their default names as
attributes of the instance returned.
Processed elements such as the import table are made available with lowercase
names, to differentiate them from the upper case basic structure names.
pefile has been tested against many edge cases such as corrupted and malformed
PEs as well as malware, which often attempts to abuse the format way beyond its
standard use. To the best of my knowledge most of the abuse is handled
gracefully.
Copyright (c) 2005-2017 Ero Carrera <ero.carrera@gmail.com>
All rights reserved.
"""
from __future__ import division
from __future__ import print_function
from builtins import bytes
from builtins import chr
from builtins import object
from builtins import range
from builtins import str
from builtins import zip
__author__ = 'Ero Carrera'
__version__ = '2017.11.5'
__contact__ = 'ero.carrera@gmail.com'
import os
import struct
import sys
import codecs
import time
import math
import re
import string
import array
import mmap
import ordlookup
from collections import Counter
from hashlib import sha1
from hashlib import sha256
from hashlib import sha512
from hashlib import md5
PY3 = sys.version_info > (3,)
if PY3:
long = int
def count_zeroes(data):
try:
# newbytes' count() takes a str in Python 2
count = data.count('\0')
except TypeError:
# bytes' count() takes an int in Python 3
count = data.count(0)
return count
fast_load = False
# This will set a maximum length of a string to be retrieved from the file.
# It's there to prevent loading massive amounts of data from memory mapped
# files. Strings longer than 1MB should be rather rare.
MAX_STRING_LENGTH = 0x100000 # 2^20
# Limit maximum length for specific string types separately
MAX_IMPORT_NAME_LENGTH = 0x200
MAX_DLL_LENGTH = 0x200
MAX_SYMBOL_NAME_LENGTH = 0x200
IMAGE_DOS_SIGNATURE = 0x5A4D
IMAGE_DOSZM_SIGNATURE = 0x4D5A
IMAGE_NE_SIGNATURE = 0x454E
IMAGE_LE_SIGNATURE = 0x454C
IMAGE_LX_SIGNATURE = 0x584C
IMAGE_TE_SIGNATURE = 0x5A56 # Terse Executables have a 'VZ' signature
IMAGE_NT_SIGNATURE = 0x00004550
IMAGE_NUMBEROF_DIRECTORY_ENTRIES= 16
IMAGE_ORDINAL_FLAG = 0x80000000
IMAGE_ORDINAL_FLAG64 = 0x8000000000000000
OPTIONAL_HEADER_MAGIC_PE = 0x10b
OPTIONAL_HEADER_MAGIC_PE_PLUS = 0x20b
directory_entry_types = [
('IMAGE_DIRECTORY_ENTRY_EXPORT', 0),
('IMAGE_DIRECTORY_ENTRY_IMPORT', 1),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', 2),
('IMAGE_DIRECTORY_ENTRY_EXCEPTION', 3),
('IMAGE_DIRECTORY_ENTRY_SECURITY', 4),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', 5),
('IMAGE_DIRECTORY_ENTRY_DEBUG', 6),
# Architecture on non-x86 platforms
('IMAGE_DIRECTORY_ENTRY_COPYRIGHT', 7),
('IMAGE_DIRECTORY_ENTRY_GLOBALPTR', 8),
('IMAGE_DIRECTORY_ENTRY_TLS', 9),
('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', 10),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', 11),
('IMAGE_DIRECTORY_ENTRY_IAT', 12),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', 13),
('IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR',14),
('IMAGE_DIRECTORY_ENTRY_RESERVED', 15) ]
DIRECTORY_ENTRY = dict(
[(e[1], e[0]) for e in directory_entry_types]+directory_entry_types)
image_characteristics = [
('IMAGE_FILE_RELOCS_STRIPPED', 0x0001),
('IMAGE_FILE_EXECUTABLE_IMAGE', 0x0002),
('IMAGE_FILE_LINE_NUMS_STRIPPED', 0x0004),
('IMAGE_FILE_LOCAL_SYMS_STRIPPED', 0x0008),
('IMAGE_FILE_AGGRESIVE_WS_TRIM', 0x0010),
('IMAGE_FILE_LARGE_ADDRESS_AWARE', 0x0020),
('IMAGE_FILE_16BIT_MACHINE', 0x0040),
('IMAGE_FILE_BYTES_REVERSED_LO', 0x0080),
('IMAGE_FILE_32BIT_MACHINE', 0x0100),
('IMAGE_FILE_DEBUG_STRIPPED', 0x0200),
('IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', 0x0400),
('IMAGE_FILE_NET_RUN_FROM_SWAP', 0x0800),
('IMAGE_FILE_SYSTEM', 0x1000),
('IMAGE_FILE_DLL', 0x2000),
('IMAGE_FILE_UP_SYSTEM_ONLY', 0x4000),
('IMAGE_FILE_BYTES_REVERSED_HI', 0x8000) ]
IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in
image_characteristics]+image_characteristics)
section_characteristics = [
('IMAGE_SCN_TYPE_REG', 0x00000000), # reserved
('IMAGE_SCN_TYPE_DSECT', 0x00000001), # reserved
('IMAGE_SCN_TYPE_NOLOAD', 0x00000002), # reserved
('IMAGE_SCN_TYPE_GROUP', 0x00000004), # reserved
('IMAGE_SCN_TYPE_NO_PAD', 0x00000008), # reserved
('IMAGE_SCN_TYPE_COPY', 0x00000010), # reserved
('IMAGE_SCN_CNT_CODE', 0x00000020),
('IMAGE_SCN_CNT_INITIALIZED_DATA', 0x00000040),
('IMAGE_SCN_CNT_UNINITIALIZED_DATA', 0x00000080),
('IMAGE_SCN_LNK_OTHER', 0x00000100),
('IMAGE_SCN_LNK_INFO', 0x00000200),
('IMAGE_SCN_LNK_OVER', 0x00000400), # reserved
('IMAGE_SCN_LNK_REMOVE', 0x00000800),
('IMAGE_SCN_LNK_COMDAT', 0x00001000),
('IMAGE_SCN_MEM_PROTECTED', 0x00004000), # obsolete
('IMAGE_SCN_NO_DEFER_SPEC_EXC', 0x00004000),
('IMAGE_SCN_GPREL', 0x00008000),
('IMAGE_SCN_MEM_FARDATA', 0x00008000),
('IMAGE_SCN_MEM_SYSHEAP', 0x00010000), # obsolete
('IMAGE_SCN_MEM_PURGEABLE', 0x00020000),
('IMAGE_SCN_MEM_16BIT', 0x00020000),
('IMAGE_SCN_MEM_LOCKED', 0x00040000),
('IMAGE_SCN_MEM_PRELOAD', 0x00080000),
('IMAGE_SCN_ALIGN_1BYTES', 0x00100000),
('IMAGE_SCN_ALIGN_2BYTES', 0x00200000),
('IMAGE_SCN_ALIGN_4BYTES', 0x00300000),
('IMAGE_SCN_ALIGN_8BYTES', 0x00400000),
('IMAGE_SCN_ALIGN_16BYTES', 0x00500000), # default alignment
('IMAGE_SCN_ALIGN_32BYTES', 0x00600000),
('IMAGE_SCN_ALIGN_64BYTES', 0x00700000),
('IMAGE_SCN_ALIGN_128BYTES', 0x00800000),
('IMAGE_SCN_ALIGN_256BYTES', 0x00900000),
('IMAGE_SCN_ALIGN_512BYTES', 0x00A00000),
('IMAGE_SCN_ALIGN_1024BYTES', 0x00B00000),
('IMAGE_SCN_ALIGN_2048BYTES', 0x00C00000),
('IMAGE_SCN_ALIGN_4096BYTES', 0x00D00000),
('IMAGE_SCN_ALIGN_8192BYTES', 0x00E00000),
('IMAGE_SCN_ALIGN_MASK', 0x00F00000),
('IMAGE_SCN_LNK_NRELOC_OVFL', 0x01000000),
('IMAGE_SCN_MEM_DISCARDABLE', 0x02000000),
('IMAGE_SCN_MEM_NOT_CACHED', 0x04000000),
('IMAGE_SCN_MEM_NOT_PAGED', 0x08000000),
('IMAGE_SCN_MEM_SHARED', 0x10000000),
('IMAGE_SCN_MEM_EXECUTE', 0x20000000),
('IMAGE_SCN_MEM_READ', 0x40000000),
('IMAGE_SCN_MEM_WRITE', 0x80000000) ]
SECTION_CHARACTERISTICS = dict([(e[1], e[0]) for e in
section_characteristics]+section_characteristics)
debug_types = [
('IMAGE_DEBUG_TYPE_UNKNOWN', 0),
('IMAGE_DEBUG_TYPE_COFF', 1),
('IMAGE_DEBUG_TYPE_CODEVIEW', 2),
('IMAGE_DEBUG_TYPE_FPO', 3),
('IMAGE_DEBUG_TYPE_MISC', 4),
('IMAGE_DEBUG_TYPE_EXCEPTION', 5),
('IMAGE_DEBUG_TYPE_FIXUP', 6),
('IMAGE_DEBUG_TYPE_OMAP_TO_SRC', 7),
('IMAGE_DEBUG_TYPE_OMAP_FROM_SRC', 8),
('IMAGE_DEBUG_TYPE_BORLAND', 9),
('IMAGE_DEBUG_TYPE_RESERVED10', 10),
('IMAGE_DEBUG_TYPE_CLSID', 11),
('IMAGE_DEBUG_TYPE_VC_FEATURE', 12),
('IMAGE_DEBUG_TYPE_POGO', 13),
('IMAGE_DEBUG_TYPE_ILTCG', 14),
('IMAGE_DEBUG_TYPE_MPX', 15) ]
DEBUG_TYPE = dict([(e[1], e[0]) for e in debug_types]+debug_types)
subsystem_types = [
('IMAGE_SUBSYSTEM_UNKNOWN', 0),
('IMAGE_SUBSYSTEM_NATIVE', 1),
('IMAGE_SUBSYSTEM_WINDOWS_GUI', 2),
('IMAGE_SUBSYSTEM_WINDOWS_CUI', 3),
('IMAGE_SUBSYSTEM_OS2_CUI', 5),
('IMAGE_SUBSYSTEM_POSIX_CUI', 7),
('IMAGE_SUBSYSTEM_NATIVE_WINDOWS', 8),
('IMAGE_SUBSYSTEM_WINDOWS_CE_GUI', 9),
('IMAGE_SUBSYSTEM_EFI_APPLICATION', 10),
('IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER', 11),
('IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER', 12),
('IMAGE_SUBSYSTEM_EFI_ROM', 13),
('IMAGE_SUBSYSTEM_XBOX', 14),
('IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION', 16)]
SUBSYSTEM_TYPE = dict([(e[1], e[0]) for e in subsystem_types]+subsystem_types)
machine_types = [
('IMAGE_FILE_MACHINE_UNKNOWN', 0),
('IMAGE_FILE_MACHINE_I386', 0x014c),
('IMAGE_FILE_MACHINE_R3000', 0x0162),
('IMAGE_FILE_MACHINE_R4000', 0x0166),
('IMAGE_FILE_MACHINE_R10000', 0x0168),
('IMAGE_FILE_MACHINE_WCEMIPSV2',0x0169),
('IMAGE_FILE_MACHINE_ALPHA', 0x0184),
('IMAGE_FILE_MACHINE_SH3', 0x01a2),
('IMAGE_FILE_MACHINE_SH3DSP', 0x01a3),
('IMAGE_FILE_MACHINE_SH3E', 0x01a4),
('IMAGE_FILE_MACHINE_SH4', 0x01a6),
('IMAGE_FILE_MACHINE_SH5', 0x01a8),
('IMAGE_FILE_MACHINE_ARM', 0x01c0),
('IMAGE_FILE_MACHINE_THUMB', 0x01c2),
('IMAGE_FILE_MACHINE_ARMNT', 0x01c4),
('IMAGE_FILE_MACHINE_AM33', 0x01d3),
('IMAGE_FILE_MACHINE_POWERPC', 0x01f0),
('IMAGE_FILE_MACHINE_POWERPCFP',0x01f1),
('IMAGE_FILE_MACHINE_IA64', 0x0200),
('IMAGE_FILE_MACHINE_MIPS16', 0x0266),
('IMAGE_FILE_MACHINE_ALPHA64', 0x0284),
('IMAGE_FILE_MACHINE_AXP64', 0x0284), # same
('IMAGE_FILE_MACHINE_MIPSFPU', 0x0366),
('IMAGE_FILE_MACHINE_MIPSFPU16',0x0466),
('IMAGE_FILE_MACHINE_TRICORE', 0x0520),
('IMAGE_FILE_MACHINE_CEF', 0x0cef),
('IMAGE_FILE_MACHINE_EBC', 0x0ebc),
('IMAGE_FILE_MACHINE_AMD64', 0x8664),
('IMAGE_FILE_MACHINE_M32R', 0x9041),
('IMAGE_FILE_MACHINE_CEE', 0xc0ee),
]
MACHINE_TYPE = dict([(e[1], e[0]) for e in machine_types]+machine_types)
relocation_types = [
('IMAGE_REL_BASED_ABSOLUTE', 0),
('IMAGE_REL_BASED_HIGH', 1),
('IMAGE_REL_BASED_LOW', 2),
('IMAGE_REL_BASED_HIGHLOW', 3),
('IMAGE_REL_BASED_HIGHADJ', 4),
('IMAGE_REL_BASED_MIPS_JMPADDR', 5),
('IMAGE_REL_BASED_SECTION', 6),
('IMAGE_REL_BASED_REL', 7),
('IMAGE_REL_BASED_MIPS_JMPADDR16', 9),
('IMAGE_REL_BASED_IA64_IMM64', 9),
('IMAGE_REL_BASED_DIR64', 10),
('IMAGE_REL_BASED_HIGH3ADJ', 11) ]
RELOCATION_TYPE = dict(
[(e[1], e[0]) for e in relocation_types]+relocation_types)
dll_characteristics = [
('IMAGE_LIBRARY_PROCESS_INIT', 0x0001), # reserved
('IMAGE_LIBRARY_PROCESS_TERM', 0x0002), # reserved
('IMAGE_LIBRARY_THREAD_INIT', 0x0004), # reserved
('IMAGE_LIBRARY_THREAD_TERM', 0x0008), # reserved
('IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA', 0x0020),
('IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE', 0x0040),
('IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY', 0x0080),
('IMAGE_DLLCHARACTERISTICS_NX_COMPAT', 0x0100),
('IMAGE_DLLCHARACTERISTICS_NO_ISOLATION', 0x0200),
('IMAGE_DLLCHARACTERISTICS_NO_SEH', 0x0400),
('IMAGE_DLLCHARACTERISTICS_NO_BIND', 0x0800),
('IMAGE_DLLCHARACTERISTICS_APPCONTAINER', 0x1000),
('IMAGE_DLLCHARACTERISTICS_WDM_DRIVER', 0x2000),
('IMAGE_DLLCHARACTERISTICS_GUARD_CF', 0x4000),
('IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE', 0x8000) ]
DLL_CHARACTERISTICS = dict(
[(e[1], e[0]) for e in dll_characteristics]+dll_characteristics)
# Resource types
resource_type = [
('RT_CURSOR', 1),
('RT_BITMAP', 2),
('RT_ICON', 3),
('RT_MENU', 4),
('RT_DIALOG', 5),
('RT_STRING', 6),
('RT_FONTDIR', 7),
('RT_FONT', 8),
('RT_ACCELERATOR', 9),
('RT_RCDATA', 10),
('RT_MESSAGETABLE', 11),
('RT_GROUP_CURSOR', 12),
('RT_GROUP_ICON', 14),
('RT_VERSION', 16),
('RT_DLGINCLUDE', 17),
('RT_PLUGPLAY', 19),
('RT_VXD', 20),
('RT_ANICURSOR', 21),
('RT_ANIICON', 22),
('RT_HTML', 23),
('RT_MANIFEST', 24) ]
RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type]+resource_type)
# Language definitions
lang = [
('LANG_NEUTRAL', 0x00),
('LANG_INVARIANT', 0x7f),
('LANG_AFRIKAANS', 0x36),
('LANG_ALBANIAN', 0x1c),
('LANG_ARABIC', 0x01),
('LANG_ARMENIAN', 0x2b),
('LANG_ASSAMESE', 0x4d),
('LANG_AZERI', 0x2c),
('LANG_BASQUE', 0x2d),
('LANG_BELARUSIAN', 0x23),
('LANG_BENGALI', 0x45),
('LANG_BULGARIAN', 0x02),
('LANG_CATALAN', 0x03),
('LANG_CHINESE', 0x04),
('LANG_CROATIAN', 0x1a),
('LANG_CZECH', 0x05),
('LANG_DANISH', 0x06),
('LANG_DIVEHI', 0x65),
('LANG_DUTCH', 0x13),
('LANG_ENGLISH', 0x09),
('LANG_ESTONIAN', 0x25),
('LANG_FAEROESE', 0x38),
('LANG_FARSI', 0x29),
('LANG_FINNISH', 0x0b),
('LANG_FRENCH', 0x0c),
('LANG_GALICIAN', 0x56),
('LANG_GEORGIAN', 0x37),
('LANG_GERMAN', 0x07),
('LANG_GREEK', 0x08),
('LANG_GUJARATI', 0x47),
('LANG_HEBREW', 0x0d),
('LANG_HINDI', 0x39),
('LANG_HUNGARIAN', 0x0e),
('LANG_ICELANDIC', 0x0f),
('LANG_INDONESIAN', 0x21),
('LANG_ITALIAN', 0x10),
('LANG_JAPANESE', 0x11),
('LANG_KANNADA', 0x4b),
('LANG_KASHMIRI', 0x60),
('LANG_KAZAK', 0x3f),
('LANG_KONKANI', 0x57),
('LANG_KOREAN', 0x12),
('LANG_KYRGYZ', 0x40),
('LANG_LATVIAN', 0x26),
('LANG_LITHUANIAN', 0x27),
('LANG_MACEDONIAN', 0x2f),
('LANG_MALAY', 0x3e),
('LANG_MALAYALAM', 0x4c),
('LANG_MANIPURI', 0x58),
('LANG_MARATHI', 0x4e),
('LANG_MONGOLIAN', 0x50),
('LANG_NEPALI', 0x61),
('LANG_NORWEGIAN', 0x14),
('LANG_ORIYA', 0x48),
('LANG_POLISH', 0x15),
('LANG_PORTUGUESE', 0x16),
('LANG_PUNJABI', 0x46),
('LANG_ROMANIAN', 0x18),
('LANG_RUSSIAN', 0x19),
('LANG_SANSKRIT', 0x4f),
('LANG_SERBIAN', 0x1a),
('LANG_SINDHI', 0x59),
('LANG_SLOVAK', 0x1b),
('LANG_SLOVENIAN', 0x24),
('LANG_SPANISH', 0x0a),
('LANG_SWAHILI', 0x41),
('LANG_SWEDISH', 0x1d),
('LANG_SYRIAC', 0x5a),
('LANG_TAMIL', 0x49),
('LANG_TATAR', 0x44),
('LANG_TELUGU', 0x4a),
('LANG_THAI', 0x1e),
('LANG_TURKISH', 0x1f),
('LANG_UKRAINIAN', 0x22),
('LANG_URDU', 0x20),
('LANG_UZBEK', 0x43),
('LANG_VIETNAMESE', 0x2a),
('LANG_GAELIC', 0x3c),
('LANG_MALTESE', 0x3a),
('LANG_MAORI', 0x28),
('LANG_RHAETO_ROMANCE',0x17),
('LANG_SAAMI', 0x3b),
('LANG_SORBIAN', 0x2e),
('LANG_SUTU', 0x30),
('LANG_TSONGA', 0x31),
('LANG_TSWANA', 0x32),
('LANG_VENDA', 0x33),
('LANG_XHOSA', 0x34),
('LANG_ZULU', 0x35),
('LANG_ESPERANTO', 0x8f),
('LANG_WALON', 0x90),
('LANG_CORNISH', 0x91),
('LANG_WELSH', 0x92),
('LANG_BRETON', 0x93) ]
LANG = dict(lang+[(e[1], e[0]) for e in lang])
# Sublanguage definitions
sublang = [
('SUBLANG_NEUTRAL', 0x00),
('SUBLANG_DEFAULT', 0x01),
('SUBLANG_SYS_DEFAULT', 0x02),
('SUBLANG_ARABIC_SAUDI_ARABIA', 0x01),
('SUBLANG_ARABIC_IRAQ', 0x02),
('SUBLANG_ARABIC_EGYPT', 0x03),
('SUBLANG_ARABIC_LIBYA', 0x04),
('SUBLANG_ARABIC_ALGERIA', 0x05),
('SUBLANG_ARABIC_MOROCCO', 0x06),
('SUBLANG_ARABIC_TUNISIA', 0x07),
('SUBLANG_ARABIC_OMAN', 0x08),
('SUBLANG_ARABIC_YEMEN', 0x09),
('SUBLANG_ARABIC_SYRIA', 0x0a),
('SUBLANG_ARABIC_JORDAN', 0x0b),
('SUBLANG_ARABIC_LEBANON', 0x0c),
('SUBLANG_ARABIC_KUWAIT', 0x0d),
('SUBLANG_ARABIC_UAE', 0x0e),
('SUBLANG_ARABIC_BAHRAIN', 0x0f),
('SUBLANG_ARABIC_QATAR', 0x10),
('SUBLANG_AZERI_LATIN', 0x01),
('SUBLANG_AZERI_CYRILLIC', 0x02),
('SUBLANG_CHINESE_TRADITIONAL', 0x01),
('SUBLANG_CHINESE_SIMPLIFIED', 0x02),
('SUBLANG_CHINESE_HONGKONG', 0x03),
('SUBLANG_CHINESE_SINGAPORE', 0x04),
('SUBLANG_CHINESE_MACAU', 0x05),
('SUBLANG_DUTCH', 0x01),
('SUBLANG_DUTCH_BELGIAN', 0x02),
('SUBLANG_ENGLISH_US', 0x01),
('SUBLANG_ENGLISH_UK', 0x02),
('SUBLANG_ENGLISH_AUS', 0x03),
('SUBLANG_ENGLISH_CAN', 0x04),
('SUBLANG_ENGLISH_NZ', 0x05),
('SUBLANG_ENGLISH_EIRE', 0x06),
('SUBLANG_ENGLISH_SOUTH_AFRICA', 0x07),
('SUBLANG_ENGLISH_JAMAICA', 0x08),
('SUBLANG_ENGLISH_CARIBBEAN', 0x09),
('SUBLANG_ENGLISH_BELIZE', 0x0a),
('SUBLANG_ENGLISH_TRINIDAD', 0x0b),
('SUBLANG_ENGLISH_ZIMBABWE', 0x0c),
('SUBLANG_ENGLISH_PHILIPPINES', 0x0d),
('SUBLANG_FRENCH', 0x01),
('SUBLANG_FRENCH_BELGIAN', 0x02),
('SUBLANG_FRENCH_CANADIAN', 0x03),
('SUBLANG_FRENCH_SWISS', 0x04),
('SUBLANG_FRENCH_LUXEMBOURG', 0x05),
('SUBLANG_FRENCH_MONACO', 0x06),
('SUBLANG_GERMAN', 0x01),
('SUBLANG_GERMAN_SWISS', 0x02),
('SUBLANG_GERMAN_AUSTRIAN', 0x03),
('SUBLANG_GERMAN_LUXEMBOURG', 0x04),
('SUBLANG_GERMAN_LIECHTENSTEIN', 0x05),
('SUBLANG_ITALIAN', 0x01),
('SUBLANG_ITALIAN_SWISS', 0x02),
('SUBLANG_KASHMIRI_SASIA', 0x02),
('SUBLANG_KASHMIRI_INDIA', 0x02),
('SUBLANG_KOREAN', 0x01),
('SUBLANG_LITHUANIAN', 0x01),
('SUBLANG_MALAY_MALAYSIA', 0x01),
('SUBLANG_MALAY_BRUNEI_DARUSSALAM', 0x02),
('SUBLANG_NEPALI_INDIA', 0x02),
('SUBLANG_NORWEGIAN_BOKMAL', 0x01),
('SUBLANG_NORWEGIAN_NYNORSK', 0x02),
('SUBLANG_PORTUGUESE', 0x02),
('SUBLANG_PORTUGUESE_BRAZILIAN', 0x01),
('SUBLANG_SERBIAN_LATIN', 0x02),
('SUBLANG_SERBIAN_CYRILLIC', 0x03),
('SUBLANG_SPANISH', 0x01),
('SUBLANG_SPANISH_MEXICAN', 0x02),
('SUBLANG_SPANISH_MODERN', 0x03),
('SUBLANG_SPANISH_GUATEMALA', 0x04),
('SUBLANG_SPANISH_COSTA_RICA', 0x05),
('SUBLANG_SPANISH_PANAMA', 0x06),
('SUBLANG_SPANISH_DOMINICAN_REPUBLIC', 0x07),
('SUBLANG_SPANISH_VENEZUELA', 0x08),
('SUBLANG_SPANISH_COLOMBIA', 0x09),
('SUBLANG_SPANISH_PERU', 0x0a),
('SUBLANG_SPANISH_ARGENTINA', 0x0b),
('SUBLANG_SPANISH_ECUADOR', 0x0c),
('SUBLANG_SPANISH_CHILE', 0x0d),
('SUBLANG_SPANISH_URUGUAY', 0x0e),
('SUBLANG_SPANISH_PARAGUAY', 0x0f),
('SUBLANG_SPANISH_BOLIVIA', 0x10),
('SUBLANG_SPANISH_EL_SALVADOR', 0x11),
('SUBLANG_SPANISH_HONDURAS', 0x12),
('SUBLANG_SPANISH_NICARAGUA', 0x13),
('SUBLANG_SPANISH_PUERTO_RICO', 0x14),
('SUBLANG_SWEDISH', 0x01),
('SUBLANG_SWEDISH_FINLAND', 0x02),
('SUBLANG_URDU_PAKISTAN', 0x01),
('SUBLANG_URDU_INDIA', 0x02),
('SUBLANG_UZBEK_LATIN', 0x01),
('SUBLANG_UZBEK_CYRILLIC', 0x02),
('SUBLANG_DUTCH_SURINAM', 0x03),
('SUBLANG_ROMANIAN', 0x01),
('SUBLANG_ROMANIAN_MOLDAVIA', 0x02),
('SUBLANG_RUSSIAN', 0x01),
('SUBLANG_RUSSIAN_MOLDAVIA', 0x02),
('SUBLANG_CROATIAN', 0x01),
('SUBLANG_LITHUANIAN_CLASSIC', 0x02),
('SUBLANG_GAELIC', 0x01),
('SUBLANG_GAELIC_SCOTTISH', 0x02),
('SUBLANG_GAELIC_MANX', 0x03) ]
SUBLANG = dict(sublang+[(e[1], e[0]) for e in sublang])
# Initialize the dictionary with all the name->value pairs
SUBLANG = dict( sublang )
# Now add all the value->name information, handling duplicates appropriately
for sublang_name, sublang_value in sublang:
if sublang_value in SUBLANG:
SUBLANG[ sublang_value ].append( sublang_name )
else:
SUBLANG[ sublang_value ] = [ sublang_name ]
# Resolve a sublang name given the main lang name
#
def get_sublang_name_for_lang( lang_value, sublang_value ):
lang_name = LANG.get(lang_value, '*unknown*')
for sublang_name in SUBLANG.get(sublang_value, list()):
# if the main language is a substring of sublang's name, then
# return that
if lang_name in sublang_name:
return sublang_name
# otherwise return the first sublang name
return SUBLANG.get(sublang_value, ['*unknown*'])[0]
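# Example lookup (an illustrative assumption, not part of pefile): English as
# the primary language combined with sublanguage value 0x02 resolves to the
# UK-specific name instead of the generic SUBLANG_SYS_DEFAULT.
assert get_sublang_name_for_lang(LANG['LANG_ENGLISH'], 0x02) == 'SUBLANG_ENGLISH_UK'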
# Ange Albertini's code to process resources' strings
#
def parse_strings(data, counter, l):
i = 0
error_count = 0
while i < len(data):
data_slice = data[i:i + 2]
if len(data_slice) < 2:
break
len_ = struct.unpack("<h", data_slice)[0]
i += 2
if len_ != 0 and 0 <= len_*2 <= len(data):
try:
l[counter] = b(data[i: i + len_ * 2]).decode('utf-16le')
except UnicodeDecodeError:
error_count += 1
pass
if error_count >= 3:
break
i += len_ * 2
counter += 1
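# Tiny illustration (an assumption, not part of pefile): each resource string
# table entry is a 16-bit character count followed by that many UTF-16LE
# characters; parse_strings() fills the supplied dict keyed by string id.
_strings_demo = {}
parse_strings(struct.pack('<H', 2) + u'hi'.encode('utf-16le'), 1, _strings_demo)
assert _strings_demo[1] == u'hi'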
def retrieve_flags(flag_dict, flag_filter):
"""Read the flags from a dictionary and return them in a usable form.
Will return a list of (flag, value) for all flags in "flag_dict"
matching the filter "flag_filter".
"""
return [(f[0], f[1]) for f in list(flag_dict.items()) if
isinstance(f[0], (str, bytes)) and f[0].startswith(flag_filter)]
def set_flags(obj, flag_field, flags):
"""Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attributes named after the flags provided in
"flags" and valued True/False, matching the results of applying each
flag value from "flags" to flag_field.
"""
for flag in flags:
if flag[1] & flag_field:
#setattr(obj, flag[0], True)
obj.__dict__[flag[0]] = True
else:
#setattr(obj, flag[0], False)
obj.__dict__[flag[0]] = False
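# Small illustration (an assumption, not part of pefile): retrieve_flags()
# filters the constant dictionaries defined above down to (name, value) pairs,
# and set_flags() exposes each matching bit of a Characteristics value as a
# True/False attribute on the target object.
class _FlagsDemo(object):
    pass
_flags_demo = _FlagsDemo()
set_flags(_flags_demo, 0x2000 | 0x0002,
          retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_'))
assert _flags_demo.IMAGE_FILE_DLL
assert _flags_demo.IMAGE_FILE_EXECUTABLE_IMAGE
assert not _flags_demo.IMAGE_FILE_SYSTEM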
def power_of_two(val):
return val != 0 and (val & (val-1)) == 0
# These come from the great article[1] which contains great insights on
# working with unicode in both Python 2 and 3.
# [1]: http://python3porting.com/problems.html
if not PY3:
def handler(err):
start = err.start
end = err.end
values = [
('\\u{0:04x}' if ord(err.object[i]) > 255 else '\\x{0:02x}',
ord(err.object[i])) for i in range(start,end)]
return (
u"".join([elm[0].format(elm[1]) for elm in values]),
end)
import codecs
codecs.register_error('backslashreplace_', handler)
def b(x):
return x
else:
import codecs
codecs.register_error('backslashreplace_', codecs.lookup_error('backslashreplace'))
def b(x):
if isinstance(x, (bytes, bytearray)):
return bytes(x)
return codecs.encode(x, 'cp1252')
FILE_ALIGNMENT_HARDCODED_VALUE = 0x200
FileAlignment_Warning = False # We only want to print the warning once
SectionAlignment_Warning = False # We only want to print the warning once
class UnicodeStringWrapperPostProcessor(object):
"""This class attempts to help the process of identifying strings
that might be plain Unicode or Pascal. A list of strings will be
    wrapped in it, in the hope that their overlaps will help decide
    their type."""
def __init__(self, pe, rva_ptr):
self.pe = pe
self.rva_ptr = rva_ptr
self.string = None
def get_rva(self):
"""Get the RVA of the string."""
return self.rva_ptr
def __str__(self):
"""Return the escaped UTF-8 representation of the string."""
return self.decode('utf-8', 'backslashreplace_')
def decode(self, *args):
if not self.string:
return ''
return self.string.decode(*args)
    def invalidate(self):
        """Make this instance None, to express it is not a known string type."""
        # Note: rebinding the local name has no effect on the caller's reference.
        self = None
def render_pascal_16(self):
self.string = self.pe.get_string_u_at_rva(
self.rva_ptr+2,
max_length=self.get_pascal_16_length())
def get_pascal_16_length(self):
return self.__get_word_value_at_rva(self.rva_ptr)
def __get_word_value_at_rva(self, rva):
try:
data = self.pe.get_data(self.rva_ptr, 2)
except PEFormatError as e:
return False
if len(data)<2:
return False
return struct.unpack('<H', data)[0]
def ask_unicode_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
"""
if self.__get_word_value_at_rva(next_rva_ptr-2) == 0:
self.length = next_rva_ptr - self.rva_ptr
return True
return False
def render_unicode_16(self):
self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
class PEFormatError(Exception):
"""Generic PE format error exception."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Dump(object):
"""Convenience class for dumping the PE information."""
def __init__(self):
self.text = list()
def add_lines(self, txt, indent=0):
"""Adds a list of lines.
The list can be indented with the optional argument 'indent'.
"""
for line in txt:
self.add_line(line, indent)
def add_line(self, txt, indent=0):
"""Adds a line.
The line can be indented with the optional argument 'indent'.
"""
self.add(txt+'\n', indent)
def add(self, txt, indent=0):
"""Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'.
"""
self.text.append(u'{0}{1}'.format(' '*indent, txt))
def add_header(self, txt):
"""Adds a header element."""
self.add_line('{0}{1}{0}\n'.format('-'*10, txt))
def add_newline(self):
"""Adds a newline."""
self.text.append('\n')
def get_text(self):
"""Get the text in its current state."""
return u''.join(u'{0}'.format(b) for b in self.text)
STRUCT_SIZEOF_TYPES = {
'x': 1, 'c': 1, 'b': 1, 'B': 1,
'h': 2, 'H': 2,
'i': 4, 'I': 4, 'l': 4, 'L': 4, 'f': 4,
'q': 8, 'Q': 8, 'd': 8,
's': 1 }
class Structure(object):
"""Prepare structure object to extract members from data.
Format is a list containing definitions for the elements
of the structure.
"""
def __init__(self, format, name=None, file_offset=None):
# Format is forced little endian, for big endian non Intel platforms
self.__format__ = '<'
self.__keys__ = []
self.__format_length__ = 0
self.__field_offsets__ = dict()
self.__unpacked_data_elms__ = []
self.__set_format__(format[1])
self.__all_zeroes__ = False
self.__file_offset__ = file_offset
if name:
self.name = name
else:
self.name = format[0]
def __get_format__(self):
return self.__format__
    def get_field_absolute_offset(self, field_name):
        """Return the absolute file offset of the requested field in the structure."""
return self.__file_offset__ + self.__field_offsets__[field_name]
def get_field_relative_offset(self, field_name):
"""Return the offset within the structure for the requested field."""
return self.__field_offsets__[field_name]
def get_file_offset(self):
return self.__file_offset__
def set_file_offset(self, offset):
self.__file_offset__ = offset
    def all_zeroes(self):
        """Return True if the unpacked data is all zeros."""
return self.__all_zeroes__
def sizeof_type(self, t):
count = 1
_t = t
if t[0] in string.digits:
# extract the count
count = int( ''.join([d for d in t if d in string.digits]) )
_t = ''.join([d for d in t if d not in string.digits])
return STRUCT_SIZEOF_TYPES[_t] * count
def __set_format__(self, format):
offset = 0
for elm in format:
if ',' in elm:
elm_type, elm_name = elm.split(',', 1)
self.__format__ += elm_type
self.__unpacked_data_elms__.append(None)
elm_names = elm_name.split(',')
names = []
for elm_name in elm_names:
if elm_name in self.__keys__:
search_list = [x[:len(elm_name)] for x in self.__keys__]
occ_count = search_list.count(elm_name)
elm_name = '{0}_{1:d}'.format(elm_name, occ_count)
names.append(elm_name)
self.__field_offsets__[elm_name] = offset
offset += self.sizeof_type(elm_type)
                # Some PE header structures have unions in them, so a given
                # value might be known by different names; each key therefore
                # holds a list of all the member names referring to the data.
self.__keys__.append(names)
self.__format_length__ = struct.calcsize(self.__format__)
def sizeof(self):
"""Return size of the structure."""
return self.__format_length__
def __unpack__(self, data):
data = b(data)
if len(data) > self.__format_length__:
data = data[:self.__format_length__]
# OC Patch:
# Some malware have incorrect header lengths.
# Fail gracefully if this occurs
# Buggy malware: a29b0118af8b7408444df81701ad5a7f
#
elif len(data) < self.__format_length__:
raise PEFormatError('Data length less than expected header length.')
if count_zeroes(data) == len(data):
self.__all_zeroes__ = True
self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
for i in range(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
setattr(self, key, self.__unpacked_data_elms__[i])
def __pack__(self):
new_values = []
for i in range(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
new_val = getattr(self, key)
old_val = self.__unpacked_data_elms__[i]
                # In the case of unions, the loop exits as soon as the
                # first changed value is picked
if new_val != old_val:
break
new_values.append(new_val)
return struct.pack(self.__format__, *new_values)
def __str__(self):
return '\n'.join( self.dump() )
def __repr__(self):
return '<Structure: %s>' % (' '.join( [' '.join(s.split()) for s in self.dump()] ))
def dump(self, indentation=0):
"""Returns a string representation of the structure."""
dump = []
dump.append('[{0}]'.format(self.name))
printable_bytes = [ord(i) for i in string.printable if i not in string.whitespace]
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, (int, long)):
val_str = '0x%-8X' % (val)
if key == 'TimeDateStamp' or key == 'dwTimeStamp':
try:
val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
except ValueError as e:
val_str += ' [INVALID TIME]'
else:
val_str = bytearray(val)
val_str = ''.join(
[chr(i) if (i in printable_bytes) else
'\\x{0:02x}'.format(i) for i in val_str.rstrip(b'\x00')])
dump.append('0x%-8X 0x%-3X %-30s %s' % (
self.__field_offsets__[key] + self.__file_offset__,
self.__field_offsets__[key], key+':', val_str))
return dump
def dump_dict(self):
"""Returns a dictionary representation of the structure."""
dump_dict = dict()
dump_dict['Structure'] = self.name
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, (int, long)):
if key == 'TimeDateStamp' or key == 'dwTimeStamp':
try:
val = '0x%-8X [%s UTC]' % (val, time.asctime(time.gmtime(val)))
except ValueError as e:
val = '0x%-8X [INVALID TIME]' % val
else:
val = ''.join(chr(d) if chr(d) in string.printable
else "\\x%02x" % d for d in
[ord(c) if not isinstance(c, int) else c for c in val])
dump_dict[key] = {'FileOffset': self.__field_offsets__[key] + self.__file_offset__,
'Offset': self.__field_offsets__[key],
'Value': val}
return dump_dict
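# Small self-check (an illustrative assumption, not part of pefile): a Structure
# is built from a (name, field-format) pair, and unpacking raw bytes turns each
# named field into an attribute.
_demo_struct = Structure(('DEMO_HEADER', ('H,Magic', 'I,Size')))
_demo_struct.__unpack__(struct.pack('<HI', 0x5A4D, 1024))
assert _demo_struct.Magic == 0x5A4D
assert _demo_struct.Size == 1024
assert _demo_struct.sizeof() == struct.calcsize('<HI')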
class SectionStructure(Structure):
"""Convenience section handling class."""
def __init__(self, *argl, **argd):
if 'pe' in argd:
self.pe = argd['pe']
del argd['pe']
Structure.__init__(self, *argl, **argd)
    def get_data(self, start=None, length=None):
        """Get a data chunk from a section.
        Allows querying the section's data by passing the addresses at
        which the PE file would be loaded by default, so code and data
        can be retrieved at their real (loaded) addresses.
        Returns bytes() under Python 3.x and str() under Python 2.7.
        """
PointerToRawData_adj = self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment )
VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
if start is None:
offset = PointerToRawData_adj
else:
offset = ( start - VirtualAddress_adj ) + PointerToRawData_adj
if length is not None:
end = offset + length
else:
end = offset + self.SizeOfRawData
# PointerToRawData is not adjusted here as we might want to read any possible extra bytes
# that might get cut off by aligning the start (and hence cutting something off the end)
#
if end > self.PointerToRawData + self.SizeOfRawData:
end = self.PointerToRawData + self.SizeOfRawData
return self.pe.__data__[offset:end]
def __setattr__(self, name, val):
if name == 'Characteristics':
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according to the Characteristics member
set_flags(self, val, section_flags)
elif 'IMAGE_SCN_' in name and hasattr(self, name):
if val:
self.__dict__['Characteristics'] |= SECTION_CHARACTERISTICS[name]
else:
self.__dict__['Characteristics'] ^= SECTION_CHARACTERISTICS[name]
self.__dict__[name] = val
def get_rva_from_offset(self, offset):
return offset - self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) + self.pe.adjust_SectionAlignment( self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
def get_offset_from_rva(self, rva):
return (rva -
self.pe.adjust_SectionAlignment(
self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment,
self.pe.OPTIONAL_HEADER.FileAlignment )
) + self.pe.adjust_FileAlignment(
self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment )
def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if self.PointerToRawData is None:
            # bss and other sections containing only uninitialized data must have
            # a PointerToRawData of 0 and do not take space in the file
return False
return ( self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) <=
offset <
self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) +
self.SizeOfRawData )
def contains_rva(self, rva):
"""Check whether the section contains the address provided."""
# Check if the SizeOfRawData is realistic. If it's bigger than the size of
# the whole PE file minus the start address of the section it could be
        # either truncated or the SizeOfRawData contains a misleading value.
# In either of those cases we take the VirtualSize
#
if len(self.pe.__data__) - self.pe.adjust_FileAlignment( self.PointerToRawData,
self.pe.OPTIONAL_HEADER.FileAlignment ) < self.SizeOfRawData:
# PECOFF documentation v8 says:
# VirtualSize: The total size of the section when loaded into memory.
# If this value is greater than SizeOfRawData, the section is zero-padded.
# This field is valid only for executable images and should be set to zero
# for object files.
#
size = self.Misc_VirtualSize
else:
size = max(self.SizeOfRawData, self.Misc_VirtualSize)
VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress,
self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
# Check whether there's any section after the current one that starts before the
# calculated end for the current one, if so, cut the current section's size
# to fit in the range up to where the next section starts.
if (self.next_section_virtual_address is not None and
self.next_section_virtual_address > self.VirtualAddress and
VirtualAddress_adj + size > self.next_section_virtual_address):
size = self.next_section_virtual_address - VirtualAddress_adj
return VirtualAddress_adj <= rva < VirtualAddress_adj + size
def contains(self, rva):
#print "DEPRECATION WARNING: you should use contains_rva() instead of contains()"
return self.contains_rva(rva)
def get_entropy(self):
"""Calculate and return the entropy for the section."""
return self.entropy_H( self.get_data() )
def get_hash_sha1(self):
"""Get the SHA-1 hex-digest of the section's data."""
if sha1 is not None:
return sha1( self.get_data() ).hexdigest()
def get_hash_sha256(self):
"""Get the SHA-256 hex-digest of the section's data."""
if sha256 is not None:
return sha256( self.get_data() ).hexdigest()
def get_hash_sha512(self):
"""Get the SHA-512 hex-digest of the section's data."""
if sha512 is not None:
return sha512( self.get_data() ).hexdigest()
def get_hash_md5(self):
"""Get the MD5 hex-digest of the section's data."""
if md5 is not None:
return md5( self.get_data() ).hexdigest()
def entropy_H(self, data):
"""Calculate the entropy of a chunk of data."""
if len(data) == 0:
return 0.0
occurences = Counter(bytearray(data))
entropy = 0
for x in occurences.values():
p_x = float(x) / len(data)
entropy -= p_x*math.log(p_x, 2)
return entropy
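# Worked check of the entropy formula above (an illustrative assumption, not
# part of pefile): four equiprobable byte values give log2(4) = 2 bits per
# byte, while a constant buffer would give 0.
_demo_entropy = 0.0
for _count in Counter(bytearray(b'\x00\x01\x02\x03')).values():
    _p = float(_count) / 4
    _demo_entropy -= _p * math.log(_p, 2)
assert abs(_demo_entropy - 2.0) < 1e-9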
class DataContainer(object):
"""Generic data container."""
def __init__(self, **args):
bare_setattr = super(DataContainer, self).__setattr__
for key, value in list(args.items()):
bare_setattr(key, value)
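# Minimal illustration (an assumption, not part of pefile): DataContainer simply
# turns keyword arguments into attributes; the Import*/Export* data classes
# below build on this.
_container_demo = DataContainer(dll=b'kernel32.dll', imports=[])
assert _container_demo.dll == b'kernel32.dll'
assert _container_demo.imports == []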
class ImportDescData(DataContainer):
"""Holds import descriptor information.
dll: name of the imported DLL
imports: list of imported symbols (ImportData instances)
struct: IMAGE_IMPORT_DESCRIPTOR structure
"""
class ImportData(DataContainer):
"""Holds imported symbol's information.
ordinal: Ordinal of the symbol
name: Name of the symbol
bound: If the symbol is bound, this contains
the address.
"""
def __setattr__(self, name, val):
# If the instance doesn't yet have an ordinal attribute
# it's not fully initialized so can't do any of the
# following
#
if hasattr(self, 'ordinal') and hasattr(self, 'bound') and hasattr(self, 'name'):
if name == 'ordinal':
if self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
# Set the ordinal and flag the entry as importing by ordinal
self.struct_table.Ordinal = ordinal_flag | (val & 0xffff)
self.struct_table.AddressOfData = self.struct_table.Ordinal
self.struct_table.Function = self.struct_table.Ordinal
self.struct_table.ForwarderString = self.struct_table.Ordinal
elif name == 'bound':
if self.struct_iat is not None:
self.struct_iat.AddressOfData = val
self.struct_iat.AddressOfData = self.struct_iat.AddressOfData
self.struct_iat.Function = self.struct_iat.AddressOfData
self.struct_iat.ForwarderString = self.struct_iat.AddressOfData
elif name == 'address':
self.struct_table.AddressOfData = val
self.struct_table.Ordinal = self.struct_table.AddressOfData
self.struct_table.Function = self.struct_table.AddressOfData
self.struct_table.ForwarderString = self.struct_table.AddressOfData
elif name == 'name':
# Make sure we reset the entry in case the import had been set to import by ordinal
if self.name_offset:
name_rva = self.pe.get_rva_from_offset( self.name_offset )
self.pe.set_dword_at_offset( self.ordinal_offset, (0<<31) | name_rva )
# Complain if the length of the new name is longer than the existing one
if len(val) > len(self.name):
#raise Exception('The export name provided is longer than the existing one.')
pass
self.pe.set_bytes_at_offset( self.name_offset, val )
self.__dict__[name] = val
class ExportDirData(DataContainer):
"""Holds export directory information.
struct: IMAGE_EXPORT_DIRECTORY structure
symbols: list of exported symbols (ExportData instances)
"""
class ExportData(DataContainer):
"""Holds exported symbols' information.
ordinal: ordinal of the symbol
address: address of the symbol
name: name of the symbol (None if the symbol is
exported by ordinal only)
forwarder: if the symbol is forwarded it will
contain the name of the target symbol,
None otherwise.
"""
def __setattr__(self, name, val):
# If the instance doesn't yet have an ordinal attribute
# it's not fully initialized so can't do any of the
# following
#
if hasattr(self, 'ordinal') and hasattr(self, 'address') and hasattr(self, 'forwarder') and hasattr(self, 'name'):
if name == 'ordinal':
self.pe.set_word_at_offset( self.ordinal_offset, val )
elif name == 'address':
self.pe.set_dword_at_offset( self.address_offset, val )
elif name == 'name':
# Complain if the length of the new name is longer than the existing one
if len(val) > len(self.name):
#raise Exception('The export name provided is longer than the existing one.')
pass
self.pe.set_bytes_at_offset( self.name_offset, val )
elif name == 'forwarder':
# Complain if the length of the new name is longer than the existing one
if len(val) > len(self.forwarder):
#raise Exception('The forwarder name provided is longer than the existing one.')
pass
self.pe.set_bytes_at_offset( self.forwarder_offset, val )
self.__dict__[name] = val
class ResourceDirData(DataContainer):
"""Holds resource directory information.
struct: IMAGE_RESOURCE_DIRECTORY structure
entries: list of entries (ResourceDirEntryData instances)
"""
class ResourceDirEntryData(DataContainer):
"""Holds resource directory entry data.
struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure
name: If the resource is identified by name this
attribute will contain the name string. None
otherwise. If identified by id, the id is
available at 'struct.Id'
id: the id, also in struct.Id
directory: If this entry has a lower level directory
this attribute will point to the
ResourceDirData instance representing it.
data: If this entry has no further lower directories
and points to the actual resource data, this
attribute will reference the corresponding
ResourceDataEntryData instance.
(Either of the 'directory' or 'data' attribute will exist,
but not both.)
"""
class ResourceDataEntryData(DataContainer):
"""Holds resource data entry information.
struct: IMAGE_RESOURCE_DATA_ENTRY structure
lang: Primary language ID
sublang: Sublanguage ID
"""
class DebugData(DataContainer):
"""Holds debug information.
struct: IMAGE_DEBUG_DIRECTORY structure
entries: list of entries (IMAGE_DEBUG_TYPE instances)
"""
class BaseRelocationData(DataContainer):
"""Holds base relocation information.
struct: IMAGE_BASE_RELOCATION structure
entries: list of relocation data (RelocationData instances)
"""
class RelocationData(DataContainer):
"""Holds relocation information.
type: Type of relocation
    The type string can be obtained by
RELOCATION_TYPE[type]
rva: RVA of the relocation
"""
def __setattr__(self, name, val):
# If the instance doesn't yet have a struct attribute
# it's not fully initialized so can't do any of the
# following
#
if hasattr(self, 'struct'):
# Get the word containing the type and data
#
word = self.struct.Data
if name == 'type':
word = (val << 12) | (word & 0xfff)
elif name == 'rva':
offset = val-self.base_rva
if offset < 0:
offset = 0
word = ( word & 0xf000) | ( offset & 0xfff)
# Store the modified data
#
self.struct.Data = word
self.__dict__[name] = val
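# Worked illustration of the packing above (an assumption, not part of pefile):
# the high 4 bits of Data carry the relocation type and the low 12 bits the
# offset from the block's base RVA.
assert ((3 << 12) | (0x123 & 0xfff)) == 0x3123  # IMAGE_REL_BASED_HIGHLOW at +0x123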
class TlsData(DataContainer):
"""Holds TLS information.
struct: IMAGE_TLS_DIRECTORY structure
"""
class BoundImportDescData(DataContainer):
"""Holds bound import descriptor data.
    This directory entry provides information on the
    DLLs this PE file has been bound to (if bound at all).
The structure will contain the name and timestamp of the
DLL at the time of binding so that the loader can know
whether it differs from the one currently present in the
system and must, therefore, re-bind the PE's imports.
struct: IMAGE_BOUND_IMPORT_DESCRIPTOR structure
name: DLL name
entries: list of entries (BoundImportRefData instances)
the entries will exist if this DLL has forwarded
symbols. If so, the destination DLL will have an
entry in this list.
"""
class LoadConfigData(DataContainer):
"""Holds Load Config data.
struct: IMAGE_LOAD_CONFIG_DIRECTORY structure
name: dll name
"""
class BoundImportRefData(DataContainer):
"""Holds bound import forwarder reference data.
Contains the same information as the bound descriptor but
for forwarded DLLs, if any.
struct: IMAGE_BOUND_FORWARDER_REF structure
name: dll name
"""
# Valid FAT32 8.3 short filename characters according to:
# http://en.wikipedia.org/wiki/8.3_filename
# This will help decide whether DLL ASCII names are likely
# to be valid or otherwise corrupt data
#
# The filename length is not checked because the DLL's filename
# can be longer than the 8.3 limit
if PY3:
allowed_filename = b(
string.ascii_lowercase + string.ascii_uppercase +
string.digits + "!#$%&'()-@^_`{}~+,.;=[]")
else: # Python 2.x
allowed_filename = b(
string.lowercase + string.uppercase + string.digits +
b"!#$%&'()-@^_`{}~+,.;=[]")
def is_valid_dos_filename(s):
if s is None or not isinstance(s, (str, bytes, bytearray)):
return False
# Allow path separators as import names can contain directories.
allowed = allowed_filename + b'\\/'
for c in set(s):
if c not in allowed:
return False
return True
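# Quick illustration (an assumption, not part of pefile): plausible DLL names
# pass the check, while names containing control characters are rejected.
assert is_valid_dos_filename(b'KERNEL32.DLL')
assert not is_valid_dos_filename(b'bad\x01name.dll')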
# Check if an imported name uses the valid accepted characters expected in mangled
# function names. If the symbol's characters don't fall within this charset
# we will assume the name is invalid
#
if PY3:
allowed_function_name = b(
string.ascii_lowercase + string.ascii_uppercase +
string.digits + '_?@$()<>')
else:
allowed_function_name = b(
string.lowercase + string.uppercase +
string.digits + b'_?@$()<>')
def is_valid_function_name(s):
if s is None or not isinstance(s, (str, bytes, bytearray)):
return False
for c in set(s):
if c not in allowed_function_name:
return False
return True
class PE(object):
"""A Portable Executable representation.
This class provides access to most of the information in a PE file.
It expects to be supplied the name of the file to load or PE data
    to process, plus an optional argument 'fast_load' (False by default)
    which controls whether to load all the directory information,
which can be quite time consuming.
pe = pefile.PE('module.dll')
pe = pefile.PE(name='module.dll')
    would load 'module.dll' and process it. If the data is already
    available in a buffer, the same can be achieved with:
pe = pefile.PE(data=module_dll_data)
    The "fast_load" can be given a default by setting its value in the
    module itself, for instance with "pefile.fast_load = True".
    That will make all subsequent instances skip loading the
whole PE structure. The "full_load" method can be used to parse
the missing data at a later stage.
Basic headers information will be available in the attributes:
DOS_HEADER
NT_HEADERS
FILE_HEADER
OPTIONAL_HEADER
All of them will contain among their attributes the members of the
corresponding structures as defined in WINNT.H
The raw data corresponding to the header (from the beginning of the
file up to the start of the first section) will be available in the
instance's attribute 'header' as a string.
The sections will be available as a list in the 'sections' attribute.
Each entry will contain as attributes all the structure's members.
Directory entries will be available as attributes (if they exist):
(no other entries are processed at this point)
DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances)
DIRECTORY_ENTRY_EXPORT (ExportDirData instance)
DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance)
DIRECTORY_ENTRY_DEBUG (list of DebugData instances)
DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances)
DIRECTORY_ENTRY_TLS
    DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportDescData instances)
The following dictionary attributes provide ways of mapping different
constants. They will accept the numeric value and return the string
representation and the opposite, feed in the string and get the
numeric constant:
DIRECTORY_ENTRY
IMAGE_CHARACTERISTICS
SECTION_CHARACTERISTICS
DEBUG_TYPE
SUBSYSTEM_TYPE
MACHINE_TYPE
RELOCATION_TYPE
RESOURCE_TYPE
LANG
SUBLANG
"""
#
# Format specifications for PE structures.
#
__IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER',
('H,e_magic', 'H,e_cblp', 'H,e_cp',
'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc',
'H,e_maxalloc', 'H,e_ss', 'H,e_sp', 'H,e_csum',
'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res',
'H,e_oemid', 'H,e_oeminfo', '20s,e_res2',
'I,e_lfanew'))
__IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER',
('H,Machine', 'H,NumberOfSections',
'I,TimeDateStamp', 'I,PointerToSymbolTable',
'I,NumberOfSymbols', 'H,SizeOfOptionalHeader',
'H,Characteristics'))
__IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY',
('I,VirtualAddress', 'I,Size'))
__IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'I,SizeOfCode',
'I,SizeOfInitializedData', 'I,SizeOfUninitializedData',
'I,AddressOfEntryPoint', 'I,BaseOfCode', 'I,BaseOfData',
'I,ImageBase', 'I,SectionAlignment', 'I,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'I,Reserved1', 'I,SizeOfImage', 'I,SizeOfHeaders',
'I,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'I,SizeOfStackReserve', 'I,SizeOfStackCommit',
'I,SizeOfHeapReserve', 'I,SizeOfHeapCommit',
'I,LoaderFlags', 'I,NumberOfRvaAndSizes' ))
__IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'I,SizeOfCode',
'I,SizeOfInitializedData', 'I,SizeOfUninitializedData',
'I,AddressOfEntryPoint', 'I,BaseOfCode',
'Q,ImageBase', 'I,SectionAlignment', 'I,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'I,Reserved1', 'I,SizeOfImage', 'I,SizeOfHeaders',
'I,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit',
'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit',
'I,LoaderFlags', 'I,NumberOfRvaAndSizes' ))
__IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('I,Signature',))
__IMAGE_SECTION_HEADER_format__ = ('IMAGE_SECTION_HEADER',
('8s,Name', 'I,Misc,Misc_PhysicalAddress,Misc_VirtualSize',
'I,VirtualAddress', 'I,SizeOfRawData', 'I,PointerToRawData',
'I,PointerToRelocations', 'I,PointerToLinenumbers',
'H,NumberOfRelocations', 'H,NumberOfLinenumbers',
'I,Characteristics'))
__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR',
('I,grAttrs', 'I,szName', 'I,phmod', 'I,pIAT', 'I,pINT',
'I,pBoundIAT', 'I,pUnloadIAT', 'I,dwTimeStamp'))
__IMAGE_IMPORT_DESCRIPTOR_format__ = ('IMAGE_IMPORT_DESCRIPTOR',
('I,OriginalFirstThunk,Characteristics',
'I,TimeDateStamp', 'I,ForwarderChain', 'I,Name', 'I,FirstThunk'))
__IMAGE_EXPORT_DIRECTORY_format__ = ('IMAGE_EXPORT_DIRECTORY',
('I,Characteristics',
'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'I,Name',
'I,Base', 'I,NumberOfFunctions', 'I,NumberOfNames',
'I,AddressOfFunctions', 'I,AddressOfNames', 'I,AddressOfNameOrdinals'))
__IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY',
('I,Characteristics',
'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion',
'H,NumberOfNamedEntries', 'H,NumberOfIdEntries'))
__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY',
('I,Name',
'I,OffsetToData'))
__IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY',
('I,OffsetToData', 'I,Size', 'I,CodePage', 'I,Reserved'))
__VS_VERSIONINFO_format__ = ( 'VS_VERSIONINFO',
('H,Length', 'H,ValueLength', 'H,Type' ))
__VS_FIXEDFILEINFO_format__ = ( 'VS_FIXEDFILEINFO',
('I,Signature', 'I,StrucVersion', 'I,FileVersionMS', 'I,FileVersionLS',
'I,ProductVersionMS', 'I,ProductVersionLS', 'I,FileFlagsMask', 'I,FileFlags',
'I,FileOS', 'I,FileType', 'I,FileSubtype', 'I,FileDateMS', 'I,FileDateLS'))
__StringFileInfo_format__ = ( 'StringFileInfo',
('H,Length', 'H,ValueLength', 'H,Type' ))
__StringTable_format__ = ( 'StringTable',
('H,Length', 'H,ValueLength', 'H,Type' ))
__String_format__ = ( 'String',
('H,Length', 'H,ValueLength', 'H,Type' ))
__Var_format__ = ( 'Var', ('H,Length', 'H,ValueLength', 'H,Type' ))
__IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA',
('I,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA',
('Q,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY',
('I,Characteristics', 'I,TimeDateStamp', 'H,MajorVersion',
'H,MinorVersion', 'I,Type', 'I,SizeOfData', 'I,AddressOfRawData',
'I,PointerToRawData'))
__IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION',
('I,VirtualAddress', 'I,SizeOfBlock') )
__IMAGE_BASE_RELOCATION_ENTRY_format__ = ('IMAGE_BASE_RELOCATION_ENTRY',
('H,Data',) )
__IMAGE_TLS_DIRECTORY_format__ = ('IMAGE_TLS_DIRECTORY',
('I,StartAddressOfRawData', 'I,EndAddressOfRawData',
'I,AddressOfIndex', 'I,AddressOfCallBacks',
'I,SizeOfZeroFill', 'I,Characteristics' ) )
__IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY',
('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData',
'Q,AddressOfIndex', 'Q,AddressOfCallBacks',
'I,SizeOfZeroFill', 'I,Characteristics' ) )
__IMAGE_LOAD_CONFIG_DIRECTORY_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY',
('I,Size',
'I,TimeDateStamp',
'H,MajorVersion',
'H,MinorVersion',
'I,GlobalFlagsClear',
'I,GlobalFlagsSet',
'I,CriticalSectionDefaultTimeout',
'I,DeCommitFreeBlockThreshold',
'I,DeCommitTotalFreeThreshold',
'I,LockPrefixTable',
'I,MaximumAllocationSize',
'I,VirtualMemoryThreshold',
'I,ProcessHeapFlags',
'I,ProcessAffinityMask',
'H,CSDVersion',
'H,Reserved1',
'I,EditList',
'I,SecurityCookie',
'I,SEHandlerTable',
'I,SEHandlerCount',
'I,GuardCFCheckFunctionPointer',
'I,Reserved2',
'I,GuardCFFunctionTable',
'I,GuardCFFunctionCount',
'I,GuardFlags' ) )
__IMAGE_LOAD_CONFIG_DIRECTORY64_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY',
('I,Size',
'I,TimeDateStamp',
'H,MajorVersion',
'H,MinorVersion',
'I,GlobalFlagsClear',
'I,GlobalFlagsSet',
'I,CriticalSectionDefaultTimeout',
'Q,DeCommitFreeBlockThreshold',
'Q,DeCommitTotalFreeThreshold',
'Q,LockPrefixTable',
'Q,MaximumAllocationSize',
'Q,VirtualMemoryThreshold',
'Q,ProcessAffinityMask',
'I,ProcessHeapFlags',
'H,CSDVersion',
'H,Reserved1',
'Q,EditList',
'Q,SecurityCookie',
'Q,SEHandlerTable',
'Q,SEHandlerCount',
'Q,GuardCFCheckFunctionPointer',
'Q,Reserved2',
'Q,GuardCFFunctionTable',
'Q,GuardCFFunctionCount',
'I,GuardFlags' ) )
__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR',
('I,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs'))
__IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF',
('I,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved') )
def __init__(self, name=None, data=None, fast_load=None, length=0):
self.sections = []
self.__warnings = []
self.PE_TYPE = None
if name is None and data is None:
raise ValueError('Must supply either name or data')
# This list will keep track of all the structures created.
# That will allow for an easy iteration through the list
# in order to save the modifications made
self.__structures__ = []
self.__from_file = None
if not fast_load:
fast_load = globals()['fast_load']
try:
self.__parse__(name, data, fast_load, length=length)
except:
self.close()
raise
def close(self):
if ( self.__from_file is True and hasattr(self, '__data__') and
((isinstance(mmap.mmap, type) and isinstance(self.__data__, mmap.mmap)) or
'mmap.mmap' in repr(type(self.__data__))) ):
self.__data__.close()
del self.__data__
def __unpack_data__(self, format, data, file_offset):
"""Apply structure format to raw data.
        Returns an unpacked structure object if successful, None otherwise.
"""
structure = Structure(format, file_offset=file_offset)
try:
structure.__unpack__(data)
except PEFormatError as err:
self.__warnings.append(
'Corrupt header "{0}" at file offset {1}. Exception: {2}'.format(
format[0], file_offset, err) )
return None
self.__structures__.append(structure)
return structure
def __parse__(self, fname, data, fast_load, length=0):
"""Parse a Portable Executable file.
Loads a PE file, parsing all its structures and making them available
through the instance's attributes.
"""
if fname is not None:
stat = os.stat(fname)
if stat.st_size == 0:
raise PEFormatError('The file is empty')
fd = None
try:
fd = open(fname, 'rb')
self.fileno = fd.fileno()
if hasattr(mmap, 'MAP_PRIVATE'):
# Unix
self.__data__ = mmap.mmap(self.fileno, length, mmap.MAP_PRIVATE)
else:
# Windows
self.__data__ = mmap.mmap(self.fileno, length, access=mmap.ACCESS_READ)
self.__from_file = True
except IOError as excp:
exception_msg = '{0}'.format(excp)
if exception_msg:
exception_msg = ': %s' % exception_msg
raise Exception('Unable to access file \'{0}\'{1}'.format(fname, exception_msg))
finally:
if fd is not None:
fd.close()
elif data is not None:
self.__data__ = data
self.__from_file = False
if not fast_load:
for byte, byte_count in Counter(bytearray(self.__data__)).items():
# Only report the cases where a byte makes up for more than 50% (if
# zero) or 15% (if non-zero) of the file's contents. There are
# legitimate PEs where 0x00 bytes are close to 50% of the whole
# file's contents.
if (byte == 0 and 1.0 * byte_count / len(self.__data__) > 0.5) or (
byte != 0 and 1.0 * byte_count / len(self.__data__) > 0.15):
self.__warnings.append(
("Byte 0x{0:02x} makes up {1:.4f}% of the file's contents."
" This may indicate truncation / malformation.").format(
byte, 100.0 * byte_count / len(self.__data__)))
dos_header_data = self.__data__[:64]
if len(dos_header_data) != 64:
raise PEFormatError('Unable to read the DOS Header, possibly a truncated file.')
self.DOS_HEADER = self.__unpack_data__(
self.__IMAGE_DOS_HEADER_format__,
dos_header_data, file_offset=0)
        # Make sure the DOS header unpacked before dereferencing its members.
        if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE:
            if self.DOS_HEADER and self.DOS_HEADER.e_magic == IMAGE_DOSZM_SIGNATURE:
                raise PEFormatError('Probably a ZM Executable (not a PE file).')
            raise PEFormatError('DOS Header magic not found.')
# OC Patch:
# Check for sane value in e_lfanew
#
if self.DOS_HEADER.e_lfanew > len(self.__data__):
raise PEFormatError('Invalid e_lfanew value, probably not a PE file')
nt_headers_offset = self.DOS_HEADER.e_lfanew
self.NT_HEADERS = self.__unpack_data__(
self.__IMAGE_NT_HEADERS_format__,
self.__data__[nt_headers_offset:nt_headers_offset+8],
file_offset = nt_headers_offset)
# We better check the signature right here, before the file screws
# around with sections:
# OC Patch:
# Some malware will cause the Signature value to not exist at all
if not self.NT_HEADERS or not self.NT_HEADERS.Signature:
raise PEFormatError('NT Headers not found.')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_NE_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a NE file')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LE_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a LE file')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_LX_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a LX file')
if (0xFFFF & self.NT_HEADERS.Signature) == IMAGE_TE_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature. Probably a TE file')
if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature.')
self.FILE_HEADER = self.__unpack_data__(
self.__IMAGE_FILE_HEADER_format__,
self.__data__[nt_headers_offset+4:nt_headers_offset+4+32],
file_offset = nt_headers_offset+4)
image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
        # Set the image's flags according to the Characteristics member
set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags)
optional_header_offset = \
nt_headers_offset+4+self.FILE_HEADER.sizeof()
# Note: location of sections can be controlled from PE header:
sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
            # Read a generous 256 bytes; copying more than the header needs is harmless
self.__data__[optional_header_offset:optional_header_offset+256],
file_offset = optional_header_offset)
# According to solardesigner's findings for his
# Tiny PE project, the optional header does not
# need fields beyond "Subsystem" in order to be
# loadable by the Windows loader (given that zeros
# are acceptable values and the header is loaded
# in a zeroed memory page)
# If trying to parse a full Optional Header fails
# we try to parse it again with some 0 padding
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:optional_header_offset+0x200])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
# Add enough zeros to make up for the unused fields
#
padding_length = 128
# Create padding
#
padded_data = self.__data__[optional_header_offset:optional_header_offset+0x200] + (
b'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
padded_data,
file_offset = optional_header_offset)
# Check the Magic in the OPTIONAL_HEADER and set the PE file
# type accordingly
#
if self.OPTIONAL_HEADER is not None:
if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE
elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
self.__data__[optional_header_offset:optional_header_offset+0x200],
file_offset = optional_header_offset)
# Again, as explained above, we try to parse
# a reduced form of the Optional Header which
# is still valid despite not including all
# structure members
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69+4
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:optional_header_offset+0x200])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
padding_length = 128
padded_data = self.__data__[optional_header_offset:optional_header_offset+0x200] + (
b'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
padded_data,
file_offset = optional_header_offset)
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
# OC Patch:
# Die gracefully if there is no OPTIONAL_HEADER field
# 975440f5ad5e2e4a92c4d9a5f22f75c1
if self.OPTIONAL_HEADER is None:
raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file.")
if self.PE_TYPE is None:
self.__warnings.append(
"Invalid type 0x{0:04x} in Optional Header.".format(
self.OPTIONAL_HEADER.Magic))
dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
# Set the Dll Characteristics flags according to the DllCharacteristics member
set_flags(
self.OPTIONAL_HEADER,
self.OPTIONAL_HEADER.DllCharacteristics,
dll_characteristics_flags)
self.OPTIONAL_HEADER.DATA_DIRECTORY = []
#offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader)
offset = (optional_header_offset + self.OPTIONAL_HEADER.sizeof())
self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER
self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER
# Windows 8 specific check
#
if self.OPTIONAL_HEADER.AddressOfEntryPoint < self.OPTIONAL_HEADER.SizeOfHeaders:
self.__warnings.append(
'AddressOfEntryPoint is smaller than SizeOfHeaders: this file cannot run under Windows 8.')
# The NumberOfRvaAndSizes is sanitized to stay within
# reasonable limits so it can be cast to an int
#
if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10:
self.__warnings.append(
'Suspicious NumberOfRvaAndSizes in the Optional Header. '
'Normal values are never larger than 0x10, the value is: 0x%x' %
self.OPTIONAL_HEADER.NumberOfRvaAndSizes )
MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES = 0x100
for i in range(int(0x7fffffff & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)):
if len(self.__data__) - offset == 0:
break
if len(self.__data__) - offset < 8:
data = self.__data__[offset:] + b'\0'*8
else:
data = self.__data__[offset:offset+MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES]
dir_entry = self.__unpack_data__(
self.__IMAGE_DATA_DIRECTORY_format__,
data,
file_offset = offset)
if dir_entry is None:
break
# Would fail if missing an entry
# 1d4937b2fa4d84ad1bce0309857e70ca offending sample
try:
dir_entry.name = DIRECTORY_ENTRY[i]
except (KeyError, AttributeError):
break
offset += dir_entry.sizeof()
self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry)
# If the offset goes outside the optional header,
# the loop is broken, regardless of how many directories
# NumberOfRvaAndSizes says there are
#
# We assume a normally sized optional header, hence we use
# sizeof() instead of reading SizeOfOptionalHeader.
# Then we add a default number of directories times their size;
# if we go beyond that, we assume the number of directories
# is wrong and stop processing
if offset >= (optional_header_offset +
self.OPTIONAL_HEADER.sizeof() + 8*16) :
break
offset = self.parse_sections(sections_offset)
# OC Patch:
# There could be a problem if there are no raw data sections
# greater than 0
# fc91013eb72529da005110a3403541b6 example
# Should this throw an exception if the minimum header offset
# can't be found?
#
rawDataPointers = [
self.adjust_FileAlignment( s.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment )
for s in self.sections if s.PointerToRawData>0 ]
if len(rawDataPointers) > 0:
lowest_section_offset = min(rawDataPointers)
else:
lowest_section_offset = None
if not lowest_section_offset or lowest_section_offset < offset:
self.header = self.__data__[:offset]
else:
self.header = self.__data__[:lowest_section_offset]
# Check whether the entry point lies within a section
#
if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None:
# Check whether the entry point lies within the file
#
ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint)
if ep_offset > len(self.__data__):
self.__warnings.append(
'Possibly corrupt file. AddressOfEntryPoint lies outside the file. '
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
else:
self.__warnings.append(
'AddressOfEntryPoint lies outside the sections\' boundaries. '
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
if not fast_load:
self.full_load()
def parse_rich_header(self):
"""Parses the rich header
see http://www.ntcore.com/files/richsign.htm for more information
Structure:
00 DanS ^ checksum, checksum, checksum, checksum
10 Symbol RVA ^ checksum, Symbol size ^ checksum...
...
XX Rich, checksum, 0, 0,...
"""
# Rich Header constants
#
DANS = 0x536E6144 # 'DanS' as dword
RICH = 0x68636952 # 'Rich' as dword
rich_index = self.__data__.find(
b'Rich', 0x80, self.OPTIONAL_HEADER.get_file_offset())
if rich_index == -1:
return None
# Read a block of data
try:
# The end of the structure is 8 bytes after the start of the Rich
# string.
rich_data = self.get_data(0x80, rich_index + 8)
# Make the data length a multiple of 4, otherwise the
# subsequent parsing will fail. It's possible that we retrieve
# truncated data whose length is not a multiple of 4.
rich_data = rich_data[:4*int(len(rich_data)/4)]
data = list(struct.unpack(
'<{0}I'.format(int(len(rich_data)/4)), rich_data))
if RICH not in data:
return None
except PEFormatError:
return None
# get key, raw_data and clear_data
key = struct.pack('<L', data[data.index(RICH)+1])
result = {"key": key}
raw_data = rich_data[:rich_data.find(b'Rich')]
result["raw_data"] = raw_data
ord_ = lambda c : ord(c) if not isinstance(c, int) else c
clear_data = bytearray()
for i in range(len(raw_data)):
clear_data.append((ord_(raw_data[i]) ^ ord_(key[i % len(key)])))
result["clear_data"] = bytes(clear_data)
# the checksum should be present 3 times after the DanS signature
#
checksum = data[1]
if (data[0] ^ checksum != DANS
or data[2] != checksum
or data[3] != checksum):
return None
result["checksum"] = checksum
headervalues = []
result["values"] = headervalues
data = data[4:]
for i in range(int(len(data) / 2)):
# Stop once the Rich footer signature is found
#
if data[2 * i] == RICH:
# it should be followed by the checksum
#
if data[2 * i + 1] != checksum:
self.__warnings.append('Rich Header is malformed')
break
# header values come in pairs
#
headervalues += [data[2 * i] ^ checksum, data[2 * i + 1] ^ checksum]
return result
def get_warnings(self):
"""Return the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method returns the
full list.
"""
return self.__warnings
def show_warnings(self):
"""Print the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method prints the
full list to standard output.
"""
for warning in self.__warnings:
print('>', warning)
def full_load(self):
"""Process the data directories.
This method will load the data directories which might not have
been loaded if the "fast_load" option was used.
"""
self.parse_data_directories()
class RichHeader(object):
pass
rich_header = self.parse_rich_header()
if rich_header:
self.RICH_HEADER = RichHeader()
self.RICH_HEADER.checksum = rich_header.get('checksum', None)
self.RICH_HEADER.values = rich_header.get('values', None)
self.RICH_HEADER.key = rich_header.get('key', None)
self.RICH_HEADER.raw_data = rich_header.get('raw_data', None)
self.RICH_HEADER.clear_data = rich_header.get('clear_data', None)
else:
self.RICH_HEADER = None
def write(self, filename=None):
"""Write the PE file.
This function will process all headers and components
of the PE file and include all changes made (by just
assigning to attributes in the PE objects) and write
the changes back to a file whose name is provided as
an argument. The filename is optional; if it is not
provided, the modified data is returned as a bytearray object.
"""
file_data = bytearray(self.__data__)
for structure in self.__structures__:
struct_data = bytearray(structure.__pack__())
offset = structure.get_file_offset()
file_data[offset:offset+len(struct_data)] = struct_data
if hasattr(self, 'VS_VERSIONINFO'):
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
for key, entry in list(st_entry.entries.items()):
# Offsets and lengths of the keys and values.
# Each value in the dictionary is a tuple:
# (key length, value length)
# The lengths are in characters, not in bytes.
offsets = st_entry.entries_offsets[key]
lengths = st_entry.entries_lengths[key]
if len( entry ) > lengths[1]:
l = entry.decode('utf-8').encode('utf-16le')
file_data[offsets[1]:offsets[1]+lengths[1]*2 ] = l[:lengths[1]*2]
else:
encoded_data = entry.decode('utf-8').encode('utf-16le')
file_data[offsets[1]:offsets[1]+len(encoded_data)] = encoded_data
new_file_data = file_data
if not filename:
return new_file_data
f = open(filename, 'wb+')
f.write(new_file_data)
f.close()
return
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data"
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
"""
self.sections = []
MAX_SIMULTANEOUS_ERRORS = 3
for i in range(self.FILE_HEADER.NumberOfSections):
simultaneous_errors = 0
section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section_data = self.__data__[section_offset : section_offset + section.sizeof()]
# Check if the section is all nulls and stop if so.
if count_zeroes(section_data) == section.sizeof():
self.__warnings.append(
'Invalid section {0}. Contents are null-bytes.'.format(i))
break
if len(section_data) == 0:
self.__warnings.append(
'Invalid section {0}. No data in the file (is this corkami\'s virtsectblXP?).'.format(i))
break
section.__unpack__(section_data)
self.__structures__.append(section)
if section.SizeOfRawData+section.PointerToRawData > len(self.__data__):
simultaneous_errors += 1
self.__warnings.append(
'Error parsing section {0}. SizeOfRawData is larger than file.'.format(i))
if self.adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
simultaneous_errors += 1
self.__warnings.append(
'Error parsing section {0}. PointerToRawData points beyond the end of the file.'.format(i))
if section.Misc_VirtualSize > 0x10000000:
simultaneous_errors += 1
self.__warnings.append(
'Suspicious value found parsing section {0}. VirtualSize is extremely large > 256MiB.'.format(i))
if self.adjust_SectionAlignment( section.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:
simultaneous_errors += 1
self.__warnings.append(
'Suspicious value found parsing section {0}. VirtualAddress is beyond 0x10000000.'.format(i))
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
simultaneous_errors += 1
self.__warnings.append(
('Error parsing section {0}. '
'PointerToRawData should normally be '
'a multiple of FileAlignment, this might imply the file '
'is trying to confuse tools which parse this incorrectly.').format(i))
if simultaneous_errors >= MAX_SIMULTANEOUS_ERRORS:
self.__warnings.append('Too many warnings parsing section. Aborting.')
break
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according to the Characteristics member
set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
if section.Name == 'PAGE' and self.is_driver():
# Drivers can have a PAGE section with those flags set without
# implying that it is malicious
pass
else:
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. '
'This might indicate a packed executable.')
self.sections.append(section)
# Sort the sections by their VirtualAddress and add a field to each of them
# with the VirtualAddress of the next section. This will allow checking
# for potentially overlapping sections in badly constructed PEs.
self.sections.sort(key=lambda a: a.VirtualAddress)
for idx, section in enumerate(self.sections):
if idx == len(self.sections)-1:
section.next_section_virtual_address = None
else:
section.next_section_virtual_address = self.sections[idx+1].VirtualAddress
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset
def parse_data_directories(self, directories=None,
forwarded_exports_only=False,
import_dllnames_only=False):
"""Parse and process the PE file's data directories.
If the optional argument 'directories' is given, only
the directories at the specified indexes will be parsed.
Such functionality allows parsing of areas of interest
without the burden of having to parse all others.
The directories can then be specified as:
For export / import only:
directories = [ 0, 1 ]
or (more verbosely):
directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ]
If 'directories' is a list, the ones that are processed will be removed,
leaving only the ones that are not present in the image.
If `forwarded_exports_only` is True, the IMAGE_DIRECTORY_ENTRY_EXPORT
attribute will only contain exports that are forwarded to another DLL.
If `import_dllnames_only` is True, symbols will not be parsed from
the import table and the entries in the IMAGE_DIRECTORY_ENTRY_IMPORT
attribute will not have a `symbols` attribute.
"""
directory_parsing = (
('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', self.parse_directory_load_config),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports) )
if directories is not None:
if not isinstance(directories, (tuple, list)):
directories = [directories]
for entry in directory_parsing:
# OC Patch:
#
try:
directory_index = DIRECTORY_ENTRY[entry[0]]
dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index]
except IndexError:
break
# Only process all the directories if no individual ones have
# been chosen
#
if directories is None or directory_index in directories:
if dir_entry.VirtualAddress:
if forwarded_exports_only and entry[0] == 'IMAGE_DIRECTORY_ENTRY_EXPORT':
value = entry[1](dir_entry.VirtualAddress, dir_entry.Size, forwarded_only=True)
elif import_dllnames_only and entry[0] == 'IMAGE_DIRECTORY_ENTRY_IMPORT':
value = entry[1](dir_entry.VirtualAddress, dir_entry.Size, dllnames_only=True)
else:
value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
if value:
setattr(self, entry[0][6:], value)
if (directories is not None) and isinstance(directories, list) and (entry[0] in directories):
directories.remove(directory_index)
def parse_directory_bound_imports(self, rva, size):
""""""
bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__)
bnd_descr_size = bnd_descr.sizeof()
start = rva
bound_imports = []
while True:
bnd_descr = self.__unpack_data__(
self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
if bnd_descr is None:
# If the directory can't be parsed then silently return.
# This directory does not necessarily have to be valid to
# still have a valid PE file
self.__warnings.append(
'The Bound Imports directory exists but can\'t be parsed.')
return
if bnd_descr.all_zeroes():
break
rva += bnd_descr.sizeof()
section = self.get_section_by_offset(rva)
file_offset = self.get_offset_from_rva(rva)
if section is None:
safety_boundary = len(self.__data__) - file_offset
sections_after_offset = [
s.PointerToRawData for s in self.sections
if s.PointerToRawData > file_offset]
if sections_after_offset:
# Find the first section starting at a later offset than that
# specified by 'rva'
first_section_after_offset = min(sections_after_offset)
section = self.get_section_by_offset(first_section_after_offset)
if section is not None:
safety_boundary = section.PointerToRawData - file_offset
else:
safety_boundary = (section.PointerToRawData +
len(section.get_data()) - file_offset)
if not section:
self.__warnings.append(
('RVA of IMAGE_BOUND_IMPORT_DESCRIPTOR points '
'to an invalid address: {0:x}').format(rva))
return
forwarder_refs = []
# 8 is the size of __IMAGE_BOUND_IMPORT_DESCRIPTOR_format__
for idx in range(min(bnd_descr.NumberOfModuleForwarderRefs,
int(safety_boundary / 8))):
# Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and
# IMAGE_BOUND_FORWARDER_REF have the same size.
bnd_frwd_ref = self.__unpack_data__(
self.__IMAGE_BOUND_FORWARDER_REF_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
# OC Patch:
if not bnd_frwd_ref:
raise PEFormatError(
"IMAGE_BOUND_FORWARDER_REF cannot be read")
rva += bnd_frwd_ref.sizeof()
offset = start+bnd_frwd_ref.OffsetModuleName
name_str = self.get_string_from_data(
0, self.__data__[offset : offset + MAX_STRING_LENGTH])
# OffsetModuleName points to a DLL name. These shouldn't be too long.
# Anything longer than the safety length of 256 checked below will be taken
# to indicate a corrupt entry and abort the processing of these entries.
# Names shorter than 4 characters will be taken as invalid as well.
if name_str:
invalid_chars = [
c for c in bytearray(name_str) if
chr(c) not in string.printable]
if len(name_str) > 256 or invalid_chars:
break
forwarder_refs.append(BoundImportRefData(
struct = bnd_frwd_ref,
name = name_str))
offset = start+bnd_descr.OffsetModuleName
name_str = self.get_string_from_data(
0, self.__data__[offset : offset + MAX_STRING_LENGTH])
if name_str:
invalid_chars = [
c for c in bytearray(name_str) if
chr(c) not in string.printable]
if len(name_str) > 256 or invalid_chars:
break
if not name_str:
break
bound_imports.append(
BoundImportDescData(
struct = bnd_descr,
name = name_str,
entries = forwarder_refs))
return bound_imports
def parse_directory_tls(self, rva, size):
""""""
# By default let's pretend the format is a 32-bit PE. It may help
# produce some output for files where the Magic in the Optional Header
# is incorrect.
format = self.__IMAGE_TLS_DIRECTORY_format__
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_TLS_DIRECTORY64_format__
try:
tls_struct = self.__unpack_data__(
format,
self.get_data( rva, Structure(format).sizeof() ),
file_offset = self.get_offset_from_rva(rva))
except PEFormatError:
self.__warnings.append(
'Invalid TLS information. Can\'t read '
'data at RVA: 0x%x' % rva)
tls_struct = None
if not tls_struct:
return None
return TlsData( struct = tls_struct )
def parse_directory_load_config(self, rva, size):
""""""
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_LOAD_CONFIG_DIRECTORY_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_LOAD_CONFIG_DIRECTORY64_format__
else:
self.__warnings.append(
'Don\'t know how to parse LOAD_CONFIG information for non-PE32/'
'PE32+ file')
return None
load_config_struct = None
try:
load_config_struct = self.__unpack_data__(
format,
self.get_data( rva, Structure(format).sizeof() ),
file_offset = self.get_offset_from_rva(rva))
except PEFormatError:
self.__warnings.append(
'Invalid LOAD_CONFIG information. Can\'t read '
'data at RVA: 0x%x' % rva)
if not load_config_struct:
return None
return LoadConfigData( struct = load_config_struct )
def parse_relocations_directory(self, rva, size):
""""""
rlc_size = Structure(self.__IMAGE_BASE_RELOCATION_format__).sizeof()
end = rva+size
relocations = []
while rva < end:
# OC Patch:
# Malware that has bad RVA entries will cause an error.
# Just continue on after an exception
#
try:
rlc = self.__unpack_data__(
self.__IMAGE_BASE_RELOCATION_format__,
self.get_data(rva, rlc_size),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Invalid relocation information. Can\'t read '
'data at RVA: 0x%x' % rva)
rlc = None
if not rlc:
break
# rlc.VirtualAddress must lie within the Image
if rlc.VirtualAddress > self.OPTIONAL_HEADER.SizeOfImage:
self.__warnings.append(
'Invalid relocation information. VirtualAddress outside'
' of Image: 0x%x' % rlc.VirtualAddress)
break
# rlc.SizeOfBlock must be less or equal than the size of the image
# (It's a rather loose sanity test)
if rlc.SizeOfBlock > self.OPTIONAL_HEADER.SizeOfImage:
self.__warnings.append(
'Invalid relocation information. SizeOfBlock too large'
': %d' % rlc.SizeOfBlock)
break
reloc_entries = self.parse_relocations(
rva+rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock-rlc_size )
relocations.append(
BaseRelocationData(
struct = rlc,
entries = reloc_entries))
if not rlc.SizeOfBlock:
break
rva += rlc.SizeOfBlock
return relocations
def parse_relocations(self, data_rva, rva, size):
""""""
try:
data = self.get_data(data_rva, size)
file_offset = self.get_offset_from_rva(data_rva)
except PEFormatError as excp:
self.__warnings.append(
'Bad RVA in relocation data: 0x%x' % (data_rva))
return []
entries = []
offsets_and_type = []
for idx in range( int(len(data) / 2) ):
entry = self.__unpack_data__(
self.__IMAGE_BASE_RELOCATION_ENTRY_format__,
data[idx*2:(idx+1)*2],
file_offset = file_offset )
if not entry:
break
word = entry.Data
reloc_type = (word>>12)
reloc_offset = (word & 0x0fff)
if (reloc_offset, reloc_type) in offsets_and_type:
self.__warnings.append(
'Overlapping offsets in relocation data '
'data at RVA: 0x%x' % (reloc_offset+rva))
break
if len(offsets_and_type) >= 1000:
offsets_and_type.pop()
offsets_and_type.insert(0, (reloc_offset, reloc_type))
entries.append(
RelocationData(
struct = entry,
type = reloc_type,
base_rva = rva,
rva = reloc_offset+rva))
file_offset += entry.sizeof()
return entries
def parse_debug_directory(self, rva, size):
""""""
dbg_size = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__).sizeof()
debug = []
for idx in range(int(size / dbg_size)):
try:
data = self.get_data(rva+dbg_size*idx, dbg_size)
except PEFormatError as e:
self.__warnings.append(
'Invalid debug information. Can\'t read '
'data at RVA: 0x%x' % rva)
return None
dbg = self.__unpack_data__(
self.__IMAGE_DEBUG_DIRECTORY_format__,
data, file_offset = self.get_offset_from_rva(rva+dbg_size*idx))
if not dbg:
return None
# apply structure according to DEBUG_TYPE
# http://www.debuginfo.com/articles/debuginfomatch.html
#
dbg_type = None
if dbg.Type == 1:
# IMAGE_DEBUG_TYPE_COFF
pass
elif dbg.Type == 2:
# IMAGE_DEBUG_TYPE_CODEVIEW
dbg_type_offset = dbg.PointerToRawData
dbg_type_size = dbg.SizeOfData
dbg_type_data = self.__data__[dbg_type_offset:dbg_type_offset+dbg_type_size]
if dbg_type_data[:4] == b'RSDS':
# pdb7.0
__CV_INFO_PDB70_format__ = ['CV_INFO_PDB70',
['I,CvSignature',
'I,Signature_Data1', # Signature is of GUID type
'H,Signature_Data2',
'H,Signature_Data3',
'H,Signature_Data4',
'H,Signature_Data5',
'I,Signature_Data6',
'I,Age']]
pdbFileName_size = (
dbg_type_size -
Structure(__CV_INFO_PDB70_format__).sizeof())
# pdbFileName_size can be negative here, as seen in the malware sample with hash
# MD5: 7c297600870d026c014d42596bb9b5fd
# SHA256: 83f4e63681fcba8a9d7bbb1688c71981b1837446514a1773597e0192bba9fac3
# Checking for positive size here to ensure proper parsing.
if pdbFileName_size > 0:
__CV_INFO_PDB70_format__[1].append(
'{0}s,PdbFileName'.format(pdbFileName_size))
dbg_type = self.__unpack_data__(
__CV_INFO_PDB70_format__,
dbg_type_data,
dbg_type_offset)
elif dbg_type_data[:4] == b'NB10':
# pdb2.0
__CV_INFO_PDB20_format__ = ['CV_INFO_PDB20',
['I,CvHeaderSignature',
'I,CvHeaderOffset',
'I,Signature',
'I,Age']]
pdbFileName_size = (
dbg_type_size -
Structure(__CV_INFO_PDB20_format__).sizeof())
# As with the PDB 7.0 case, ensuring a positive size for pdbFileName_size
# to ensure proper parsing.
if pdbFileName_size > 0:
# Add the last variable-length string field.
__CV_INFO_PDB20_format__[1].append(
'{0}s,PdbFileName'.format(pdbFileName_size))
dbg_type = self.__unpack_data__(
__CV_INFO_PDB20_format__,
dbg_type_data,
dbg_type_offset)
elif dbg.Type == 4:
# IMAGE_DEBUG_TYPE_MISC
dbg_type_offset = dbg.PointerToRawData
dbg_type_size = dbg.SizeOfData
dbg_type_data = self.__data__[dbg_type_offset:dbg_type_offset+dbg_type_size]
___IMAGE_DEBUG_MISC_format__ = ['IMAGE_DEBUG_MISC',
['I,DataType',
'I,Length',
'B,Unicode',
'B,Reserved1',
'H,Reserved2']]
dbg_type_partial = self.__unpack_data__(
___IMAGE_DEBUG_MISC_format__,
dbg_type_data,
dbg_type_offset)
# Need to check that dbg_type_partial contains a correctly unpacked data
# structure, as the malware sample with the following hash
# MD5: 5e7d6707d693108de5a303045c17d95b
# SHA256: 5dd94a95025f3b6e3dd440d52f7c6d2964fdd1aa119e0ee92e38c7bf83829e5c
# contains a value of None for dbg_type_partial after unpacking, presumably
# due to a malformed DEBUG entry.
if dbg_type_partial:
# The Unicode bool should be set to 0 or 1.
if dbg_type_partial.Unicode in (0, 1):
data_size = (
dbg_type_size -
Structure(___IMAGE_DEBUG_MISC_format__).sizeof())
# As with the PDB case, ensuring a positive size for data_size here
# to ensure proper parsing.
if data_size > 0:
___IMAGE_DEBUG_MISC_format__[1].append(
'{0}s,Data'.format(data_size))
dbg_type = self.__unpack_data__(
___IMAGE_DEBUG_MISC_format__,
dbg_type_data,
dbg_type_offset)
debug.append(
DebugData(
struct = dbg,
entry = dbg_type))
return debug
def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0, dirs=None):
"""Parse the resources directory.
Given the RVA of the resources directory, it will process all
its entries.
The root will have the corresponding member of its structure,
IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
entries in the directory.
Those entries will have, correspondingly, all the structure's
members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
"directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
representing upper layers of the tree. This one will also have
an 'entries' attribute, pointing to the 3rd, and last, level.
Another directory with more entries. Those last entries will
have a new attribute (both 'leaf' or 'data_entry' can be used to
access it). This structure finally points to the resource data.
All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
are available as its attributes.
"""
# OC Patch:
if dirs is None:
dirs = [rva]
if base_rva is None:
base_rva = rva
resources_section = self.get_section_by_rva(rva)
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_format__).sizeof() )
except PEFormatError as e:
self.__warnings.append(
'Invalid resources directory. Can\'t read '
'directory data at RVA: 0x%x' % rva)
return None
# Get the resource directory structure, that is, the header
# of the table preceding the actual entries
#
resource_dir = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource_dir is None:
# If the resources directory can't be parsed then silently return.
# This directory does not necessarily have to be valid to
# still have a valid PE file
self.__warnings.append(
'Invalid resources directory. Can\'t parse '
'directory data at RVA: 0x%x' % rva)
return None
dir_entries = []
# Advance the RVA to the position immediately following the directory
# table header and pointing to the first entry in the table
#
rva += resource_dir.sizeof()
number_of_entries = (
resource_dir.NumberOfNamedEntries +
resource_dir.NumberOfIdEntries )
# Set a hard limit on the maximum reasonable number of entries
MAX_ALLOWED_ENTRIES = 4096
if number_of_entries > MAX_ALLOWED_ENTRIES:
self.__warnings.append(
'Error parsing the resources directory. '
'The directory contains %d entries (>%s)' %
(number_of_entries, MAX_ALLOWED_ENTRIES) )
return None
strings_to_postprocess = list()
# Keep track of the last name's start and end offsets in order
# to be able to detect overlapping entries that might suggest
# an invalid or corrupt directory.
last_name_begin_end = None
for idx in range(number_of_entries):
res = self.parse_resource_entry(rva)
if res is None:
self.__warnings.append(
'Error parsing the resources directory, '
'Entry %d is invalid, RVA = 0x%x. ' %
(idx, rva) )
break
entry_name = None
entry_id = None
name_is_string = (res.Name & 0x80000000) >> 31
if not name_is_string:
entry_id = res.Name
else:
ustr_offset = base_rva+res.NameOffset
try:
entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
# If the last entry's offset points before the current's but its end
# is past the current's beginning, assume the overlap indicates a
# corrupt name.
if last_name_begin_end and (last_name_begin_end[0] < ustr_offset and
last_name_begin_end[1] >= ustr_offset):
# Remove the previous overlapping entry as it's likely to be already corrupt data.
strings_to_postprocess.pop()
self.__warnings.append(
'Error parsing the resources directory, '
'attempting to read entry name. '
'Entry names overlap 0x%x' %
(ustr_offset) )
break
last_name_begin_end = (ustr_offset, ustr_offset+entry_name.get_pascal_16_length())
strings_to_postprocess.append(entry_name)
except PEFormatError as excp:
self.__warnings.append(
'Error parsing the resources directory, '
'attempting to read entry name. '
'Can\'t read unicode string at offset 0x%x' %
(ustr_offset) )
if res.DataIsDirectory:
# OC Patch:
#
# One trick malware can do is to recursively reference
# the next directory. This causes hilarity to ensue when
# trying to parse everything correctly.
# If the original RVA given to this function is equal to
# the next one to parse, we assume that it's a trick.
# Instead of raising a PEFormatError this would skip some
# reasonable data so we just break.
#
# 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample
if (base_rva + res.OffsetToDirectory) in dirs:
break
else:
entry_directory = self.parse_resources_directory(
base_rva+res.OffsetToDirectory,
size-(rva-base_rva), # size
base_rva=base_rva, level = level+1,
dirs=dirs + [base_rva + res.OffsetToDirectory])
if not entry_directory:
break
# Ange Albertini's code to process resources' strings
#
strings = None
if entry_id == RESOURCE_TYPE['RT_STRING']:
strings = dict()
for resource_id in entry_directory.entries:
if hasattr(resource_id, 'directory'):
resource_strings = dict()
for resource_lang in resource_id.directory.entries:
if (resource_lang is None or not hasattr(resource_lang, 'data') or
resource_lang.data.struct.Size is None or resource_id.id is None):
continue
string_entry_rva = resource_lang.data.struct.OffsetToData
string_entry_size = resource_lang.data.struct.Size
string_entry_id = resource_id.id
# XXX: has been raising exceptions preventing parsing
try:
string_entry_data = self.get_data(string_entry_rva, string_entry_size)
except:
self.__warnings.append(
'Error parsing resource of type RT_STRING at RVA 0x%x with size %d' %
(string_entry_rva, string_entry_size))
continue
parse_strings(string_entry_data, (int(string_entry_id) - 1) * 16, resource_strings)
strings.update(resource_strings)
resource_id.directory.strings = resource_strings
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
directory = entry_directory))
else:
struct = self.parse_resource_data_entry(
base_rva + res.OffsetToDirectory)
if struct:
entry_data = ResourceDataEntryData(
struct = struct,
lang = res.Name & 0x3ff,
sublang = res.Name >> 10 )
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
data = entry_data))
else:
break
# Check if this entry contains version information
#
if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']:
if len(dir_entries)>0:
last_entry = dir_entries[-1]
rt_version_struct = None
try:
rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
except:
# Maybe a malformed directory structure...?
# Let's ignore it
pass
if rt_version_struct is not None:
self.parse_version_information(rt_version_struct)
rva += res.sizeof()
string_rvas = [s.get_rva() for s in strings_to_postprocess]
string_rvas.sort()
for idx, s in enumerate(strings_to_postprocess):
s.render_pascal_16()
resource_directory_data = ResourceDirData(
struct = resource_dir,
entries = dir_entries)
return resource_directory_data
def parse_resource_data_entry(self, rva):
"""Parse a data entry from the resources directory."""
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DATA_ENTRY_format__).sizeof() )
except PEFormatError as excp:
self.__warnings.append(
'Error parsing a resource directory data entry, '
'the RVA is invalid: 0x%x' % ( rva ) )
return None
data_entry = self.__unpack_data__(
self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
return data_entry
def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
try:
data = self.get_data( rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__).sizeof() )
except PEFormatError as excp:
# A warning will be added by the caller if this method returns None
return None
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFF
resource.__pad = resource.Name & 0xFFFF0000
resource.Id = resource.Name & 0x0000FFFF
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFF
return resource
def parse_version_information(self, version_struct):
"""Parse version information structure.
The data will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key / value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
"""
# Retrieve the data for the version info resource
#
start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
# Map the main structure and the subsequent string
#
versioninfo_struct = self.__unpack_data__(
self.__VS_VERSIONINFO_format__, raw_data,
file_offset = start_offset )
if versioninfo_struct is None:
return
ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
section = self.get_section_by_rva(ustr_offset)
section_end = None
if section:
section_end = section.VirtualAddress + max(
section.SizeOfRawData, section.Misc_VirtualSize)
versioninfo_string = None
# These should return 'ascii'-decoded data. In the case of garbled
# data, the ascii string will retain the byte values, while
# decoding it as something else may yield values that don't match the
# file's contents.
try:
if section_end is None:
versioninfo_string = self.get_string_u_at_rva(
ustr_offset, encoding='ascii')
else:
versioninfo_string = self.get_string_u_at_rva(
ustr_offset, (section_end - ustr_offset) >> 1,
encoding='ascii')
except PEFormatError as excp:
self.__warnings.append(
'Error parsing the version information, '
'attempting to read VS_VERSION_INFO string. Can\'t '
'read unicode string at offset 0x%x' % (
ustr_offset))
if versioninfo_string == None:
self.__warnings.append('Invalid VS_VERSION_INFO block: {0}'.format(
versioninfo_string))
return
# If the structure does not contain the expected name, it's assumed to
# be invalid
if (versioninfo_string is not None and
versioninfo_string != b'VS_VERSION_INFO'):
if len(versioninfo_string) > 128:
excerpt = versioninfo_string[:128].decode('ascii')
# Don't leave any half-escaped characters
excerpt = excerpt[:excerpt.rfind('\\u')]
versioninfo_string = \
b('{0} ... ({1} bytes, too long to display)'.format(
excerpt,
len(versioninfo_string)))
self.__warnings.append('Invalid VS_VERSION_INFO block: {0}'.format(
versioninfo_string.decode('ascii').replace('\00', '\\00')))
return
# Set the PE object's VS_VERSIONINFO to this one
self.VS_VERSIONINFO = versioninfo_struct
# Set the Key attribute to point to the unicode string identifying the structure
self.VS_VERSIONINFO.Key = versioninfo_string
if versioninfo_string is None:
versioninfo_string = ''
# Process the fixed version information, get the offset and structure
fixedfileinfo_offset = self.dword_align(
versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
version_struct.OffsetToData)
fixedfileinfo_struct = self.__unpack_data__(
self.__VS_FIXEDFILEINFO_format__,
raw_data[fixedfileinfo_offset:],
file_offset = start_offset+fixedfileinfo_offset )
if not fixedfileinfo_struct:
return
# Set the PE object's VS_FIXEDFILEINFO to this one
self.VS_FIXEDFILEINFO = fixedfileinfo_struct
# Start parsing all the StringFileInfo and VarFileInfo structures
# Get the first one
stringfileinfo_offset = self.dword_align(
fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
version_struct.OffsetToData)
original_stringfileinfo_offset = stringfileinfo_offset
# Set the PE object's attribute that will contain them all.
self.FileInfo = list()
while True:
# Process the StringFileInfo/VarFileInfo structure
stringfileinfo_struct = self.__unpack_data__(
self.__StringFileInfo_format__,
raw_data[stringfileinfo_offset:],
file_offset = start_offset+stringfileinfo_offset )
if stringfileinfo_struct is None:
self.__warnings.append(
'Error parsing StringFileInfo/VarFileInfo struct' )
return None
# Get the subsequent string defining the structure.
ustr_offset = ( version_struct.OffsetToData +
stringfileinfo_offset + versioninfo_struct.sizeof() )
try:
stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError as excp:
self.__warnings.append(
'Error parsing the version information, '
'attempting to read StringFileInfo string. Can\'t '
'read unicode string at offset 0x{0:x}'.format(ustr_offset))
break
# Set such string as the Key attribute
stringfileinfo_struct.Key = stringfileinfo_string
# Append the structure to the PE object's list
self.FileInfo.append(stringfileinfo_struct)
# Parse a StringFileInfo entry
if stringfileinfo_string and stringfileinfo_string.startswith(b'StringFileInfo'):
if stringfileinfo_struct.Type in (0,1) and stringfileinfo_struct.ValueLength == 0:
stringtable_offset = self.dword_align(
stringfileinfo_offset + stringfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
stringfileinfo_struct.StringTable = list()
# Process the String Table entries
while True:
stringtable_struct = self.__unpack_data__(
self.__StringTable_format__,
raw_data[stringtable_offset:],
file_offset = start_offset+stringtable_offset )
if not stringtable_struct:
break
ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
stringtable_struct.sizeof() )
try:
stringtable_string = self.get_string_u_at_rva(ustr_offset)
except PEFormatError as excp:
self.__warnings.append(
'Error parsing the version information, '
'attempting to read StringTable string. Can\'t '
'read unicode string at offset 0x{0:x}'.format(ustr_offset) )
break
stringtable_struct.LangID = stringtable_string
stringtable_struct.entries = dict()
stringtable_struct.entries_offsets = dict()
stringtable_struct.entries_lengths = dict()
stringfileinfo_struct.StringTable.append(stringtable_struct)
entry_offset = self.dword_align(
stringtable_offset + stringtable_struct.sizeof() +
2*(len(stringtable_string)+1),
version_struct.OffsetToData)
# Process all entries in the string table
while entry_offset < stringtable_offset + stringtable_struct.Length:
string_struct = self.__unpack_data__(
self.__String_format__, raw_data[entry_offset:],
file_offset = start_offset+entry_offset )
if not string_struct:
break
ustr_offset = ( version_struct.OffsetToData + entry_offset +
string_struct.sizeof() )
try:
key = self.get_string_u_at_rva( ustr_offset )
key_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError as excp:
self.__warnings.append(
'Error parsing the version information, '
'attempting to read StringTable Key string. Can\'t '
'read unicode string at offset 0x{0:x}'.format(ustr_offset))
break
value_offset = self.dword_align(
2*(len(key)+1) + entry_offset + string_struct.sizeof(),
version_struct.OffsetToData)
ustr_offset = version_struct.OffsetToData + value_offset
try:
value = self.get_string_u_at_rva( ustr_offset,
max_length = string_struct.ValueLength )
value_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError as excp:
self.__warnings.append(
'Error parsing the version information, '
'attempting to read StringTable Value string. '
'Can\'t read unicode string at offset 0x{0:x}'.format(
ustr_offset))
break
if string_struct.Length == 0:
entry_offset = stringtable_offset + stringtable_struct.Length
else:
entry_offset = self.dword_align(
string_struct.Length+entry_offset, version_struct.OffsetToData)
stringtable_struct.entries[key] = value
stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
stringtable_struct.entries_lengths[key] = (len(key), len(value))
new_stringtable_offset = self.dword_align(
stringtable_struct.Length + stringtable_offset,
version_struct.OffsetToData)
# Check if the entry is crafted in a way that would lead
# to an infinite loop and break if so.
if new_stringtable_offset == stringtable_offset:
break
stringtable_offset = new_stringtable_offset
if stringtable_offset >= stringfileinfo_struct.Length:
break
# Parse a VarFileInfo entry
elif stringfileinfo_string and stringfileinfo_string.startswith( b'VarFileInfo' ):
varfileinfo_struct = stringfileinfo_struct
varfileinfo_struct.name = 'VarFileInfo'
if varfileinfo_struct.Type in (0, 1) and varfileinfo_struct.ValueLength == 0:
var_offset = self.dword_align(
stringfileinfo_offset + varfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
varfileinfo_struct.Var = list()
# Process all entries
while True:
var_struct = self.__unpack_data__(
self.__Var_format__,
raw_data[var_offset:],
file_offset = start_offset+var_offset )
if not var_struct:
break
ustr_offset = ( version_struct.OffsetToData + var_offset +
var_struct.sizeof() )
try:
var_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError as excp:
self.__warnings.append(
'Error parsing the version information, '
'attempting to read VarFileInfo Var string. '
'Can\'t read unicode string at offset 0x{0:x}'.format(ustr_offset))
break
if var_string is None:
break
varfileinfo_struct.Var.append(var_struct)
varword_offset = self.dword_align(
2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
version_struct.OffsetToData)
orig_varword_offset = varword_offset
while varword_offset < orig_varword_offset + var_struct.ValueLength:
word1 = self.get_word_from_data(
raw_data[varword_offset:varword_offset+2], 0)
word2 = self.get_word_from_data(
raw_data[varword_offset+2:varword_offset+4], 0)
varword_offset += 4
if isinstance(word1, int) and isinstance(word2, int):
var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
var_offset = self.dword_align(
var_offset+var_struct.Length, version_struct.OffsetToData)
if var_offset <= var_offset+var_struct.Length:
break
# Increment and align the offset
stringfileinfo_offset = self.dword_align(
stringfileinfo_struct.Length+stringfileinfo_offset,
version_struct.OffsetToData)
# Check if all the StringFileInfo and VarFileInfo items have been processed
if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
break
def parse_export_directory(self, rva, size, forwarded_only=False):
"""Parse the export directory.
Given the RVA of the export directory, it will process all
its entries.
The exports will be made available as a list of ExportData
instances in the 'DIRECTORY_ENTRY_EXPORT' PE attribute.
"""
try:
export_dir = self.__unpack_data__(
self.__IMAGE_EXPORT_DIRECTORY_format__,
self.get_data( rva, Structure(self.__IMAGE_EXPORT_DIRECTORY_format__).sizeof() ),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
if not export_dir:
return
# We keep track of the bytes left in the file and use it to set an upper
# bound on the number of items that can be read from the different
# arrays.
def length_until_eof(rva):
return len(self.__data__) - self.get_offset_from_rva(rva)
try:
address_of_names = self.get_data(
export_dir.AddressOfNames,
min(length_until_eof(export_dir.AddressOfNames),
export_dir.NumberOfNames*4))
address_of_name_ordinals = self.get_data(
export_dir.AddressOfNameOrdinals,
min(length_until_eof(export_dir.AddressOfNameOrdinals),
export_dir.NumberOfNames*4))
address_of_functions = self.get_data(
export_dir.AddressOfFunctions,
min(length_until_eof(export_dir.AddressOfFunctions),
export_dir.NumberOfFunctions*4))
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
exports = []
max_failed_entries_before_giving_up = 10
section = self.get_section_by_rva(export_dir.AddressOfNames)
# Overly generous upper bound
safety_boundary = len(self.__data__)
if section:
safety_boundary = (
section.VirtualAddress + len(section.get_data()) -
export_dir.AddressOfNames)
symbol_counter = Counter()
export_parsing_loop_completed_normally = True
for i in range(min(export_dir.NumberOfNames, int(safety_boundary / 4))):
symbol_ordinal = self.get_word_from_data(
address_of_name_ordinals, i)
if (symbol_ordinal is not None and
symbol_ordinal*4 < len(address_of_functions)):
symbol_address = self.get_dword_from_data(
address_of_functions, symbol_ordinal)
else:
# Corrupt? a bad pointer... we assume it's all
# useless, no exports
return None
if symbol_address is None or symbol_address == 0:
continue
# If the function's RVA points within the export directory
# it will point to the forwarded symbol's name string
# instead of pointing to the function's start address.
if symbol_address >= rva and symbol_address < rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
try:
forwarder_offset = self.get_offset_from_rva( symbol_address )
except PEFormatError:
continue
else:
if forwarded_only:
continue
forwarder_str = None
forwarder_offset = None
symbol_name_address = self.get_dword_from_data(address_of_names, i)
if symbol_name_address is None:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
export_parsing_loop_completed_normally = False
break
symbol_name = self.get_string_at_rva(symbol_name_address, MAX_SYMBOL_NAME_LENGTH)
if not is_valid_function_name(symbol_name):
export_parsing_loop_completed_normally = False
break
try:
symbol_name_offset = self.get_offset_from_rva(symbol_name_address)
except PEFormatError:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
export_parsing_loop_completed_normally = False
break
try:
symbol_name_offset = self.get_offset_from_rva( symbol_name_address )
except PEFormatError:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
export_parsing_loop_completed_normally = False
break
continue
# File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1
# was being parsed as potentially containing millions of exports.
# Checking for duplicates addresses the issue.
most_common = symbol_counter.most_common(1)
if most_common and most_common[0][1] > 10:
self.__warnings.append(
'Export directory contains more than 10 repeated entries. Assuming corrupt.')
break
symbol_counter[(symbol_name, symbol_address)] += 1
exports.append(
ExportData(
pe = self,
ordinal = export_dir.Base+symbol_ordinal,
ordinal_offset = self.get_offset_from_rva( export_dir.AddressOfNameOrdinals + 2*i ),
address = symbol_address,
address_offset = self.get_offset_from_rva( export_dir.AddressOfFunctions + 4*symbol_ordinal ),
name = symbol_name,
name_offset = symbol_name_offset,
forwarder = forwarder_str,
forwarder_offset = forwarder_offset ))
if not export_parsing_loop_completed_normally:
self.__warnings.append(
'RVA AddressOfNames in the export directory points to an invalid address: %x' %
export_dir.AddressOfNames)
ordinals = [exp.ordinal for exp in exports]
max_failed_entries_before_giving_up = 10
section = self.get_section_by_rva(export_dir.AddressOfFunctions)
# Overly generous upper bound
safety_boundary = len(self.__data__)
if section:
safety_boundary = (
section.VirtualAddress + len(section.get_data()) -
export_dir.AddressOfFunctions)
symbol_counter = Counter()
export_parsing_loop_completed_normally = True
for idx in range(min(
export_dir.NumberOfFunctions,
int(safety_boundary / 4))):
if not idx+export_dir.Base in ordinals:
try:
symbol_address = self.get_dword_from_data(
address_of_functions, idx)
except PEFormatError:
symbol_address = None
if symbol_address is None:
max_failed_entries_before_giving_up -= 1
if max_failed_entries_before_giving_up <= 0:
export_parsing_loop_completed_normally = False
break
if symbol_address == 0:
continue
# Checking for forwarder again.
if symbol_address is not None and symbol_address >= rva and symbol_address < rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
# File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1
# was being parsed as potentially containing millions of exports.
# Checking for duplicates addresses the issue.
most_common = symbol_counter.most_common(1)
if most_common and most_common[0][1] > 10:
self.__warnings.append(
'Export directory contains more than 10 repeated ordinal entries. Assuming corrupt.')
break
symbol_counter[symbol_address] += 1
exports.append(
ExportData(
ordinal = export_dir.Base+idx,
address = symbol_address,
name = None,
forwarder = forwarder_str))
if not export_parsing_loop_completed_normally:
self.__warnings.append(
'RVA AddressOfFunctions in the export directory points to an invalid address: %x' %
export_dir.AddressOfFunctions)
return
if not exports and export_dir.all_zeroes():
return None
return ExportDirData(struct=export_dir, symbols=exports,
name=self.get_string_at_rva(export_dir.Name))
def dword_align(self, offset, base):
return ((offset+base+3) & 0xfffffffc) - (base & 0xfffffffc)
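# Worked example of the alignment arithmetic above (assuming 'base' is the
# start RVA of the block being parsed): with a dword-aligned base the result
# is simply 'offset' rounded up to the next multiple of 4, e.g.
# dword_align(1, 0) == 4, dword_align(4, 0) == 4, dword_align(5, 0) == 8.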
def parse_delay_import_directory(self, rva, size):
"""Walk and parse the delay import directory."""
import_descs = []
error_count = 0
while True:
try:
# If the RVA is invalid all would blow up. Some PEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data( rva, Structure(self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__).sizeof() )
except PEFormatError as e:
self.__warnings.append(
'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) )
break
file_offset = self.get_offset_from_rva(rva)
import_desc = self.__unpack_data__(
self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
data, file_offset = file_offset )
# If the structure is all zeros, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
# If the array of thunks is somewhere earlier than the import
# descriptor we can set a maximum length for the array. Otherwise
# just set the maximum length to the size of the file
max_len = len(self.__data__) - file_offset
if rva > import_desc.pINT or rva > import_desc.pIAT:
max_len = max(rva-import_desc.pINT, rva-import_desc.pIAT)
import_data = []
try:
import_data = self.parse_imports(
import_desc.pINT,
import_desc.pIAT,
None,
max_length = max_len)
except PEFormatError as e:
self.__warnings.append(
'Error parsing the Delay import directory. '
'Invalid import data at RVA: 0x{0:x} ({1})'.format(
rva, e.value))
if error_count > 5:
self.__warnings.append(
'Too many errors parsing the Delay import directory. '
'Invalid import data at RVA: 0x{0:x}'.format(rva) )
break
if not import_data:
error_count += 1
continue
dll = self.get_string_at_rva(import_desc.szName, MAX_DLL_LENGTH)
if not is_valid_dos_filename(dll):
dll = b('*invalid*')
if dll:
for symbol in import_data:
if symbol.name is None:
funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal)
if funcname:
symbol.name = funcname
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
def get_imphash(self):
impstrs = []
exts = ['ocx', 'sys', 'dll']
if not hasattr(self, "DIRECTORY_ENTRY_IMPORT"):
return ""
for entry in self.DIRECTORY_ENTRY_IMPORT:
if isinstance(entry.dll, bytes):
libname = entry.dll.decode().lower()
else:
libname = entry.dll.lower()
parts = libname.rsplit('.', 1)
if len(parts) > 1 and parts[1] in exts:
libname = parts[0]
for imp in entry.imports:
funcname = None
if not imp.name:
funcname = ordlookup.ordLookup(entry.dll.lower(), imp.ordinal, make_name=True)
if not funcname:
raise Exception("Unable to look up ordinal %s:%04x" % (entry.dll, imp.ordinal))
else:
funcname = imp.name
if not funcname:
continue
if isinstance(funcname, bytes):
funcname = funcname.decode()
impstrs.append('%s.%s' % (libname.lower(),funcname.lower()))
return md5( ','.join( impstrs ).encode() ).hexdigest()
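# Usage note (sketch): the import hash is the MD5 of the normalized
# "libname.funcname" list built above, so the import directory must have
# been parsed first (a default load, or parse_data_directories() after a
# fast_load). For example:
#
#   pe = PE('sample.exe')
#   print(pe.get_imphash())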
def parse_import_directory(self, rva, size, dllnames_only=False):
"""Walk and parse the import directory."""
import_descs = []
error_count = 0
while True:
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# especially nasty and have an invalid RVA.
data = self.get_data(rva, Structure(
self.__IMAGE_IMPORT_DESCRIPTOR_format__).sizeof() )
except PEFormatError as e:
self.__warnings.append(
'Error parsing the import directory at RVA: 0x%x' % ( rva ) )
break
file_offset = self.get_offset_from_rva(rva)
import_desc = self.__unpack_data__(
self.__IMAGE_IMPORT_DESCRIPTOR_format__,
data, file_offset = file_offset )
# If the structure is all zeros, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
# If the array of thunks is somewhere earlier than the import
# descriptor we can set a maximum length for the array. Otherwise
# just set the maximum length to the size of the file
max_len = len(self.__data__) - file_offset
if rva > import_desc.OriginalFirstThunk or rva > import_desc.FirstThunk:
max_len = max(rva-import_desc.OriginalFirstThunk, rva-import_desc.FirstThunk)
import_data = []
if not dllnames_only:
try:
import_data = self.parse_imports(
import_desc.OriginalFirstThunk,
import_desc.FirstThunk,
import_desc.ForwarderChain,
max_length = max_len)
except PEFormatError as e:
self.__warnings.append(
'Error parsing the import directory. '
'Invalid Import data at RVA: 0x{0:x} ({1})'.format(
rva, e.value))
if error_count > 5:
self.__warnings.append(
                        'Too many errors parsing the import directory. '
'Invalid import data at RVA: 0x{0:x}'.format(rva) )
break
if not import_data:
error_count += 1
# TODO: do not continue here
continue
dll = self.get_string_at_rva(import_desc.Name, MAX_DLL_LENGTH)
if not is_valid_dos_filename(dll):
dll = b('*invalid*')
if dll:
for symbol in import_data:
if symbol.name is None:
funcname = ordlookup.ordLookup(dll.lower(), symbol.ordinal)
if funcname:
symbol.name = funcname
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
if not dllnames_only:
suspicious_imports = set([ u'LoadLibrary', u'GetProcAddress' ])
suspicious_imports_count = 0
total_symbols = 0
for imp_dll in import_descs:
for symbol in imp_dll.imports:
for suspicious_symbol in suspicious_imports:
if not symbol or not symbol.name:
continue
name = symbol.name
if type(symbol.name) == bytes:
name = symbol.name.decode('utf-8')
if name.startswith(suspicious_symbol):
suspicious_imports_count += 1
break
total_symbols += 1
if suspicious_imports_count == len(suspicious_imports) and total_symbols < 20:
self.__warnings.append(
'Imported symbols contain entries typical of packed executables.' )
return import_descs
def parse_imports(
self, original_first_thunk, first_thunk,
forwarder_chain, max_length=None):
"""Parse the imported symbols.
It will fill a list, which will be available as the dictionary
attribute "imports". Its keys will be the DLL names and the values
all the symbols imported from that object.
"""
imported_symbols = []
# Import Lookup Table. Contains ordinals or pointers to strings.
ilt = self.get_import_table(original_first_thunk, max_length)
        # Import Address Table. May have identical content to the ILT if the
        # PE file is not bound. It will contain the addresses of the
        # imported symbols once the binary is loaded or if it is already
        # bound.
iat = self.get_import_table(first_thunk, max_length)
# OC Patch:
# Would crash if IAT or ILT had None type
if (not iat or len(iat)==0) and (not ilt or len(ilt)==0):
self.__warnings.append(
'Damaged Import Table information. '
'ILT and/or IAT appear to be broken. '
'OriginalFirstThunk: 0x{0:x} FirstThunk: 0x{1:x}'.format(
original_first_thunk, first_thunk))
return []
table = None
if ilt:
table = ilt
elif iat:
table = iat
else:
return None
imp_offset = 4
address_mask = 0x7fffffff
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
imp_offset = 8
address_mask = 0x7fffffffffffffff
else:
# Some PEs may have an invalid value in the Magic field of the
# Optional Header. Just in case the remaining file is parseable
# let's pretend it's a 32bit PE32 by default.
ordinal_flag = IMAGE_ORDINAL_FLAG
num_invalid = 0
for idx in range(len(table)):
imp_ord = None
imp_hint = None
imp_name = None
name_offset = None
hint_name_table_rva = None
if table[idx].AddressOfData:
# If imported by ordinal, we will append the ordinal number
#
if table[idx].AddressOfData & ordinal_flag:
import_by_ordinal = True
imp_ord = table[idx].AddressOfData & 0xffff
imp_name = None
name_offset = None
else:
import_by_ordinal = False
try:
hint_name_table_rva = table[idx].AddressOfData & address_mask
data = self.get_data(hint_name_table_rva, 2)
# Get the Hint
imp_hint = self.get_word_from_data(data, 0)
imp_name = self.get_string_at_rva(table[idx].AddressOfData+2, MAX_IMPORT_NAME_LENGTH)
if not is_valid_function_name(imp_name):
imp_name = b('*invalid*')
name_offset = self.get_offset_from_rva(table[idx].AddressOfData+2)
except PEFormatError as e:
pass
# by nriva: we want the ThunkRVA and ThunkOffset
thunk_offset = table[idx].get_file_offset()
thunk_rva = self.get_rva_from_offset(thunk_offset)
imp_address = first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * imp_offset
struct_iat = None
try:
if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
imp_bound = iat[idx].AddressOfData
struct_iat = iat[idx]
else:
imp_bound = None
except IndexError:
imp_bound = None
# The file with hashes:
#
# MD5: bfe97192e8107d52dd7b4010d12b2924
# SHA256: 3d22f8b001423cb460811ab4f4789f277b35838d45c62ec0454c877e7c82c7f5
#
# has an invalid table built in a way that it's parseable but contains invalid
# entries that lead pefile to take extremely long amounts of time to
# parse. It also leads to extreme memory consumption.
# To prevent similar cases, if invalid entries are found in the middle of a
# table the parsing will be aborted
#
if imp_ord == None and imp_name == None:
raise PEFormatError('Invalid entries, aborting parsing.')
# Some PEs appear to interleave valid and invalid imports. Instead of
# aborting the parsing altogether we will simply skip the invalid entries.
# Although if we see 1000 invalid entries and no legit ones, we abort.
if imp_name == b('*invalid*'):
if num_invalid > 1000 and num_invalid == idx:
raise PEFormatError('Too many invalid names, aborting parsing.')
num_invalid += 1
continue
if imp_ord or imp_name:
imported_symbols.append(
ImportData(
pe = self,
struct_table = table[idx],
struct_iat = struct_iat, # for bound imports if any
import_by_ordinal = import_by_ordinal,
ordinal = imp_ord,
ordinal_offset = table[idx].get_file_offset(),
hint = imp_hint,
name = imp_name,
name_offset = name_offset,
bound = imp_bound,
address = imp_address,
hint_name_table_rva = hint_name_table_rva,
thunk_offset = thunk_offset,
thunk_rva = thunk_rva ))
return imported_symbols
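    # Usage sketch (hedged): after a regular (non fast_load) parse the results of
    # this method are exposed through the DIRECTORY_ENTRY_IMPORT attribute; 'pe'
    # is an already-parsed instance of this class.
    #
    #   for entry in pe.DIRECTORY_ENTRY_IMPORT:
    #       print(entry.dll)
    #       for imp in entry.imports:
    #           print('   ', hex(imp.address), imp.name or imp.ordinal)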
def get_import_table(self, rva, max_length=None):
table = []
# We need the ordinal flag for a simple heuristic
# we're implementing within the loop
#
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
format = self.__IMAGE_THUNK_DATA_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
format = self.__IMAGE_THUNK_DATA64_format__
else:
# Some PEs may have an invalid value in the Magic field of the
# Optional Header. Just in case the remaining file is parseable
# let's pretend it's a 32bit PE32 by default.
ordinal_flag = IMAGE_ORDINAL_FLAG
format = self.__IMAGE_THUNK_DATA_format__
        MAX_ADDRESS_SPREAD = 128*2**20 # 128 MB
MAX_REPEATED_ADDRESSES = 15
repeated_address = 0
addresses_of_data_set_64 = set()
addresses_of_data_set_32 = set()
start_rva = rva
while True and rva:
if max_length is not None and rva >= start_rva+max_length:
self.__warnings.append(
'Error parsing the import table. Entries go beyond bounds.')
break
# if we see too many times the same entry we assume it could be
# a table containing bogus data (with malicious intent or otherwise)
if repeated_address >= MAX_REPEATED_ADDRESSES:
return []
# if the addresses point somewhere but the difference between the highest
# and lowest address is larger than MAX_ADDRESS_SPREAD we assume a bogus
# table as the addresses should be contained within a module
if (addresses_of_data_set_32 and
max(addresses_of_data_set_32) - min(addresses_of_data_set_32) > MAX_ADDRESS_SPREAD ):
return []
if (addresses_of_data_set_64 and
max(addresses_of_data_set_64) - min(addresses_of_data_set_64) > MAX_ADDRESS_SPREAD ):
return []
failed = False
try:
data = self.get_data(rva, Structure(format).sizeof())
except PEFormatError as e:
failed = True
if failed or len(data) != Structure(format).sizeof():
self.__warnings.append(
'Error parsing the import table. '
'Invalid data at RVA: 0x%x' % rva)
return None
thunk_data = self.__unpack_data__(
format, data, file_offset=self.get_offset_from_rva(rva) )
# Check if the AddressOfData lies within the range of RVAs that it's
# being scanned, abort if that is the case, as it is very unlikely
# to be legitimate data.
# Seen in PE with SHA256:
# 5945bb6f0ac879ddf61b1c284f3b8d20c06b228e75ae4f571fa87f5b9512902c
if thunk_data and thunk_data.AddressOfData >= start_rva and thunk_data.AddressOfData <= rva:
self.__warnings.append(
'Error parsing the import table. '
'AddressOfData overlaps with THUNK_DATA for '
'THUNK at RVA 0x%x' % ( rva ) )
break
if thunk_data and thunk_data.AddressOfData:
                # If the entry looks like it could be an ordinal...
if thunk_data.AddressOfData & ordinal_flag:
                    # but its value is beyond 2^16, we will assume it's a
                    # corrupted entry and ignore it altogether
if thunk_data.AddressOfData & 0x7fffffff > 0xffff:
return []
# and if it looks like it should be an RVA
else:
# keep track of the RVAs seen and store them to study their
# properties. When certain non-standard features are detected
# the parsing will be aborted
if (thunk_data.AddressOfData in addresses_of_data_set_32 or
thunk_data.AddressOfData in addresses_of_data_set_64):
repeated_address += 1
if thunk_data.AddressOfData >= 2**32:
addresses_of_data_set_64.add(thunk_data.AddressOfData)
else:
addresses_of_data_set_32.add(thunk_data.AddressOfData)
if not thunk_data or thunk_data.all_zeroes():
break
rva += thunk_data.sizeof()
table.append(thunk_data)
return table
def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None):
"""Returns the data corresponding to the memory layout of the PE file.
The data includes the PE header and the sections loaded at offsets
corresponding to their relative virtual addresses. (the VirtualAddress
section header member).
Any offset in this data corresponds to the absolute memory address
ImageBase+offset.
        The optional argument 'max_virtual_address' provides a means of limiting
        which sections are processed.
        Any section whose VirtualAddress lies beyond this value will be skipped.
Normally, sections with values beyond this range are just there to confuse
tools. It's a common trick to see in packed executables.
If the 'ImageBase' optional argument is supplied, the file's relocations
will be applied to the image by calling the 'relocate_image()' method. Beware
that the relocation information is applied permanently.
"""
# Rebase if requested
#
if ImageBase is not None:
# Keep a copy of the image's data before modifying it by rebasing it
#
original_data = self.__data__
self.relocate_image(ImageBase)
# Collect all sections in one code block
mapped_data = self.__data__[:]
for section in self.sections:
# Miscellaneous integrity tests.
# Some packer will set these to bogus values to make tools go nuts.
if section.Misc_VirtualSize == 0 and section.SizeOfRawData == 0:
continue
if section.SizeOfRawData > len(self.__data__):
continue
if self.adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
continue
VirtualAddress_adj = self.adjust_SectionAlignment( section.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment )
if VirtualAddress_adj >= max_virtual_address:
continue
padding_length = VirtualAddress_adj - len(mapped_data)
if padding_length>0:
mapped_data += b'\0'*padding_length
elif padding_length<0:
mapped_data = mapped_data[:padding_length]
mapped_data += section.get_data()
# If the image was rebased, restore it to its original form
#
if ImageBase is not None:
self.__data__ = original_data
return mapped_data
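    # Usage sketch (hedged): dump an approximation of the loaded image to disk;
    # 'pe' is an already-parsed instance and the output path is a placeholder.
    #
    #   mapped = pe.get_memory_mapped_image()
    #   with open('image.bin', 'wb') as f:
    #       f.write(mapped)
    #
    # Passing ImageBase=0x10000000 would additionally apply the base relocations
    # for that base before returning the data (see relocate_image further below).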
def get_resources_strings(self):
"""Returns a list of all the strings found withing the resources (if any).
This method will scan all entries in the resources directory of the PE, if
there is one, and will return a list() with the strings.
An empty list will be returned otherwise.
"""
resources_strings = list()
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
if hasattr(resource_type, 'directory'):
for resource_id in resource_type.directory.entries:
if hasattr(resource_id, 'directory'):
if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
for res_string in list(resource_id.directory.strings.values()):
resources_strings.append(res_string)
return resources_strings
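    # Usage sketch (hedged): 'pe' is an already-parsed instance of this class.
    #
    #   for res_string in pe.get_resources_strings():
    #       print(res_string)
    #
    # An empty list is returned when the file has no resource directory or no
    # string tables inside it.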
def get_data(self, rva=0, length=None):
"""Get data regardless of the section where it lies on.
Given a RVA and the size of the chunk to retrieve, this method
will find the section where the data lies and return the data.
"""
s = self.get_section_by_rva(rva)
if length:
end = rva + length
else:
end = None
if not s:
if rva < len(self.header):
return self.header[rva:end]
# Before we give up we check whether the file might
# contain the data anyway. There are cases of PE files
            # without sections that rely on Windows loading the first
            # 8192 bytes into memory and assume the data will be
# there
# A functional file with these characteristics is:
# MD5: 0008892cdfbc3bda5ce047c565e52295
# SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9
#
if rva < len(self.__data__):
return self.__data__[rva:end]
raise PEFormatError('data at RVA can\'t be fetched. Corrupt header?')
return s.get_data(rva, length)
def get_rva_from_offset(self, offset):
"""Get the RVA corresponding to this file offset. """
s = self.get_section_by_offset(offset)
if not s:
if self.sections:
lowest_rva = min( [ self.adjust_SectionAlignment( s.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections] )
if offset < lowest_rva:
# We will assume that the offset lies within the headers, or
# at least points before where the earliest section starts
# and we will simply return the offset as the RVA
#
# The case illustrating this behavior can be found at:
# http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html
# where the import table is not contained by any section
# hence the RVA needs to be resolved to a raw offset
return offset
return None
else:
return offset
#raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
return s.get_rva_from_offset(offset)
def get_offset_from_rva(self, rva):
"""Get the file offset corresponding to this RVA.
        Given an RVA, this method will find the section where the
data lies and return the offset within the file.
"""
s = self.get_section_by_rva(rva)
if not s:
# If not found within a section assume it might
# point to overlay data or otherwise data present
# but not contained in any section. In those
# cases the RVA should equal the offset
if rva < len(self.__data__):
return rva
raise PEFormatError('data at RVA can\'t be fetched. Corrupt header?')
return s.get_offset_from_rva(rva)
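    # Worked example (hedged, illustrative values): for a typical section with
    # VirtualAddress 0x1000 and PointerToRawData 0x400, an RVA of 0x1010 maps to
    # file offset 0x410, and get_rva_from_offset(0x410) maps back to 0x1010.
    # RVAs that fall outside every section but still lie within the file data
    # map to themselves, as implemented above.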
def get_string_at_rva(self, rva, max_length=MAX_STRING_LENGTH):
"""Get an ASCII string located at the given address."""
if rva is None:
return None
s = self.get_section_by_rva(rva)
if not s:
return self.get_string_from_data(0, self.__data__[rva:rva+max_length])
return self.get_string_from_data(0, s.get_data(rva, length=max_length))
def get_bytes_from_data(self, offset, data):
"""."""
if offset > len(data):
return b''
d = data[offset:]
if isinstance(d, bytearray):
return bytes(d)
return d
def get_string_from_data(self, offset, data):
"""Get an ASCII string from data."""
s = self.get_bytes_from_data(offset, data)
end = s.find(b'\0')
if end >= 0:
s = s[:end]
return s
def get_string_u_at_rva(self, rva, max_length = 2**16, encoding=None):
"""Get an Unicode string located at the given address."""
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
            # especially nasty and have an invalid RVA.
data = self.get_data(rva, 2)
except PEFormatError as e:
return None
# max_length is the maximum count of 16bit characters
# needs to be doubled to get size in bytes
max_length <<= 1
requested = min(max_length, 256)
data = self.get_data(rva, requested)
# try to find null-termination
null_index = -1
while True:
null_index = data.find(b'\x00\x00', null_index + 1)
if null_index == -1:
data_length = len(data)
if data_length < requested or data_length == max_length:
null_index = len(data) >> 1
break
else:
# Request remaining part of data limited by max_length
data += self.get_data(rva + data_length, max_length - data_length)
null_index = requested - 1
requested = max_length
elif null_index % 2 == 0:
null_index >>= 1
break
# convert selected part of the string to unicode
uchrs = struct.unpack('<{:d}H'.format(null_index), data[:null_index * 2])
s = u''.join(map(chr, uchrs))
if encoding:
return b(s.encode(encoding, 'backslashreplace_'))
return b(s.encode('utf-8', 'backslashreplace_'))
def get_section_by_offset(self, offset):
"""Get the section containing the given file offset."""
sections = [s for s in self.sections if s.contains_offset(offset)]
if sections:
return sections[0]
return None
def get_section_by_rva(self, rva):
"""Get the section containing the given address."""
sections = [s for s in self.sections if s.contains_rva(rva)]
if sections:
return sections[0]
return None
def __str__(self):
return self.dump_info()
def has_relocs(self):
"""Checks if the PE file has relocation directory"""
return hasattr(self, 'DIRECTORY_ENTRY_BASERELOC')
def print_info(self, encoding='utf-8'):
"""Print all the PE header information in a human readable from."""
print(self.dump_info(encoding=encoding))
def dump_info(self, dump=None, encoding='ascii'):
"""Dump all the PE header information into human readable string."""
if dump is None:
dump = Dump()
warnings = self.get_warnings()
if warnings:
dump.add_header('Parsing Warnings')
for warning in warnings:
dump.add_line(warning)
dump.add_newline()
dump.add_header('DOS_HEADER')
dump.add_lines(self.DOS_HEADER.dump())
dump.add_newline()
dump.add_header('NT_HEADERS')
dump.add_lines(self.NT_HEADERS.dump())
dump.add_newline()
dump.add_header('FILE_HEADER')
dump.add_lines(self.FILE_HEADER.dump())
image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
dump.add('Flags: ')
flags = []
for flag in sorted(image_flags):
if getattr(self.FILE_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
dump.add_header('OPTIONAL_HEADER')
dump.add_lines(self.OPTIONAL_HEADER.dump())
dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
dump.add('DllCharacteristics: ')
flags = []
for flag in sorted(dll_characteristics_flags):
if getattr(self.OPTIONAL_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
dump.add_header('PE Sections')
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
for section in self.sections:
dump.add_lines(section.dump())
dump.add('Flags: ')
flags = []
for flag in sorted(section_flags):
if getattr(section, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_line('Entropy: {0:f} (Min=0.0, Max=8.0)'.format(
section.get_entropy()))
if md5 is not None:
dump.add_line('MD5 hash: {0}'.format(
section.get_hash_md5()))
if sha1 is not None:
dump.add_line('SHA-1 hash: %s' % section.get_hash_sha1() )
if sha256 is not None:
dump.add_line('SHA-256 hash: %s' % section.get_hash_sha256() )
if sha512 is not None:
dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512() )
dump.add_newline()
if (hasattr(self, 'OPTIONAL_HEADER') and
hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
dump.add_header('Directories')
for idx in range(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
dump.add_lines(directory.dump())
dump.add_newline()
if hasattr(self, 'VS_VERSIONINFO'):
dump.add_header('Version Information')
dump.add_lines(self.VS_VERSIONINFO.dump())
dump.add_newline()
if hasattr(self, 'VS_FIXEDFILEINFO'):
dump.add_lines(self.VS_FIXEDFILEINFO.dump())
dump.add_newline()
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
dump.add_lines(entry.dump())
dump.add_newline()
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
[dump.add_line(u' '+line) for line in st_entry.dump()]
dump.add_line(u' LangID: {0}'.format(
st_entry.LangID.decode(encoding, 'backslashreplace_')))
dump.add_newline()
for str_entry in sorted(list(st_entry.entries.items())):
# try:
dump.add_line( u' {0}: {1}'.format(
str_entry[0].decode(encoding, 'backslashreplace_'),
str_entry[1].decode(encoding, 'backslashreplace_')))
dump.add_newline()
elif hasattr(entry, 'Var'):
for var_entry in entry.Var:
if hasattr(var_entry, 'entry'):
[dump.add_line(' '+line) for line in var_entry.dump()]
dump.add_line(
u' {0}: {1}'.format(
list(var_entry.entry.keys())[0].decode(
'utf-8', 'backslashreplace_'),
list(var_entry.entry.values())[0]))
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
dump.add_header('Exported symbols')
dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump())
dump.add_newline()
dump.add_line(u'%-10s %-10s %s' % ('Ordinal', 'RVA', 'Name'))
for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
if export.address is not None:
name = b('None')
if export.name:
name = export.name
dump.add(u'%-10d 0x%08Xh %s' % (
export.ordinal, export.address, name.decode(encoding)))
if export.forwarder:
dump.add_line(u' forwarder: {0}'.format(
export.forwarder.decode(encoding, 'backslashreplace_')))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
dump.add_header('Imported symbols')
for module in self.DIRECTORY_ENTRY_IMPORT:
dump.add_lines(module.struct.dump())
# Print the name of the DLL if there are no imports.
if not module.imports:
dump.add(' Name -> {0}'.format(
self.get_string_at_rva(module.struct.Name).decode(
encoding, 'backslashreplace_')))
dump.add_newline()
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
if symbol.name is not None:
dump.add('{0}.{1} Ordinal[{2}] (Imported by Ordinal)'.format(
module.dll.decode('utf-8'),
symbol.name.decode('utf-8'),
symbol.ordinal))
else:
dump.add('{0} Ordinal[{1}] (Imported by Ordinal)'.format(
module.dll.decode('utf-8'), symbol.ordinal))
else:
dump.add('{0}.{1} Hint[{2:d}]'.format(
module.dll.decode(encoding, 'backslashreplace_'),
symbol.name.decode(encoding, 'backslashreplace_'),
symbol.hint))
if symbol.bound:
dump.add_line(' Bound: 0x{0:08X}'.format(symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
dump.add_header('Bound imports')
for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
dump.add_lines(bound_imp_desc.struct.dump())
dump.add_line('DLL: {0}'.format(
bound_imp_desc.name.decode(encoding, 'backslashreplace_')))
dump.add_newline()
for bound_imp_ref in bound_imp_desc.entries:
dump.add_lines(bound_imp_ref.struct.dump(), 4)
dump.add_line('DLL: {0}'.format(
bound_imp_ref.name.decode(encoding, 'backslashreplace_')), 4)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
dump.add_header('Delay Imported symbols')
for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
dump.add('{0} Ordinal[{1:d}] (Imported by Ordinal)'.format(
module.dll.decode(encoding, 'backslashreplace_'),
symbol.ordinal))
else:
dump.add('{0}.{1} Hint[{2}]'.format(
module.dll.decode(encoding, 'backslashreplace_'),
symbol.name.decode(encoding, 'backslashreplace_'), symbol.hint))
if symbol.bound:
dump.add_line(' Bound: 0x{0:08X}'.format(symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
dump.add_header('Resource directory')
dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
# name = str(resource_type.name) #.string if resource_type.name.string else ''
dump.add_line(u'Name: [{0}]'.format(
resource_type.name.decode(encoding, 'backslashreplace_')
), 2)
else:
dump.add_line(u'Id: [0x{0:X}] ({1})'.format(
resource_type.struct.Id, RESOURCE_TYPE.get(
resource_type.struct.Id, '-')),
2)
dump.add_lines(resource_type.struct.dump(), 2)
if hasattr(resource_type, 'directory'):
dump.add_lines(resource_type.directory.struct.dump(), 4)
for resource_id in resource_type.directory.entries:
if resource_id.name is not None:
dump.add_line(u'Name: [{0}]'.format(
resource_id.name.decode(
'utf-8', 'backslashreplace_')), 6)
else:
dump.add_line('Id: [0x{0:X}]'.format(resource_id.struct.Id), 6)
dump.add_lines(resource_id.struct.dump(), 6)
if hasattr(resource_id, 'directory'):
dump.add_lines(resource_id.directory.struct.dump(), 8)
for resource_lang in resource_id.directory.entries:
if hasattr(resource_lang, 'data'):
dump.add_line(u'\\--- LANG [%d,%d][%s,%s]' % (
resource_lang.data.lang,
resource_lang.data.sublang,
LANG.get(resource_lang.data.lang, '*unknown*'),
get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang ) ), 8)
dump.add_lines(resource_lang.struct.dump(), 10)
dump.add_lines(resource_lang.data.struct.dump(), 12)
if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
dump.add_line(u'[STRINGS]' , 10 )
for idx, res_string in list(resource_id.directory.strings.items()):
dump.add_line( '{0:6d}: {1}'.format(idx,
res_string.encode(
'unicode-escape',
'backslashreplace').decode(
'ascii')),
12)
dump.add_newline()
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
self.DIRECTORY_ENTRY_TLS and
self.DIRECTORY_ENTRY_TLS.struct ):
dump.add_header('TLS')
dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and
self.DIRECTORY_ENTRY_LOAD_CONFIG and
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):
dump.add_header('LOAD_CONFIG')
dump.add_lines(self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump())
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
dump.add_header('Debug information')
for dbg in self.DIRECTORY_ENTRY_DEBUG:
dump.add_lines(dbg.struct.dump())
try:
dump.add_line('Type: '+DEBUG_TYPE[dbg.struct.Type])
except KeyError:
dump.add_line(
'Type: 0x{0:x}(Unknown)'.format(dbg.struct.Type))
dump.add_newline()
if dbg.entry:
dump.add_lines(dbg.entry.dump(), 4)
dump.add_newline()
if self.has_relocs():
dump.add_header('Base relocations')
for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
dump.add_lines(base_reloc.struct.dump())
for reloc in base_reloc.entries:
try:
dump.add_line('%08Xh %s' % (
reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4)
except KeyError:
dump.add_line('0x%08X 0x%x(Unknown)' % (
reloc.rva, reloc.type), 4)
dump.add_newline()
return dump.get_text()
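    # Usage sketch (hedged): the textual dump can simply be printed or written
    # to a file; 'pe' is an already-parsed instance and the path is a placeholder.
    #
    #   with open('report.txt', 'w') as f:
    #       f.write(pe.dump_info())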
def dump_dict(self, dump=None):
"""Dump all the PE header information into a dictionary."""
dump_dict = dict()
warnings = self.get_warnings()
if warnings:
dump_dict['Parsing Warnings'] = warnings
dump_dict['DOS_HEADER'] = self.DOS_HEADER.dump_dict()
dump_dict['NT_HEADERS'] = self.NT_HEADERS.dump_dict()
dump_dict['FILE_HEADER'] = self.FILE_HEADER.dump_dict()
image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
dump_dict['Flags'] = list()
for flag in image_flags:
if getattr(self.FILE_HEADER, flag[0]):
dump_dict['Flags'].append(flag[0])
if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
dump_dict['OPTIONAL_HEADER'] = self.OPTIONAL_HEADER.dump_dict()
dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
dump_dict['DllCharacteristics'] = list()
for flag in dll_characteristics_flags:
if getattr(self.OPTIONAL_HEADER, flag[0]):
dump_dict['DllCharacteristics'].append(flag[0])
dump_dict['PE Sections'] = list()
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
for section in self.sections:
section_dict = section.dump_dict()
dump_dict['PE Sections'].append(section_dict)
section_dict['Flags'] = list()
for flag in section_flags:
if getattr(section, flag[0]):
section_dict['Flags'].append(flag[0])
section_dict['Entropy'] = section.get_entropy()
if md5 is not None:
section_dict['MD5'] = section.get_hash_md5()
if sha1 is not None:
section_dict['SHA1'] = section.get_hash_sha1()
if sha256 is not None:
section_dict['SHA256'] = section.get_hash_sha256()
if sha512 is not None:
section_dict['SHA512'] = section.get_hash_sha512()
if (hasattr(self, 'OPTIONAL_HEADER') and
hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
dump_dict['Directories'] = list()
for idx in range(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
dump_dict['Directories'].append(directory.dump_dict())
if hasattr(self, 'VS_VERSIONINFO'):
dump_dict['Version Information'] = list()
dump_dict['Version Information'].append(self.VS_VERSIONINFO.dump_dict())
if hasattr(self, 'VS_FIXEDFILEINFO'):
dump_dict['Version Information'].append(self.VS_FIXEDFILEINFO.dump_dict())
if hasattr(self, 'FileInfo'):
fileinfo_list = list()
for entry in self.FileInfo:
fileinfo_list.append(entry.dump_dict())
if hasattr(entry, 'StringTable'):
stringtable_dict = dict()
for st_entry in entry.StringTable:
[fileinfo_list.append(line) for line in st_entry.dump_dict()]
stringtable_dict['LangID'] = st_entry.LangID
for str_entry in list(st_entry.entries.items()):
stringtable_dict[str_entry[0]] = str_entry[1]
fileinfo_list.append(stringtable_dict)
elif hasattr(entry, 'Var'):
for var_entry in entry.Var:
var_dict = dict()
if hasattr(var_entry, 'entry'):
[fileinfo_list.append(line) for line in var_entry.dump_dict()]
var_dict[list(var_entry.entry.keys())[0]] = list(
var_entry.entry.values())[0]
fileinfo_list.append(var_dict)
if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
dump_dict['Exported symbols'] = list()
dump_dict['Exported symbols'].append(self.DIRECTORY_ENTRY_EXPORT.struct.dump_dict())
for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
export_dict = dict()
if export.address is not None:
export_dict.update({'Ordinal': export.ordinal, 'RVA': export.address, 'Name': export.name})
if export.forwarder:
export_dict['forwarder'] = export.forwarder
dump_dict['Exported symbols'].append(export_dict)
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
dump_dict['Imported symbols'] = list()
for module in self.DIRECTORY_ENTRY_IMPORT:
import_list = list()
dump_dict['Imported symbols'].append(import_list)
import_list.append(module.struct.dump_dict())
for symbol in module.imports:
symbol_dict = dict()
if symbol.import_by_ordinal is True:
symbol_dict['DLL'] = module.dll
symbol_dict['Ordinal'] = symbol.ordinal
else:
symbol_dict['DLL'] = module.dll
symbol_dict['Name'] = symbol.name
symbol_dict['Hint'] = symbol.hint
if symbol.bound:
symbol_dict['Bound'] = symbol.bound
import_list.append(symbol_dict)
if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
dump_dict['Bound imports'] = list()
for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
bound_imp_desc_dict = dict()
dump_dict['Bound imports'].append(bound_imp_desc_dict)
bound_imp_desc_dict.update(bound_imp_desc.struct.dump_dict())
bound_imp_desc_dict['DLL'] = bound_imp_desc.name
for bound_imp_ref in bound_imp_desc.entries:
bound_imp_ref_dict = dict()
bound_imp_ref_dict.update(bound_imp_ref.struct.dump_dict())
bound_imp_ref_dict['DLL'] = bound_imp_ref.name
if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
dump_dict['Delay Imported symbols'] = list()
for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
module_list = list()
dump_dict['Delay Imported symbols'].append(module_list)
module_list.append(module.struct.dump_dict())
for symbol in module.imports:
symbol_dict = dict()
if symbol.import_by_ordinal is True:
symbol_dict['DLL'] = module.dll
symbol_dict['Ordinal'] = symbol.ordinal
else:
symbol_dict['DLL'] = module.dll
symbol_dict['Name'] = symbol.name
symbol_dict['Hint'] = symbol.hint
if symbol.bound:
symbol_dict['Bound'] = symbol.bound
module_list.append(symbol_dict)
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
dump_dict['Resource directory'] = list()
dump_dict['Resource directory'].append(self.DIRECTORY_ENTRY_RESOURCE.struct.dump_dict())
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
resource_type_dict = dict()
if resource_type.name is not None:
resource_type_dict['Name'] = resource_type.name
else:
resource_type_dict['Id'] = (
resource_type.struct.Id, RESOURCE_TYPE.get(resource_type.struct.Id, '-'))
resource_type_dict.update(resource_type.struct.dump_dict())
dump_dict['Resource directory'].append(resource_type_dict)
if hasattr(resource_type, 'directory'):
directory_list = list()
directory_list.append(resource_type.directory.struct.dump_dict())
dump_dict['Resource directory'].append(directory_list)
for resource_id in resource_type.directory.entries:
resource_id_dict = dict()
if resource_id.name is not None:
resource_id_dict['Name'] = resource_id.name
else:
resource_id_dict['Id'] = resource_id.struct.Id
resource_id_dict.update(resource_id.struct.dump_dict())
directory_list.append(resource_id_dict)
if hasattr(resource_id, 'directory'):
resource_id_list = list()
resource_id_list.append(resource_id.directory.struct.dump_dict())
directory_list.append(resource_id_list)
for resource_lang in resource_id.directory.entries:
if hasattr(resource_lang, 'data'):
resource_lang_dict = dict()
resource_lang_dict['LANG'] = resource_lang.data.lang
resource_lang_dict['SUBLANG'] = resource_lang.data.sublang
resource_lang_dict['LANG_NAME'] = LANG.get(resource_lang.data.lang, '*unknown*')
resource_lang_dict['SUBLANG_NAME'] = get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)
resource_lang_dict.update(resource_lang.struct.dump_dict())
resource_lang_dict.update(resource_lang.data.struct.dump_dict())
resource_id_list.append(resource_lang_dict)
if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
for idx, res_string in list(resource_id.directory.strings.items()):
resource_id_list.append(res_string.encode(
'unicode-escape',
'backslashreplace').decode(
'ascii'))
if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
self.DIRECTORY_ENTRY_TLS and
self.DIRECTORY_ENTRY_TLS.struct ):
dump_dict['TLS'] = self.DIRECTORY_ENTRY_TLS.struct.dump_dict()
if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and
self.DIRECTORY_ENTRY_LOAD_CONFIG and
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):
dump_dict['LOAD_CONFIG'] = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump_dict()
if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
dump_dict['Debug information'] = list()
for dbg in self.DIRECTORY_ENTRY_DEBUG:
dbg_dict = dict()
dump_dict['Debug information'].append(dbg_dict)
dbg_dict.update(dbg.struct.dump_dict())
dbg_dict['Type'] = DEBUG_TYPE.get(dbg.struct.Type, dbg.struct.Type)
if self.has_relocs():
dump_dict['Base relocations'] = list()
for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
base_reloc_list = list()
dump_dict['Base relocations'].append(base_reloc_list)
base_reloc_list.append(base_reloc.struct.dump_dict())
for reloc in base_reloc.entries:
reloc_dict = dict()
base_reloc_list.append(reloc_dict)
reloc_dict['RVA'] = reloc.rva
try:
reloc_dict['Type'] = RELOCATION_TYPE[reloc.type][16:]
except KeyError:
reloc_dict['Type'] = reloc.type
return dump_dict
# OC Patch
def get_physical_by_rva(self, rva):
"""Gets the physical address in the PE file from an RVA value."""
try:
return self.get_offset_from_rva(rva)
except Exception:
return None
##
# Double-Word get / set
##
def get_data_from_dword(self, dword):
"""Return a four byte string representing the double word value. (little endian)."""
return struct.pack('<L', dword & 0xffffffff)
def get_dword_from_data(self, data, offset):
"""Convert four bytes of data to a double word (little endian)
'offset' is assumed to index into a dword array. So setting it to
N will return a dword out of the data starting at offset N*4.
Returns None if the data can't be turned into a double word.
"""
if (offset+1)*4 > len(data):
return None
return struct.unpack('<I', data[offset*4:(offset+1)*4])[0]
def get_dword_at_rva(self, rva):
"""Return the double word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_dword_from_data(self.get_data(rva, 4), 0)
except PEFormatError:
return None
def get_dword_from_offset(self, offset):
"""Return the double word value at the given file offset. (little endian)"""
if offset+4 > len(self.__data__):
return None
return self.get_dword_from_data(self.__data__[offset:offset+4], 0)
def set_dword_at_rva(self, rva, dword):
"""Set the double word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword))
def set_dword_at_offset(self, offset, dword):
"""Set the double word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
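    # Worked example: struct.pack('<L', 0x11223344) yields b'\x44\x33\x22\x11',
    # so get_dword_from_data(b'\x44\x33\x22\x11', 0) returns 0x11223344 and
    # set_dword_at_offset(offset, 0x11223344) writes those four bytes at 'offset'.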
##
# Word get / set
##
def get_data_from_word(self, word):
"""Return a two byte string representing the word value. (little endian)."""
return struct.pack('<H', word)
def get_word_from_data(self, data, offset):
"""Convert two bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
        N will return a word out of the data starting at offset N*2.
Returns None if the data can't be turned into a word.
"""
if (offset+1)*2 > len(data):
return None
return struct.unpack('<H', data[offset*2:(offset+1)*2])[0]
def get_word_at_rva(self, rva):
"""Return the word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_word_from_data(self.get_data(rva)[:2], 0)
except PEFormatError:
return None
def get_word_from_offset(self, offset):
"""Return the word value at the given file offset. (little endian)"""
if offset+2 > len(self.__data__):
return None
return self.get_word_from_data(self.__data__[offset:offset+2], 0)
def set_word_at_rva(self, rva, word):
"""Set the word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_word(word))
def set_word_at_offset(self, offset, word):
"""Set the word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
##
# Quad-Word get / set
##
def get_data_from_qword(self, word):
"""Return a eight byte string representing the quad-word value. (little endian)."""
return struct.pack('<Q', word)
def get_qword_from_data(self, data, offset):
"""Convert eight bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
N will return a dword out of the data starting at offset N*8.
Returns None if the data can't be turned into a quad word.
"""
if (offset+1)*8 > len(data):
return None
return struct.unpack('<Q', data[offset*8:(offset+1)*8])[0]
def get_qword_at_rva(self, rva):
"""Return the quad-word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_qword_from_data(self.get_data(rva)[:8], 0)
except PEFormatError:
return None
def get_qword_from_offset(self, offset):
"""Return the quad-word value at the given file offset. (little endian)"""
if offset+8 > len(self.__data__):
return None
return self.get_qword_from_data(self.__data__[offset:offset+8], 0)
def set_qword_at_rva(self, rva, qword):
"""Set the quad-word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
def set_qword_at_offset(self, offset, qword):
"""Set the quad-word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword))
##
# Set bytes
##
def set_bytes_at_rva(self, rva, data):
"""Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
if not isinstance(data, bytes):
raise TypeError('data should be of type: bytes')
offset = self.get_physical_by_rva(rva)
if not offset:
return False
return self.set_bytes_at_offset(offset, data)
def set_bytes_at_offset(self, offset, data):
"""Overwrite the bytes at the given file offset with the given string.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
if not isinstance(data, bytes):
raise TypeError('data should be of type: bytes')
if offset >= 0 and offset < len(self.__data__):
self.__data__ = ( self.__data__[:offset] + data + self.__data__[offset+len(data):] )
else:
return False
return True
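    # Usage sketch (hedged): patch a dword at an RVA and save the result; the
    # RVA, value and output name are placeholders, and write() is defined
    # elsewhere in this class.
    #
    #   pe.set_dword_at_rva(0x2000, 0x90909090)
    #   pe.write(filename='patched.exe')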
##### PATCH START #####
def get_bytes_at_rva(self, rva, nbytes):
offset = self.get_physical_by_rva(rva)
if not offset:
raise IndexError("Could not get corresponding offset by RVA")
return self.get_bytes_at_offset(offset, nbytes)
def get_bytes_at_offset(self, offset, nbytes):
        # Use <= so a read that ends exactly at the end of the file data is allowed.
        if offset >= 0 and offset + nbytes <= len(self.__data__):
            return self.__data__[offset:offset+nbytes]
        raise IndexError("Offset out of bounds")
###### PATCH END ######
def merge_modified_section_data(self):
"""Update the PE image content with any individual section data that has been modified."""
for section in self.sections:
section_data_start = self.adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment )
section_data_end = section_data_start+section.SizeOfRawData
if section_data_start < len(self.__data__) and section_data_end < len(self.__data__):
self.__data__ = self.__data__[:section_data_start] + section.get_data() + self.__data__[section_data_end:]
def relocate_image(self, new_ImageBase):
"""Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
        The resulting image can also be retrieved through the method:
            get_memory_mapped_image()
        in order to get something that more closely matches what could be found in
        memory once the Windows loader has finished its work.
"""
relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
for reloc in self.DIRECTORY_ENTRY_BASERELOC:
virtual_address = reloc.struct.VirtualAddress
size_of_block = reloc.struct.SizeOfBlock
# We iterate with an index because if the relocation is of type
# IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
# at once and skip it for the next iteration
#
entry_idx = 0
while entry_idx<len(reloc.entries):
entry = reloc.entries[entry_idx]
entry_idx += 1
if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
# Nothing to do for this type of relocation
pass
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
# Fix the high 16-bits of a relocation
#
# Add high 16-bits of relocation_difference to the
# 16-bit value at RVA=entry.rva
                        self.set_word_at_rva(
                            entry.rva,
                            ( self.get_word_at_rva(entry.rva) + (relocation_difference>>16) ) & 0xffff )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
# Fix the low 16-bits of a relocation
#
# Add low 16 bits of relocation_difference to the 16-bit value
# at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
# Handle all high and low parts of a 32-bit relocation
#
# Add relocation_difference to the value at RVA=entry.rva
self.set_dword_at_rva(
entry.rva,
self.get_dword_at_rva(entry.rva)+relocation_difference)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
# Fix the high 16-bits of a relocation and adjust
#
# Add high 16-bits of relocation_difference to the 32-bit value
# composed from the (16-bit value at RVA=entry.rva)<<16 plus
# the 16-bit value at the next relocation entry.
#
# If the next entry is beyond the array's limits,
# abort... the table is corrupt
#
if entry_idx == len(reloc.entries):
break
next_entry = reloc.entries[entry_idx]
entry_idx += 1
self.set_word_at_rva( entry.rva,
((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
relocation_difference & 0xffff0000) >> 16 )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
# Apply the difference to the 64-bit value at the offset
# RVA=entry.rva
self.set_qword_at_rva(
entry.rva,
self.get_qword_at_rva(entry.rva) + relocation_difference)
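    # Worked example (hedged): rebasing an image with ImageBase 0x400000 to
    # 0x500000 gives relocation_difference = 0x100000; a HIGHLOW entry whose
    # target dword holds 0x403000 is rewritten to 0x503000, and a DIR64 entry
    # applies the same delta to a 64-bit value.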
def verify_checksum(self):
return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum()
def generate_checksum(self):
# This will make sure that the data representing the PE image
# is updated with any changes that might have been made by
# assigning values to header fields as those are not automatically
# updated upon assignment.
#
# data = self.write()
# print('{0}'.format(len(data)))
# for idx, b in enumerate(data):
# if b != ord(self.__data__[idx]) or (idx > 1244440 and idx < 1244460):
# print('Idx: {0} G {1:02x} {3} B {2:02x}'.format(
# idx, ord(self.__data__[idx]), b,
# self.__data__[idx], chr(b)))
self.__data__ = self.write()
# Get the offset to the CheckSum field in the OptionalHeader
# (The offset is the same in PE32 and PE32+)
checksum_offset = self.OPTIONAL_HEADER.get_file_offset() + 0x40 # 64
checksum = 0
# Verify the data is dword-aligned. Add padding if needed
#
remainder = len(self.__data__) % 4
data_len = len(self.__data__) + ((4-remainder) * ( remainder != 0 ))
for i in range( int(data_len / 4) ):
# Skip the checksum field
if i == int(checksum_offset / 4):
continue
if i+1 == (int(data_len / 4)) and remainder:
dword = struct.unpack('I', self.__data__[i*4:]+ (b'\0' * (4-remainder)) )[0]
else:
dword = struct.unpack('I', self.__data__[ i*4 : i*4+4 ])[0]
# Optimized the calculation (thanks to Emmanuel Bourg for pointing it out!)
checksum += dword
if checksum >= 2**32:
checksum = (checksum & 0xffffffff) + (checksum >> 32)
checksum = (checksum & 0xffff) + (checksum >> 16)
checksum = (checksum) + (checksum >> 16)
checksum = checksum & 0xffff
# The length is the one of the original data, not the padded one
#
return checksum + len(self.__data__)
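    # Usage sketch (hedged): refresh the header checksum after modifying the
    # image and save it; 'pe' is an already-parsed instance, the output name is
    # a placeholder, and write() is defined elsewhere in this class.
    #
    #   pe.OPTIONAL_HEADER.CheckSum = pe.generate_checksum()
    #   pe.write(filename='fixed.exe')
    #
    # verify_checksum() above simply compares the stored CheckSum with this value.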
def is_exe(self):
"""Check whether the file is a standard executable.
This will return true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE flag set
and the IMAGE_FILE_DLL not set and the file does not appear to be a driver either.
"""
EXE_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_EXECUTABLE_IMAGE']
if (not self.is_dll()) and (not self.is_driver()) and (
EXE_flag & self.FILE_HEADER.Characteristics) == EXE_flag:
return True
return False
def is_dll(self):
"""Check whether the file is a standard DLL.
This will return true only if the image has the IMAGE_FILE_DLL flag set.
"""
DLL_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_DLL']
if ( DLL_flag & self.FILE_HEADER.Characteristics) == DLL_flag:
return True
return False
def is_driver(self):
"""Check whether the file is a Windows driver.
This will return true only if there are reliable indicators of the image
being a driver.
"""
# Checking that the ImageBase field of the OptionalHeader is above or
# equal to 0x80000000 (that is, whether it lies in the upper 2GB of
# the address space, normally belonging to the kernel) is not a
# reliable enough indicator. For instance, PEs that play the invalid
# ImageBase trick to get relocated could be incorrectly assumed to be
# drivers.
# This is not reliable either...
#
# if any((section.Characteristics &
# SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for
# section in self.sections ):
# return True
        # If the import directory was not parsed (fast_load = True), do it now.
if not hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
self.parse_data_directories(directories=[
DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])
# If there's still no import directory (the PE doesn't have one or it's
# malformed), give up.
if not hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
return False
# self.DIRECTORY_ENTRY_IMPORT will now exist, although it may be empty.
# If it imports from "ntoskrnl.exe" or other kernel components it should
# be a driver
#
system_DLLs = set(
('ntoskrnl.exe', 'hal.dll', 'ndis.sys', 'bootvid.dll', 'kdcom.dll'))
if system_DLLs.intersection(
[imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT]):
return True
driver_like_section_names = set(
('page', 'paged'))
if driver_like_section_names.intersection(
[section.Name.lower() for section in self.sections]) and (
self.OPTIONAL_HEADER.Subsystem in (
SUBSYSTEM_TYPE['IMAGE_SUBSYSTEM_NATIVE'],
SUBSYSTEM_TYPE['IMAGE_SUBSYSTEM_NATIVE_WINDOWS'])):
return True
return False
def get_overlay_data_start_offset(self):
"""Get the offset of data appended to the file and not contained within
the area described in the headers."""
largest_offset_and_size = (0, 0)
def update_if_sum_is_larger_and_within_file(offset_and_size, file_size=len(self.__data__)):
if sum(offset_and_size) <= file_size and sum(offset_and_size) > sum(largest_offset_and_size):
return offset_and_size
return largest_offset_and_size
if hasattr(self, 'OPTIONAL_HEADER'):
largest_offset_and_size = update_if_sum_is_larger_and_within_file(
(self.OPTIONAL_HEADER.get_file_offset(), self.FILE_HEADER.SizeOfOptionalHeader))
for section in self.sections:
largest_offset_and_size = update_if_sum_is_larger_and_within_file(
(section.PointerToRawData, section.SizeOfRawData))
skip_directories = [DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']]
for idx, directory in enumerate(self.OPTIONAL_HEADER.DATA_DIRECTORY):
if idx in skip_directories:
continue
try:
largest_offset_and_size = update_if_sum_is_larger_and_within_file(
(self.get_offset_from_rva(directory.VirtualAddress), directory.Size))
# Ignore directories with RVA out of file
except PEFormatError:
continue
if len(self.__data__) > sum(largest_offset_and_size):
return sum(largest_offset_and_size)
return None
def get_overlay(self):
"""Get the data appended to the file and not contained within the area described in the headers."""
overlay_data_offset = self.get_overlay_data_start_offset()
if overlay_data_offset is not None:
return self.__data__[ overlay_data_offset : ]
return None
def trim(self):
"""Return the just data defined by the PE headers, removing any overlayed data."""
overlay_data_offset = self.get_overlay_data_start_offset()
if overlay_data_offset is not None:
return self.__data__[ : overlay_data_offset ]
return self.__data__[:]
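    # Usage sketch (hedged): split a file into its PE image and any appended
    # data; 'pe' is an already-parsed instance and the output names are placeholders.
    #
    #   overlay = pe.get_overlay()
    #   if overlay is not None:
    #       open('overlay.bin', 'wb').write(overlay)
    #   open('trimmed.exe', 'wb').write(pe.trim())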
# According to http://corkami.blogspot.com/2010/01/parce-que-la-planche-aura-brule.html
    # if PointerToRawData is less than 0x200 it's rounded to zero. Loading the test file
    # in a debugger, it's easy to verify that the PointerToRawData value of 1 is rounded
# to zero. Hence we reproduce the behavior
#
# According to the document:
# [ Microsoft Portable Executable and Common Object File Format Specification ]
# "The alignment factor (in bytes) that is used to align the raw data of sections in
# the image file. The value should be a power of 2 between 512 and 64 K, inclusive.
# The default is 512. If the SectionAlignment is less than the architecture's page
# size, then FileAlignment must match SectionAlignment."
#
    # The following mimics a hard-coded constant of the Windows loader.
def adjust_FileAlignment( self, val, file_alignment ):
global FileAlignment_Warning
if file_alignment > FILE_ALIGNMENT_HARDCODED_VALUE:
# If it's not a power of two, report it:
if not power_of_two(file_alignment) and FileAlignment_Warning is False:
self.__warnings.append(
'If FileAlignment > 0x200 it should be a power of 2. Value: %x' % (
file_alignment) )
FileAlignment_Warning = True
if file_alignment < FILE_ALIGNMENT_HARDCODED_VALUE:
return val
return (int(val / 0x200)) * 0x200
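    # Worked example: with FileAlignment equal to the hard-coded 0x200, a
    # PointerToRawData of 0x3ff is adjusted down to 0x200 and 0x401 down to
    # 0x400; with FileAlignment below 0x200 the value is returned untouched.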
# According to the document:
# [ Microsoft Portable Executable and Common Object File Format Specification ]
# "The alignment (in bytes) of sections when they are loaded into memory. It must be
# greater than or equal to FileAlignment. The default is the page size for the
# architecture."
#
def adjust_SectionAlignment( self, val, section_alignment, file_alignment ):
global SectionAlignment_Warning
if file_alignment < FILE_ALIGNMENT_HARDCODED_VALUE:
if file_alignment != section_alignment and SectionAlignment_Warning is False:
self.__warnings.append(
'If FileAlignment(%x) < 0x200 it should equal SectionAlignment(%x)' % (
file_alignment, section_alignment) )
SectionAlignment_Warning = True
if section_alignment < 0x1000: # page size
section_alignment = file_alignment
# 0x200 is the minimum valid FileAlignment according to the documentation
# although ntoskrnl.exe has an alignment of 0x80 in some Windows versions
#
#elif section_alignment < 0x80:
# section_alignment = 0x80
if section_alignment and val % section_alignment:
return section_alignment * ( int(val / section_alignment) )
return val
#################################################
# HACKS
#################################################
def add_section(self, name, fileoffset, length, characterstics):
# check validity
file_align = self.OPTIONAL_HEADER.FileAlignment
        # assume there's a gap between the header & the first section
raise NotImplementedError("TODO")
return
def __len__(self):
return len(self.__data__)
@property
def data(self):
return bytearray(self.__data__)
| 39.800883 | 151 | 0.575191 |
09cac988991b03d36be04c0f3b41b47b9ee21c08 | 9,366 | py | Python | libs/core/operators.py | SFXiang/Fast_Seg | c662ac584e3440b993e4fd7fbbb3b72da4d5bbed | ["Apache-2.0"] | 1 | 2019-11-29T14:37:25.000Z | 2019-11-29T14:37:25.000Z | libs/core/operators.py | SFXiang/Fast_Seg | c662ac584e3440b993e4fd7fbbb3b72da4d5bbed | ["Apache-2.0"] | null | null | null | libs/core/operators.py | SFXiang/Fast_Seg | c662ac584e3440b993e4fd7fbbb3b72da4d5bbed | ["Apache-2.0"] | null | null | null |
# Common segmentation operators implemented in PyTorch
# XiangtaiLi(lxtpku@pku.edu.cn)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import BatchNorm2d
class GlobalAvgPool2d(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
in_size = inputs.size()
inputs = inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
inputs = inputs.view(in_size[0], in_size[1], 1, 1)
return inputs
class SELayer(nn.Module):
def __init__(self, in_planes, out_planes, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_planes, out_planes // reduction),
nn.ReLU(inplace=True),
nn.Linear(out_planes // reduction, out_planes),
nn.Sigmoid()
)
self.out_planes = out_planes
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, self.out_planes, 1, 1)
return y
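# Shape sketch (hedged, not part of the original file): SELayer returns a
# per-channel gate that the caller multiplies into its own feature map.
#
#   se = SELayer(256, 256)
#   x = torch.randn(2, 256, 32, 32)
#   x = x * se(x)   # the gate has shape (2, 256, 1, 1) and broadcasts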
class ConvBnRelu(nn.Module):
def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
groups=1, has_bn=True, norm_layer=nn.BatchNorm2d, bn_eps=1e-5,
has_relu=True, inplace=True, has_bias=False):
super(ConvBnRelu, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
stride=stride, padding=pad,
dilation=dilation, groups=groups, bias=has_bias)
self.has_bn = has_bn
if self.has_bn:
self.bn = norm_layer(out_planes, eps=bn_eps)
self.has_relu = has_relu
if self.has_relu:
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
x = self.conv(x)
if self.has_bn:
x = self.bn(x)
if self.has_relu:
x = self.relu(x)
return x
class ASPPModule(nn.Module):
"""
Reference:
Chen, Liang-Chieh, et al. *"Rethinking Atrous Convolution for Semantic Image Segmentation."*
"""
def __init__(self, features, inner_features=256, out_features=512, dilations=(12, 24, 36), norm_layer=nn.BatchNorm2d):
super(ASPPModule, self).__init__()
self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1,
bias=False),
norm_layer(inner_features),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
norm_layer(inner_features), nn.ReLU())
self.conv3 = nn.Sequential(
nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False),
norm_layer(inner_features), nn.ReLU())
self.conv4 = nn.Sequential(
nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False),
norm_layer(inner_features), nn.ReLU())
self.conv5 = nn.Sequential(
nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False),
norm_layer(inner_features), nn.ReLU())
self.bottleneck = nn.Sequential(
nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
norm_layer(out_features),
nn.ReLU(),
nn.Dropout2d(0.1)
)
def forward(self, x):
_, _, h, w = x.size()
feat1 = F.upsample(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True)
feat2 = self.conv2(x)
feat3 = self.conv3(x)
feat4 = self.conv4(x)
feat5 = self.conv5(x)
out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1)
bottle = self.bottleneck(out)
return bottle
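# Shape sketch (hedged, not part of the original file): with the default
# arguments the module preserves the spatial size and emits out_features channels.
#
#   aspp = ASPPModule(2048)                  # inner_features 256, out_features 512
#   y = aspp(torch.randn(2, 2048, 33, 33))   # -> torch.Size([2, 512, 33, 33])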
class A2Block(nn.Module):
def __init__(self, inplane, plane):
super(A2Block, self).__init__()
self.down = nn.Conv2d(inplane, plane, 1)
self.up = nn.Conv2d(plane, inplane, 1)
self.gather_down = nn.Conv2d(inplane, plane, 1)
self.distribue_down = nn.Conv2d(inplane, plane, 1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
res = x
A = self.down(res)
B = self.gather_down(res)
b, c, h, w = A.size()
A = A.view(b, c, -1) # (b, c, h*w)
B = B.view(b, c, -1) # (b, c, h*w)
B = self.softmax(B)
B = B.permute(0, 2, 1) # (b, h*w, c)
G = torch.bmm(A, B) # (b,c,c)
C = self.distribue_down(res)
C = C.view(b, c, -1) # (b, c, h*w)
C = self.softmax(C)
C = C.permute(0, 2, 1) # (b, h*w, c)
atten = torch.bmm(C, G) # (b, h*w, c)
atten = atten.permute(0, 2, 1).view(b, c, h, -1)
atten = self.up(atten)
out = res + atten
return out
class PSPModule(nn.Module):
"""
Reference:
Zhao, Hengshuang, et al. *"Pyramid scene parsing network."*
"""
def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6), norm_layer=BatchNorm2d):
super(PSPModule, self).__init__()
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, out_features, size, norm_layer) for size in sizes])
self.bottleneck = nn.Sequential(
nn.Conv2d(features+len(sizes)*out_features, out_features, kernel_size=1, padding=1, dilation=1, bias=False),
norm_layer(out_features),
nn.ReLU(),
nn.Dropout2d(0.1)
)
def _make_stage(self, features, out_features, size, norm_layer):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)
bn = norm_layer(out_features)
return nn.Sequential(prior, conv, bn)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [F.upsample(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats]
bottle = self.bottleneck(torch.cat(priors, 1))
return bottle
# For BiSeNet
class AttentionRefinement(nn.Module):
def __init__(self, in_planes, out_planes,
norm_layer=nn.BatchNorm2d):
super(AttentionRefinement, self).__init__()
self.conv_3x3 = ConvBnRelu(in_planes, out_planes, 3, 1, 1,
has_bn=True, norm_layer=norm_layer,
has_relu=True, has_bias=False)
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvBnRelu(out_planes, out_planes, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=False, has_bias=False),
nn.Sigmoid()
)
def forward(self, x):
fm = self.conv_3x3(x)
fm_se = self.channel_attention(fm)
fm = fm * fm_se
return fm
# For BiSeNet
class FeatureFusion(nn.Module):
def __init__(self, in_planes, out_planes,
reduction=1, norm_layer=nn.BatchNorm2d):
super(FeatureFusion, self).__init__()
self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0,
has_bn=True, norm_layer=norm_layer,
has_relu=True, has_bias=False)
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvBnRelu(out_planes, out_planes // reduction, 1, 1, 0,
has_bn=False, norm_layer=norm_layer,
has_relu=True, has_bias=False),
ConvBnRelu(out_planes // reduction, out_planes, 1, 1, 0,
has_bn=False, norm_layer=norm_layer,
has_relu=False, has_bias=False),
nn.Sigmoid()
)
def forward(self, x1, x2):
fm = torch.cat([x1, x2], dim=1)
fm = self.conv_1x1(fm)
fm_se = self.channel_attention(fm)
output = fm + fm * fm_se
return output
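# --- Minimal shape smoke test (not from the original repository; sizes are
# illustrative assumptions, chosen only to show that the blocks above run
# end-to-end and to document their expected output shapes) ---
if __name__ == "__main__":
    x = torch.randn(2, 64, 32, 32)
    aspp = ASPPModule(64, inner_features=32, out_features=64)
    psp = PSPModule(64, out_features=64)
    a2 = A2Block(64, 16)
    arm = AttentionRefinement(64, 64)
    ffm = FeatureFusion(128, 64)
    print("ASPP:", aspp(x).shape)    # (2, 64, 32, 32)
    print("PSP :", psp(x).shape)     # slightly larger spatially: the bottleneck uses padding=1 with a 1x1 conv
    print("A2  :", a2(x).shape)      # (2, 64, 32, 32)
    print("ARM :", arm(x).shape)     # (2, 64, 32, 32)
    print("FFM :", ffm(x, x).shape)  # (2, 64, 32, 32)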
| 36.874016
| 134
| 0.572283
|
ed547925438ebf2ff6b06c88a9656172367fe9a5
| 3,517
|
py
|
Python
|
doajtest/unit/test_index_searchbox.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 47
|
2015-04-24T13:13:39.000Z
|
2022-03-06T03:22:42.000Z
|
doajtest/unit/test_index_searchbox.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 1,215
|
2015-01-02T14:29:38.000Z
|
2022-03-28T14:19:13.000Z
|
doajtest/unit/test_index_searchbox.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 14
|
2015-11-27T13:01:23.000Z
|
2021-05-21T07:57:23.000Z
|
from doajtest.helpers import DoajTestCase
from flask import url_for
class TestSearchBox(DoajTestCase):
def setUp(self):
super(TestSearchBox, self).setUp()
def tearDown(self):
super(TestSearchBox, self).tearDown()
def test_01_with_referrer(self):
""" Utilise the search endpoint using the ref field. We expect a redirect with the referrer to be appended """
with self.app_test.test_client() as c:
resp1 = c.post('/search?source={"query": {"query_string": {"query": "cheese", "default_operator": "AND"}}}',
data={'ref': 'homepage-box', 'origin': 'ui'})
assert resp1.status_code == 302, resp1.status_code
assert resp1.location.endswith('&ref=homepage-box')
resp2 = c.post('/search?source={"query": {"query_string": {"query": "cheese", "default_operator": "AND"}}}',
data={'ref': 'homepage-box', 'origin': 'ui'}, follow_redirects=True)
assert resp2.status_code == 200, resp2.status_code
def test_02_without_origin(self):
""" Omit the origin field when emulating the text box - this is disallowed."""
with self.app_test.test_client() as c:
resp = c.post('/search?source={"query": {"query_string": {"query": "cheese", "default_operator": "AND"}}}',
data={'ref': 'homepage-box'})
assert resp.status_code == 400, resp.status_code
pass
def test_03_without_referrer(self):
""" Omit the referrer field when emulating the text box """
with self.app_test.test_client() as c:
resp = c.post('/search?source={"query": {"query_string": {"query": "cheese", "default_operator": "AND"}}}',
data={'origin': 'ui'})
assert resp.status_code == 400, resp.status_code
pass
def test_04_legacy_search_routes(self):
with self.app_test.test_request_context():
with self.app_test.test_client() as t:
# A plain /search goes to the journal search
resp = t.get('/search')
assert resp.status_code == 301, resp.status_code
assert resp.location == url_for('doaj.journals_search', _external=True), resp.location
# A legacy /search?source=...journal... also goes to the journal search page
resp = t.get('/search?source={"query": {"filtered": {"filter": {"bool": {"must": [{"term": {"_type": "journal"}}]}}, "query": {"match_all": {}}}}}')
assert resp.status_code == 301, resp.status_code
assert resp.location == url_for('doaj.journals_search', _external=True), resp.location
# A legacy /search?source=...article... goes to the article search page
resp = t.get('/search?source={"query":{"filtered":{"filter":{"bool":{"must":[{"term":{"_type":"article"}}]}},"query":{"match_all":{}}}}}')
assert resp.status_code == 301, resp.status_code
assert resp.location == url_for('doaj.articles_search', _external=True), resp.location
# Even if there's whitespace in the query
resp = t.get('/search?source={"query": {"filtered": {"filter": {"bool": {"must": [{"term": {"_type": "article"}}]}}, "query": {"match_all": {}}}}}')
assert resp.status_code == 301, resp.status_code
assert resp.location == url_for('doaj.articles_search', _external=True), resp.location
| 54.953125
| 164
| 0.589138
|
8bdb68b7f3a1a82fa011556e1e1f08ce808c55da
| 969
|
py
|
Python
|
pass2.py
|
Vidhu-Chaudhary/Assembler
|
f8227790102904e8aa63eded6107ea1e8b173af0
|
[
"MIT"
] | null | null | null |
pass2.py
|
Vidhu-Chaudhary/Assembler
|
f8227790102904e8aa63eded6107ea1e8b173af0
|
[
"MIT"
] | null | null | null |
pass2.py
|
Vidhu-Chaudhary/Assembler
|
f8227790102904e8aa63eded6107ea1e8b173af0
|
[
"MIT"
] | null | null | null |
from Opcode import *
from pass1 import *
wr = open("output.txt","w")
def pass2():
locptr = 0
#print(labels)
with open("input.txt","r") as f1:
for x1 in f1:
x1 = x1.strip("\n")
arr = x1.split(' ')
            wr.write(f'{locptr:08b}' + " ")
if len(arr) == 1:
wr.write(str(getOpcode(arr[0]))+" 00000000"+"\n")
if len(arr) == 2 and ifVariable(arr[0]):
wr.write(str(getOpcode(arr[1]))+" 00000000"+"\n")
if len(arr) == 2 and ifOpcode(arr[0]):
if (arr[0] == "BRZ" or arr[0] == "BRP" or arr[0] == "BRN") and (arr[1] not in labels):
print("Error: Cannot branch to a variable/opcode, need label")
wr.write(str(getOpcode(arr[0]))+" "+str(find_address(arr[1]))+"\n")
if(len(arr) > 2):
wr.write(str(getOpcode(arr[1]))+" "+str(find_address(arr[2]))+"\n")
locptr += 1
def find_address(_str):
with open("Symboltable.txt","r") as f:
for x in f:
x = x.strip("\n")
arr = x.split(' ')
if(arr[0] == _str):
return arr[1]
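# Hypothetical driver (not part of the original repository): running this module
# directly assumes pass1 has already produced Symboltable.txt and the label list,
# and that input.txt holds one "[LABEL] OPCODE [OPERAND]" statement per line;
# output.txt then receives the assembled "address opcode operand" bit strings.
if __name__ == "__main__":
    pass2()
    wr.close()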
| 25.5
| 90
| 0.567595
|
cf4db6049fb080078c84f0d448eb82605ac64e5e
| 6,526
|
py
|
Python
|
model_helper.py
|
smtnkc/pytorch-animals
|
2077322423b26398e221197d701d8bd123195d1a
|
[
"MIT"
] | 1
|
2020-10-14T15:18:20.000Z
|
2020-10-14T15:18:20.000Z
|
model_helper.py
|
smtnkc/pytorch-animals
|
2077322423b26398e221197d701d8bd123195d1a
|
[
"MIT"
] | null | null | null |
model_helper.py
|
smtnkc/pytorch-animals
|
2077322423b26398e221197d701d8bd123195d1a
|
[
"MIT"
] | 1
|
2021-05-20T08:25:39.000Z
|
2021-05-20T08:25:39.000Z
|
import os
import time
import copy
import torch
import pandas as pd
from torchvision import models
import cfg
from utils import fprint, calculate_metrics, get_sub_dump_dir
def initialize_model(is_pretrained):
model = models.alexnet(pretrained=is_pretrained)
# initially disable all parameter updates
if is_pretrained:
for param in model.parameters():
param.requires_grad = False
# reshape the output layer
in_size = model.classifier[6].in_features
model.classifier[6] = torch.nn.Linear(in_size, cfg.NUM_CATEGORIES)
if is_pretrained:
params_to_update = []
for param in model.parameters():
if param.requires_grad:
params_to_update.append(param) # parameters of reshaped layer
else:
params_to_update = model.parameters() # parameters of all layers
return model, params_to_update
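# A minimal wiring sketch (assumptions only: cfg.NUM_CATEGORIES is configured and
# plain SGD / cross-entropy are acceptable choices; neither is dictated by this file):
#   model, params_to_update = initialize_model(is_pretrained=True)
#   optimizer = torch.optim.SGD(params_to_update, lr=1e-3, momentum=0.9)
#   criterion = torch.nn.CrossEntropyLoss()
#   model, optimizer = train_model(model, data_loaders, criterion, optimizer, args)
# With is_pretrained=True only the reshaped classifier[6] layer receives gradients;
# with is_pretrained=False all layers are trained from scratch.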
#
#
#
#
#
#
def train_model(model, data_loaders, criterion, optimizer, args):
# create states df and csv file
stats_df = pd.DataFrame(
columns=['epoch', 'train_loss', 'train_acc', 'train_f1', 'val_loss', 'val_acc', 'val_f1'])
sub_dump_dir = get_sub_dump_dir(args)
stats_path = os.path.join(sub_dump_dir, 'stats.csv')
stats_df.to_csv(stats_path, sep=',', index=False) # write loss and acc values
fprint('\nCreated stats file\t-> {}'.format(stats_path), args)
fprint('\nTRAINING {} EPOCHS...\n'.format(args.epochs), args)
since = time.time()
# initialize best values
best_model_state_dict = copy.deepcopy(model.state_dict())
best_opt_state_dict = copy.deepcopy(optimizer.state_dict())
best_loss = 999999.9
best_acc = 0.0
best_epoch = 0
for epoch in range(args.epochs):
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
phase_loss = 0.0
phase_corrects = 0
phase_preds = torch.LongTensor()
phase_category_ids = torch.LongTensor()
# Iterate over data
for inputs, category_ids in data_loaders[phase]:
inputs = inputs.to(torch.device(args.device))
category_ids = category_ids.to(torch.device(args.device))
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
outputs = model(inputs)
loss = criterion(outputs, category_ids)
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# stats
batch_loss = loss.item() * inputs.size(0)
batch_corrects = torch.sum(preds == category_ids.data)
phase_loss += batch_loss
phase_corrects += batch_corrects
phase_preds = torch.cat((phase_preds, preds), 0)
phase_category_ids = torch.cat((phase_category_ids, category_ids), 0)
epoch_loss = phase_loss / len(data_loaders[phase].dataset)
epoch_acc, epoch_f1 = calculate_metrics(phase_preds, phase_category_ids)
stats_df.at[0, 'epoch'] = epoch
stats_df.at[0, phase + '_loss'] = round(epoch_loss, 6)
stats_df.at[0, phase + '_acc'] = round(epoch_acc, 6)
stats_df.at[0, phase + '_f1'] = round(epoch_f1, 6)
# define the new bests
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_state_dict = copy.deepcopy(model.state_dict())
best_opt_state_dict = copy.deepcopy(optimizer.state_dict())
best_loss = copy.deepcopy(epoch_loss)
best_epoch = epoch
# append epoch stats to file
fprint(stats_df.to_string(index=False, header=(epoch == 0), col_space=10, justify='right'), args)
stats_df.to_csv(stats_path, mode='a', header=False, index=False)
time_elapsed = time.time() - since
fprint('\nTraining completed in {:.0f}m {:.0f}s\n'.format(
time_elapsed // 60, time_elapsed % 60), args)
# reload best model weights and best optimizer variables
model.load_state_dict(best_model_state_dict)
optimizer.load_state_dict(best_opt_state_dict)
# save best checkpoint
if not os.path.exists(cfg.MODEL_DIR):
os.makedirs(cfg.MODEL_DIR)
cp_path = os.path.join(cfg.MODEL_DIR, '{}_{}_{:.6f}.pth'.format(
'pt' if args.pretrained else 'fs', args.t_start, best_acc))
if args.save:
torch.save({
'epoch': best_epoch,
'model_state_dict': best_model_state_dict,
'optimizer_state_dict': best_opt_state_dict,
'loss': best_loss,
'acc': best_acc
}, cp_path)
fprint('Saved best checkpoint\t-> {}'.format(cp_path), args)
return model, optimizer
#
#
#
#
#
#
def test_model(model, data_loaders, args):
fprint('\nTESTING...', args)
was_training = model.training # store mode
model.eval() # run in evaluation mode
with torch.no_grad():
phase_corrects = 0
phase_preds = torch.LongTensor()
phase_category_ids = torch.LongTensor()
for inputs, category_ids in data_loaders['test']:
inputs = inputs.to(torch.device(args.device))
category_ids = category_ids.to(torch.device(args.device))
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
batch_corrects = torch.sum(preds == category_ids.data)
phase_corrects += batch_corrects
phase_preds = torch.cat((phase_preds, preds), 0)
phase_category_ids = torch.cat((phase_category_ids, category_ids), 0)
dataset = data_loaders['test'].dataset
acc, f1 = calculate_metrics(phase_preds, phase_category_ids)
fprint('{}/{} predictions are correct -> Test acc: {:.6f} f1: {:.6f}\n'.format(
phase_corrects, len(dataset), acc, f1), args)
model.train(mode=was_training) # reinstate the previous mode
return acc
| 33.639175
| 105
| 0.610788
|
b3ccefeda975bdfbf41f6d69cfb2d5fc73f82c0c
| 1,289
|
py
|
Python
|
src/plugins/setu/utils.py
|
Lycreal/qbot
|
e0cb5af8295efb1d58780ac4a420551e1183ba8b
|
[
"MIT"
] | 7
|
2019-10-09T07:09:37.000Z
|
2020-07-15T01:30:25.000Z
|
src/plugins/setu/utils.py
|
Lycreal/cqbot
|
b189a17283a63e982bf7f99e529486af8d2bfb76
|
[
"MIT"
] | 59
|
2021-05-20T07:21:50.000Z
|
2022-03-25T21:17:07.000Z
|
src/plugins/setu/utils.py
|
Lycreal/qbot
|
e0cb5af8295efb1d58780ac4a420551e1183ba8b
|
[
"MIT"
] | 2
|
2020-04-02T09:21:44.000Z
|
2020-06-25T13:40:13.000Z
|
import typing as T
from datetime import datetime, timedelta
from random import randint
import cv2
import numpy as np
from pydantic import BaseModel
def shuzi2number(shuzi: T.Optional[str]) -> int:
s = {'一': 1, '两': 2, '二': 2, '三': 3,
'四': 4, '五': 5, '六': 6, '七': 7,
'八': 8, '九': 9, '十': 10}
if not shuzi:
return 1
elif shuzi.isdecimal():
return int(shuzi)
elif shuzi in s.keys():
return s[shuzi]
else:
return 1
def shuffle(image_bytes: bytes) -> bytes:
image = cv2.imdecode(np.asarray(bytearray(image_bytes)), cv2.IMREAD_COLOR)
image[0, 0] = randint(0, 255)
image[0, -1] = randint(0, 255)
image[-1, 0] = randint(0, 255)
image[-1, -1] = randint(0, 255)
img_encode = cv2.imencode('.jpg', image)[1]
return bytes(img_encode)
class CoolDown(BaseModel):
"""example:
cd = CoolDown(app='app1', td=20)
cd.update(123)
cd.check(123)
"""
app: str
td: float # timedelta
value: T.Dict[int, datetime] = {}
def update(self, mid: int) -> None:
self.value.update({mid: datetime.now()})
def check(self, mid: int) -> bool:
ret = datetime.now() >= self.value.get(mid, datetime.utcfromtimestamp(0)) + timedelta(seconds=self.td)
return ret
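if __name__ == "__main__":
    # Small self-check (not part of the original plugin); shuffle() is skipped
    # here because it needs real image bytes.
    assert shuzi2number("三") == 3
    assert shuzi2number("12") == 12
    assert shuzi2number(None) == 1
    cd = CoolDown(app="demo", td=1.0)
    cd.update(123)
    print(cd.check(123))  # False right after update; True once td seconds have passed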
| 25.78
| 110
| 0.588053
|
ec02349796a48dfff1c73ba56dc4eacac04320ab
| 800
|
py
|
Python
|
test/functional/rpc_uptime.py
|
bitcorub/bitrub
|
28711e4e8ebdee144a1437ece07afcf792a7cf60
|
[
"MIT"
] | 1
|
2019-12-09T18:33:47.000Z
|
2019-12-09T18:33:47.000Z
|
test/functional/rpc_uptime.py
|
bitcorub/bitrub
|
28711e4e8ebdee144a1437ece07afcf792a7cf60
|
[
"MIT"
] | null | null | null |
test/functional/rpc_uptime.py
|
bitcorub/bitrub
|
28711e4e8ebdee144a1437ece07afcf792a7cf60
|
[
"MIT"
] | 1
|
2019-12-12T20:05:36.000Z
|
2019-12-12T20:05:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The BitRub Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC call related to the uptime command.
Test corresponds to code in rpc/server.cpp.
"""
import time
from test_framework.test_framework import BitRubTestFramework
class UptimeTest(BitRubTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self._test_uptime()
def _test_uptime(self):
wait_time = 10
self.nodes[0].setmocktime(int(time.time() + wait_time))
assert self.nodes[0].uptime() >= wait_time
if __name__ == '__main__':
UptimeTest().main()
| 25.806452
| 69
| 0.70875
|
db2bed47f51615764fb6802ddda6a0a2bfa45c31
| 4,811
|
py
|
Python
|
homeassistant/components/toon/climate.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 1
|
2021-03-20T12:25:26.000Z
|
2021-03-20T12:25:26.000Z
|
homeassistant/components/toon/climate.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 60
|
2020-08-03T07:32:56.000Z
|
2022-03-31T06:02:07.000Z
|
homeassistant/components/toon/climate.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 4
|
2017-01-10T04:17:33.000Z
|
2021-09-02T16:37:24.000Z
|
"""Support for Toon thermostat."""
from __future__ import annotations
from typing import Any
from toonapi import (
ACTIVE_STATE_AWAY,
ACTIVE_STATE_COMFORT,
ACTIVE_STATE_HOME,
ACTIVE_STATE_SLEEP,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_HEAT,
PRESET_AWAY,
PRESET_COMFORT,
PRESET_HOME,
PRESET_SLEEP,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from .const import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP, DOMAIN
from .helpers import toon_exception_handler
from .models import ToonDisplayDeviceEntity
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up a Toon binary sensors based on a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[ToonThermostatDevice(coordinator, name="Thermostat", icon="mdi:thermostat")]
)
class ToonThermostatDevice(ToonDisplayDeviceEntity, ClimateEntity):
"""Representation of a Toon climate device."""
@property
def unique_id(self) -> str:
"""Return the unique ID for this thermostat."""
agreement_id = self.coordinator.data.agreement.agreement_id
# This unique ID is a bit ugly and contains unneeded information.
        # It is here for legacy / backward-compatibility reasons.
return f"{DOMAIN}_{agreement_id}_climate"
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
return HVAC_MODE_HEAT
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes."""
return [HVAC_MODE_HEAT]
@property
def hvac_action(self) -> str | None:
"""Return the current running hvac operation."""
if self.coordinator.data.thermostat.heating:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., home, away, temp."""
mapping = {
ACTIVE_STATE_AWAY: PRESET_AWAY,
ACTIVE_STATE_COMFORT: PRESET_COMFORT,
ACTIVE_STATE_HOME: PRESET_HOME,
ACTIVE_STATE_SLEEP: PRESET_SLEEP,
}
return mapping.get(self.coordinator.data.thermostat.active_state)
@property
def preset_modes(self) -> list[str]:
"""Return a list of available preset modes."""
return [PRESET_AWAY, PRESET_COMFORT, PRESET_HOME, PRESET_SLEEP]
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self.coordinator.data.thermostat.current_display_temperature
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self.coordinator.data.thermostat.current_setpoint
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return DEFAULT_MIN_TEMP
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return DEFAULT_MAX_TEMP
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the current state of the burner."""
return {"heating_type": self.coordinator.data.agreement.heating_type}
@toon_exception_handler
async def async_set_temperature(self, **kwargs) -> None:
"""Change the setpoint of the thermostat."""
temperature = kwargs.get(ATTR_TEMPERATURE)
await self.coordinator.toon.set_current_setpoint(temperature)
@toon_exception_handler
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
mapping = {
PRESET_AWAY: ACTIVE_STATE_AWAY,
PRESET_COMFORT: ACTIVE_STATE_COMFORT,
PRESET_HOME: ACTIVE_STATE_HOME,
PRESET_SLEEP: ACTIVE_STATE_SLEEP,
}
if preset_mode in mapping:
await self.coordinator.toon.set_active_state(mapping[preset_mode])
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
# Intentionally left empty
        # The HVAC mode is always HEAT
| 33.17931
| 85
| 0.686967
|
cf4fbba2f50a450b03a996e63d896758d613abc5
| 108
|
py
|
Python
|
Workspace for Python/studying file/module/path test/moduletest.py
|
ArchibaldChain/python-workspace
|
71890f296c376155e374b2096ac3d8f1d286b7d2
|
[
"MIT"
] | null | null | null |
Workspace for Python/studying file/module/path test/moduletest.py
|
ArchibaldChain/python-workspace
|
71890f296c376155e374b2096ac3d8f1d286b7d2
|
[
"MIT"
] | 3
|
2020-06-17T16:01:27.000Z
|
2022-01-13T02:52:53.000Z
|
Workspace for Python/studying file/module/path test/moduletest.py
|
ArchibaldChain/python-workspace
|
71890f296c376155e374b2096ac3d8f1d286b7d2
|
[
"MIT"
] | null | null | null |
print("The output of module")
name = "moduletest"
def mtpr():
print("the function in the module")
| 18
| 40
| 0.648148
|
78b58eeddcfd433445560f6dd4b97ddc756b571c
| 675
|
py
|
Python
|
tests/test_use_cases/test_task/test_task_fetcher.py
|
marioidival/todclean
|
75be8ccdaaa443ebdeb6a7ef270cd92eb693afe4
|
[
"MIT"
] | null | null | null |
tests/test_use_cases/test_task/test_task_fetcher.py
|
marioidival/todclean
|
75be8ccdaaa443ebdeb6a7ef270cd92eb693afe4
|
[
"MIT"
] | null | null | null |
tests/test_use_cases/test_task/test_task_fetcher.py
|
marioidival/todclean
|
75be8ccdaaa443ebdeb6a7ef270cd92eb693afe4
|
[
"MIT"
] | null | null | null |
import unittest
from todclean.use_cases.task.fetcher import TaskFetch
class TestTaskFetch(unittest.TestCase):
def setUp(self):
from todclean.repositories.in_memory import InMemory
from todclean import repo_manager as manager
self.repo = InMemory()
manager.save_repo('task', self.repo)
from todclean.use_cases.task.adder import TaskAdd
from todclean.entities.task import Task
adder = TaskAdd()
for i in range(10):
adder.add_task(Task('Test Task {}'.format(i)))
self.fetcher = TaskFetch()
def test_task_fetch(self):
self.assertEqual(10, len(self.fetcher.fetch_tasks()))
| 28.125
| 61
| 0.674074
|
faca6369f6be5f8f1507250ba0ff546f9071eecd
| 5,529
|
py
|
Python
|
code/feedback_control.py
|
WallabyLester/Mobile_Manipulation
|
68321d406d44ad1dc66ef01dd77f246f8e6c25d3
|
[
"MIT"
] | null | null | null |
code/feedback_control.py
|
WallabyLester/Mobile_Manipulation
|
68321d406d44ad1dc66ef01dd77f246f8e6c25d3
|
[
"MIT"
] | null | null | null |
code/feedback_control.py
|
WallabyLester/Mobile_Manipulation
|
68321d406d44ad1dc66ef01dd77f246f8e6c25d3
|
[
"MIT"
] | null | null | null |
import numpy as np
import modern_robotics as mr
from modern_robotics.core import Adjoint, FKinBody, JacobianBody, MatrixLog6, se3ToVec, TransInv
"""
Code to calculate the feedforward control for the youBot
"""
def FeedbackControl(X, Xd, Xd_next, Kp, Ki, dt, curr_config, Xerr_integral):
""" Calculates the kinematic task-space feedforward and feedback control law
    Makes use of the control law: V(t) = [Ad_(X^-1 Xd)] Vd(t) + Kp * Xerr(t) + Ki * integral_0^t Xerr(t) dt
Args:
X : Current actual end-effector configuration Tse
Xd : Current end-effector reference configuration Tse,d
Xd_next : End-effector reference configuration at the next timestep in
the reference trajectory Xd at Δt later
Kp : The feedback proportional gain
Ki : The feedback integral gain
dt : Timestep Δt between reference trajectory configurations
curr_config : The current configuration of the robot
Xerr_integral : Initial integral of the error (zeros)
Returns:
V : End-effector twist expressed in end-effector frame {e}
Controls : The commanded wheel and arm joint speeds (m/s)
Xerr : Error in X
"""
# initialize kinematics variables
l = 0.47/2 # forward-backward distance between the wheels (m)
w = 0.3/2 # side-to-side distance between wheels (m)
r = 0.0475 # radius of each wheel (m)
# the fixed offset from the chassis frame {b} to the base frame of the arm {0}
Tb0 = np.array([[1, 0, 0, 0.1662],
[0, 1, 0, 0],
[0, 0, 1, 0.0026],
[0, 0, 0, 1]])
# end-effector frame {e} relative to the arm base frame {0}
M0e = np.array([[1, 0, 0, 0.033],
[0, 1, 0, 0],
[0, 0, 1, 0.6546],
[0, 0, 0, 1]])
# the screw axes for the five joints in the end-effector frame {e}
Blist = np.array([[0, 0, 1, 0, 0.033, 0],
[0, -1, 0, -0.5076, 0, 0],
[0, -1, 0, -0.3526, 0, 0],
[0, -1, 0, -0.2176, 0, 0],
[0, 0, 1, 0, 0, 0]]).T
# find current joint angles
curr_joint_ang = curr_config[3:8]
# transformation from {0} to {e}
T0e = FKinBody(M0e, Blist, curr_joint_ang)
# transformation from {e} to {b}
Teb = TransInv(T0e)@TransInv(Tb0)
# compute the reference twist Vd
Log = MatrixLog6(TransInv(Xd)@Xd_next)
Vel = se3ToVec(Log)
Vd = 1/dt * Vel
# print(f"Vd: {Vd}")
# compute the Ad(x^-1xd) matrix
Adx_invxd = Adjoint(TransInv(X)@Xd)
Adx_invxdVd = Adx_invxd@Vd # 6x6 @ 6x1 = 6x1
# print(f"Adx_invxdVd: {Adx_invxdVd}")
# compute X error
Xerr = se3ToVec(MatrixLog6(TransInv(X)@Xd))
# print(f"Xerr: {Xerr}")
# compute the integral of the error
Xerr_integral += Xerr * dt
# print(f"Integral of error: {Xerr_integral}")
# compute V
V = Adx_invxdVd + Kp@Xerr + Ki@Xerr_integral
# print(f"V: {V}")
# F6 matrix
F6 = r/4 * np.array([[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[-1/(l+w), 1/(l+w), 1/(l+w), -1/(l+w)],
[ 1, 1, 1, 1],
[ -1, 1, -1, 1],
[ 0, 0, 0, 0]])
# arm jacobian
J = JacobianBody(Blist, curr_joint_ang)
# # joint limits
# J_limit = J.T
# if curr_joint_ang[0] < -2.95 or curr_joint_ang[0] > 2.95:
# J_limit[0] = J_limit[0]*0
# if curr_joint_ang[1] < -1 or curr_joint_ang[1] > 1:
# J_limit[1] = J_limit[1] * 0
# if curr_joint_ang[2] < -2 or curr_joint_ang[2] > 2:
# J_limit[2] = J_limit[2] * 0
# if curr_joint_ang[3] < -2 or curr_joint_ang[3] > 2:
# J_limit[3] = J_limit[3] * 0
# if curr_joint_ang[4] < -2.92 or curr_joint_ang[4] > 2.92:
# J_limit[4] = J_limit[4] * 0
# J = J_limit.T
# body jacobian
Jb = Adjoint(Teb) @ F6
Je = np.hstack((Jb, J)) ### make joint column zero depending on config to place joint limits
# print(f"Je: \n{np.around(Je, decimals=3)}")
# calculate the commanded wheel and arm joint speeds: u and thetadot
# using the Moore-Penrose pseudoinverse
# Je_pinv = Je.T@np.linalg.inv(Je@Je.T)
Je_pinv = np.linalg.pinv(Je, 1e-3)
# Je_pinv = np.linalg.pinv(Je)
controls = Je_pinv@V
# print(f"Controls: {np.around(controls, decimals=1)}")
return V, controls, Xerr
if __name__ == "__main__":
""" Main function to call FeedbackControl
"""
Xd = np.array([[ 0, 0, 1, 0.5],
[ 0, 1, 0, 0],
[-1, 0, 0, 0.5],
[ 0, 0, 0, 1]])
Xd_next = np.array([[ 0, 0, 1, 0.6],
[ 0, 1, 0, 0],
[-1, 0, 0, 0.3],
[ 0, 0, 0, 1]])
X = np.array([[ 0.170, 0, 0.985, 0.387],
[ 0, 1, 0, 0],
[-0.985, 0, 0.170, 0.570],
[ 0, 0, 0, 1]])
# Kp = np.zeros((6,6))
Kp = np.identity(6)
Ki = np.zeros((6,6))
# Ki = np.identity(6)
dt = 0.01
curr_config = np.array([0, 0, 0, 0, 0, 0.2, -1.6, 0])
Xerr_integral = np.zeros(6)
V, speeds, Xerr = FeedbackControl(X, Xd, Xd_next, Kp, Ki, dt, curr_config, Xerr_integral)
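    # Report the single control step computed above (illustrative output only).
    print(f"Twist V: {np.around(V, decimals=3)}")
    print(f"Wheel/joint speeds: {np.around(speeds, decimals=1)}")
    print(f"Xerr: {np.around(Xerr, decimals=3)}")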
| 36.137255
| 97
| 0.515283
|
542640e9f2a3bfa06d2acc2a6ea59897d47e6fcb
| 19,271
|
py
|
Python
|
core/completion_test.py
|
afunsten/oil
|
e52071e10a78157db1e4f0befc439a36ca1cbc01
|
[
"Apache-2.0"
] | 1
|
2019-01-25T01:15:51.000Z
|
2019-01-25T01:15:51.000Z
|
core/completion_test.py
|
afunsten/oil
|
e52071e10a78157db1e4f0befc439a36ca1cbc01
|
[
"Apache-2.0"
] | null | null | null |
core/completion_test.py
|
afunsten/oil
|
e52071e10a78157db1e4f0befc439a36ca1cbc01
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
completion_test.py: Tests for completion.py
"""
from __future__ import print_function
import os
import unittest
import sys
from core import alloc
from core import completion # module under test
from core import test_lib
from core import ui
from core import util
from core.meta import runtime_asdl, syntax_asdl
from frontend import parse_lib
from osh import state
from testdata.completion import bash_oracle
assign_op_e = syntax_asdl.assign_op_e
value_e = runtime_asdl.value_e
log = util.log
A1 = completion.TestAction(['foo.py', 'foo', 'bar.py'])
U1 = completion.UserSpec([A1], [], [], lambda candidate: True)
BASE_OPTS = {}
mem = state.Mem('', [], {}, None)
FIRST = completion.TestAction(['grep', 'sed', 'test'])
U2 = completion.UserSpec([FIRST], [], [], lambda candidate: True)
def MockApi(line):
"""Match readline's get_begidx() / get_endidx()."""
end = len(line)
i = end - 1
while i > 0:
if line[i] in util.READLINE_DELIMS:
break
i -= 1
return completion.Api(line=line, begin=i+1, end=end)
def _MakeRootCompleter(comp_lookup=None):
#comp_state = comp_state or completion.State()
comp_state = completion.State()
comp_lookup = comp_lookup or completion.Lookup()
ev = test_lib.MakeTestEvaluator()
pool = alloc.Pool()
arena = pool.NewArena()
arena.PushSource('<_MakeRootCompleter>')
trail = parse_lib.Trail()
parse_ctx = parse_lib.ParseContext(arena, {}, trail=trail)
if 0: # enable for details
debug_f = util.DebugFile(sys.stdout)
else:
debug_f = util.NullDebugFile()
progress_f = ui.TestStatusLine()
return completion.RootCompleter(ev, mem, comp_lookup, comp_state, parse_ctx,
progress_f, debug_f)
class FunctionsTest(unittest.TestCase):
def testAdjustArg(self):
AdjustArg = completion.AdjustArg
out = []
AdjustArg(':foo:=bar:', [':', '='], out)
self.assertEqual(out, [':', 'foo', ':=', 'bar', ':'])
out = []
AdjustArg('==::==', [':', '='], out)
self.assertEqual(out, ['==::=='])
out = []
AdjustArg('==::==', [':'], out)
self.assertEqual(out, ['==', '::', '=='])
# This is like if you get [""] somehow, it should be [""].
out = []
AdjustArg('', [':', '='], out)
self.assertEqual(out, [''])
class CompletionTest(unittest.TestCase):
def _MakeComp(self, words, index, to_complete):
comp = completion.Api()
comp.Update(partial_argv=['f'], index=0, to_complete='f')
return comp
def testLookup(self):
c = completion.Lookup()
c.RegisterName('grep', BASE_OPTS, U1)
print(c.GetSpecForName('grep'))
print(c.GetSpecForName('/usr/bin/grep'))
c.RegisterGlob('*.py', BASE_OPTS, U1)
base_opts, comp = c.GetSpecForName('/usr/bin/foo.py')
print('py', comp)
# NOTE: This is an implementation detail
self.assertEqual(1, len(comp.actions))
comp_rb = c.GetSpecForName('foo.rb')
print('rb', comp_rb)
def testExternalCommandAction(self):
mem = state.Mem('dummy', [], {}, None)
a = completion.ExternalCommandAction(mem)
comp = self._MakeComp([], 0, 'f')
print(list(a.Matches(comp)))
# TODO: This should set up the file system and $PATH and assert that only
# executable files are accessed!
def testFileSystemAction(self):
a = completion.FileSystemAction()
# Current dir -- all files and dirs
comp = self._MakeComp([], 0, '')
print(list(a.Matches(comp)))
os.system('mkdir -p /tmp/oil_comp_test')
os.system('bash -c "touch /tmp/oil_comp_test/{one,two,three}"')
# TODO:
# - This no longer filters by prefix!
# - Should test that the executable bit works!
return
# This test depends on actual file system content. But we choose things
# that shouldn't go away.
CASES = [
# Dirs and files
('c', ['core/', 'configure']),
('nonexistent/', []),
('README', ['README.md']),
# Directory should be completed to core/ ?
('core', ['core/']),
('asdl/R', ['asdl/README.md']),
('opy/doc', ['opy/doc/']),
('opy/doc/', ['opy/doc/opcodes.md']),
('/bi', ['/bin/']),
('/tmp/oil_comp_test/', [
'/tmp/oil_comp_test/one',
'/tmp/oil_comp_test/three',
'/tmp/oil_comp_test/two',
])
]
for prefix, expected in CASES:
log('')
log('-- PREFIX %s', prefix)
log('-- expected %s', expected)
comp = self._MakeComp([], 0, prefix)
self.assertEqual(expected, list(a.Matches(comp)))
comp = self._MakeComp([], 0, './o')
print(list(a.Matches(comp)))
# A bunch of repos in oilshell
comp = self._MakeComp([], 0, '../o')
print(list(a.Matches(comp)))
def testShellFuncExecution(self):
arena = test_lib.MakeArena('testShellFuncExecution')
c_parser = test_lib.InitCommandParser("""\
f() {
COMPREPLY=(f1 f2)
}
""", arena=arena)
func_node = c_parser.ParseLogicalLine()
print(func_node)
ex = test_lib.InitExecutor(arena=arena)
a = completion.ShellFuncAction(ex, func_node)
comp = self._MakeComp(['f'], 0, 'f')
matches = list(a.Matches(comp))
self.assertEqual(['f1', 'f2'], matches)
def testUserSpec(self):
comp = self._MakeComp(['f'], 0, 'f')
matches = list(U1.Matches(comp))
self.assertEqual([('foo.py', False), ('foo', False)], matches)
predicate = completion.GlobPredicate(False, '*.py')
c2 = completion.UserSpec([A1], [], [], predicate)
comp = self._MakeComp(['f'], 0, 'f')
matches = list(c2.Matches(comp))
self.assertEqual([('foo.py', False)], matches)
class RootCompeterTest(unittest.TestCase):
def testCompletesHomeDirs(self):
r = _MakeRootCompleter()
comp = MockApi(line='echo ~r')
print(comp)
m = list(r.Matches(comp))
#This test isn't hermetic, but I think root should be on all systems.
self.assert_('~root/' in m, 'Got %s' % m)
comp = MockApi(line='echo ~')
print(comp)
m = list(r.Matches(comp))
#This test isn't hermetic, but I think root should be on all systems.
self.assert_('~root/' in m, 'Got %s' % m)
# Don't be overly aggressive!
comp = MockApi(line='echo a~')
m = list(r.Matches(comp))
self.assertEqual(0, len(m))
def testCompletesVarNames(self):
r = _MakeRootCompleter()
# Complete ALL variables
comp = MockApi('echo $')
self.assertEqual(5, comp.begin) # what readline does
self.assertEqual(6, comp.end)
print(comp)
m = list(r.Matches(comp))
# Just test for a subset
self.assert_('$HOME' in m, m)
self.assert_('$IFS' in m, m)
# Now it has a prefix
comp = MockApi(line='echo $P')
self.assertEqual(5, comp.begin) # what readline does
self.assertEqual(7, comp.end)
print(comp)
m = list(r.Matches(comp))
self.assert_('$PWD' in m, 'Got %s' % m)
self.assert_('$PS4' in m, 'Got %s' % m)
#
# BracedVarSub
#
# Complete ALL variables
comp = MockApi(line='echo _${')
print(comp)
m = list(r.Matches(comp))
# Just test for a subset
self.assert_('_${HOME' in m, 'Got %s' % m)
self.assert_('_${IFS' in m, 'Got %s' % m)
# Now it has a prefix
comp = MockApi(line='echo ${P')
print(comp)
m = list(r.Matches(comp))
self.assert_('${PWD' in m, 'Got %s' % m)
self.assert_('${PS4' in m, 'Got %s' % m)
# Odd word break
# NOTE: We use VSub_Name both for $FOO and ${FOO. Might be bad?
comp = MockApi(line='echo ${undef:-$P')
print(comp)
m = list(r.Matches(comp))
self.assert_('-$PWD' in m, 'Got %s' % m)
self.assert_('-$PS4' in m, 'Got %s' % m)
# Odd word break
comp = MockApi(line='echo ${undef:-$')
print(comp)
m = list(r.Matches(comp))
self.assert_('-$HOME' in m, 'Got %s' % m)
self.assert_('-$IFS' in m, 'Got %s' % m)
#
# Double Quoted
#
# NOTE: GNU readline seems to complete closing quotes? We don't want that.
comp = MockApi(line='echo "$')
print(comp)
m = list(r.Matches(comp))
self.assert_('$HOME' in m, 'Got %s' % m) # don't need leading "
self.assert_('$IFS' in m, 'Got %s' % m)
comp = MockApi(line='echo "$P')
print(comp)
m = list(r.Matches(comp))
self.assert_('$PWD' in m, 'Got %s' % m) # don't need leading "
self.assert_('$PS4' in m, 'Got %s' % m)
#
# Prefix operator
#
if 0: # Here you need to handle VSub_Pound
comp = MockApi(line='echo ${#')
print(comp)
m = list(r.Matches(comp))
self.assert_('${#HOME' in m, 'Got %s' % m)
self.assert_('${#IFS' in m, 'Got %s' % m)
comp = MockApi(line='echo "${#P')
print(comp)
m = list(r.Matches(comp))
self.assert_('${#PWD' in m, 'Got %s' % m) # don't need leading "
self.assert_('${#PS4' in m, 'Got %s' % m)
#
# Arithmetic Context
#
comp = MockApi(line='echo "$((PWD +P')
print(comp)
m = list(r.Matches(comp))
self.assert_('+PWD' in m, 'Got %s' % m) # don't need leading "
self.assert_('+PS4' in m, 'Got %s' % m)
comp = MockApi(line='echo "$(( $P')
print(comp)
m = list(r.Matches(comp))
self.assert_('$PWD' in m, 'Got %s' % m) # don't need leading "
self.assert_('$PS4' in m, 'Got %s' % m)
def testCompletesRedirectArguments(self):
r = _MakeRootCompleter()
comp = MockApi('cat < b')
m = list(r.Matches(comp))
# Some B subdirs of the repo!
self.assert_('bin/' in m, 'Got %s' % m)
self.assert_('build/' in m, 'Got %s' % m)
self.assert_('benchmarks/' in m, 'Got %s' % m)
# This redirect does NOT take a path argument!
comp = MockApi('echo >&')
m = list(r.Matches(comp))
self.assertEqual(0, len(m))
def testCompletesWords(self):
comp_lookup = completion.Lookup()
comp_lookup.RegisterName('grep', BASE_OPTS, U1)
comp_lookup.RegisterName('__first', BASE_OPTS, U2)
r = _MakeRootCompleter(comp_lookup=comp_lookup)
comp = MockApi('grep f')
m = list(r.Matches(comp))
self.assertEqual(['foo.py ', 'foo '], m)
comp = MockApi('grep g')
m = list(r.Matches(comp))
self.assertEqual([], m)
# Complete first word
m = list(r.Matches(MockApi('g')))
self.assertEqual(['grep '], m)
# Empty completer
m = list(r.Matches(MockApi('')))
self.assertEqual(['grep ', 'sed ', 'test '], m)
# Test compound commands. These PARSE
m = list(r.Matches(MockApi('echo hi || grep f')))
m = list(r.Matches(MockApi('echo hi; grep f')))
# Brace -- does NOT parse
m = list(r.Matches(MockApi('{ echo hi; grep f')))
# TODO: Test if/for/while/case/etc.
m = list(r.Matches(MockApi('var=$v')))
m = list(r.Matches(MockApi('local var=$v')))
def testRunsUserDefinedFunctions(self):
# This is here because it's hard to test readline with the spec tests.
comp_lookup = completion.Lookup()
with open('testdata/completion/osh-unit.bash') as f:
code_str = f.read()
ex = test_lib.EvalCode(code_str, comp_lookup=comp_lookup)
r = _MakeRootCompleter(comp_lookup=comp_lookup)
# By default, we get a space on the end.
m = list(r.Matches(MockApi('mywords t')))
self.assertEqual(['three ', 'two '], sorted(m))
# No space
m = list(r.Matches(MockApi('mywords_nospace t')))
self.assertEqual(['three', 'two'], sorted(m))
# Filtered out two and bin
m = list(r.Matches(MockApi('flagX ')))
self.assertEqual(['one ', 'three '], sorted(m))
# Filter out everything EXCEPT two and bin
m = list(r.Matches(MockApi('flagX_bang ')))
self.assertEqual(['bin ', 'two '], sorted(m))
# -X with -P
m = list(r.Matches(MockApi('flagX_prefix ')))
self.assertEqual(['__one ', '__three '], sorted(m))
# TODO: Fix these!
# -P with plusdirs
m = list(r.Matches(MockApi('prefix_plusdirs b')))
self.assertEqual(['__bin ', 'benchmarks/', 'bin/', 'build/'], sorted(m))
# -X with plusdirs. We're filtering out bin/, and then it's added back by
# plusdirs. The filter doesn't kill it.
m = list(r.Matches(MockApi('flagX_plusdirs b')))
self.assertEqual(['benchmarks/', 'bin/', 'build/'], sorted(m))
# -P with dirnames. -P is NOT respected.
m = list(r.Matches(MockApi('prefix_dirnames b')))
self.assertEqual(['benchmarks/', 'bin/', 'build/'], sorted(m))
def testCompletesAssignment(self):
    # OSH doesn't do this. Here is what was noticed about bash --norc (which is
# undoubtedly different from bash_completion):
#
# foo=/ho<TAB> completes directory
# foo=/home/:/ho<TAB> completes directory
#
# foo='/ho<TAB> completes directory
# foo='/home/:/ho<TAB> does NOT complete
#
# Ditto for ". The first path is completed, but nothing after :.
#
# Ditto for echo foo=/ho
# echo foo='/ho
# echo foo="/ho
#
# It doesn't distinguish by position.
#
# TODO:
# - test with an image created with debootstrap
# - test with an Alpine image
return
_INIT_TEMPLATE = """
argv() {
python -c 'import sys; print(sys.argv[1:])' "$@"
}
fail() {
echo "Non-fatal assertion failed: $@" >&2
}
arrays_equal() {
local n=$1
shift
local left=(${@: 0 : n})
local right=(${@: n : 2*n - 1})
for (( i = 0; i < n; i++ )); do
if [[ ${left[i]} != ${right[i]} ]]; then
echo -n 'left : '; argv "${left[@]}"
echo -n 'right: '; argv "${right[@]}"
fail "Word $i differed: ${left[i]} != ${right[i]}"
return 1
fi
done
return 0
}
_init_completion() {
compadjust "$@" cur prev words cword
}
my_complete() {
local cur prev words cword split
# Test this function
if arrays_equal 2 a b a b; then
echo ok
else
echo failed
return
fi
PASSED=()
# no quotes with [[
if [[ $COMP_LINE == $ORACLE_COMP_LINE ]]; then
PASSED+=(COMP_LINE)
fi
if [[ $COMP_POINT == $ORACLE_COMP_POINT ]]; then
PASSED+=(COMP_POINT)
fi
if [[ ${#COMP_WORDS[@]} == ${#ORACLE_COMP_WORDS[@]} ]]; then
local n=${#COMP_WORDS[@]}
if arrays_equal "$n" "${COMP_WORDS[@]}" "${ORACLE_COMP_WORDS[@]}"; then
PASSED+=(COMP_WORDS)
fi
else
fail "COMP_WORDS: Expected ${ORACLE_COMP_WORDS[@]}, got ${COMP_WORDS[@]}"
fi
# This doesn't pass because COMP_WORDS and COMP_CWORD are different.
if [[ $COMP_CWORD == $ORACLE_COMP_CWORD ]]; then
#echo "passed: COMP_CWORD = $COMP_CWORD"
PASSED+=(COMP_CWORD)
else
fail "COMP_CWORD: Expected $ORACLE_COMP_CWORD, got $COMP_CWORD"
fi
#
# Now run _init_completion
#
_init_completion %(flags)s
if [[ ${#words[@]} == ${#ORACLE_words[@]} ]]; then
local n=${#words[@]}
if arrays_equal "$n" "${words[@]}" "${ORACLE_words[@]}"; then
PASSED+=(words)
fi
else
fail "COMP_WORDS: Expected ${ORACLE_words[@]}, got ${words[@]}"
fi
if [[ $cur == $ORACLE_cur ]]; then
PASSED+=(cur)
else
fail "cur: Expected $ORACLE_cur, got $cur"
fi
if [[ $prev == $ORACLE_prev ]]; then
PASSED+=(prev)
else
fail "prev: Expected $ORACLE_prev, got $prev"
fi
if [[ $cword == $ORACLE_cword ]]; then
PASSED+=(cword)
else
fail "cword: Expected $ORACLE_cword, got $cword"
fi
if [[ $split == $ORACLE_split ]]; then
PASSED+=(split)
else
fail "split: Expected $ORACLE_split, got $split"
fi
COMPREPLY=(dummy)
}
complete -F my_complete %(command)s
"""
class InitCompletionTest(unittest.TestCase):
def testMatchesOracle(self):
for i, case in enumerate(bash_oracle.CASES): # generated data
flags = case.get('_init_completion_flags')
if flags is None:
continue
# This was input
code_str = case['code']
assert code_str.endswith('\t')
log('')
log('--- Case %d: %r with flags %s', i, code_str, flags)
log('')
#print(case)
oracle_comp_words = case['COMP_WORDS']
oracle_comp_cword = case['COMP_CWORD']
oracle_comp_line = case['COMP_LINE']
oracle_comp_point = case['COMP_POINT']
# Init completion data
oracle_words = case['words']
oracle_cur = case['cur']
oracle_prev = case['prev']
oracle_cword = case['cword']
oracle_split = case['split']
#
# First test some invariants on the oracle's data.
#
self.assertEqual(code_str[:-1], oracle_comp_line)
# weird invariant that always holds. So isn't COMP_CWORD useless?
self.assertEqual(int(oracle_comp_cword), len(oracle_comp_words)-1)
# Another weird invariant. Note this is from the bash ORACLE, not from
# our mocks.
self.assertEqual(int(oracle_comp_point), len(code_str) - 1)
#
      # Now run a piece of code that compares OSH's actual data against the oracle.
#
init_code = _INIT_TEMPLATE % {
'flags': ' '.join(flags),
'command': oracle_comp_words[0]
}
#print(init_code)
arena = test_lib.MakeArena('<InitCompletionTest>')
mem = state.Mem('', [], {}, arena)
#
# Allow our code to access oracle data
#
state.SetGlobalArray(mem, 'ORACLE_COMP_WORDS', oracle_comp_words)
state.SetGlobalString(mem, 'ORACLE_COMP_CWORD', oracle_comp_cword)
state.SetGlobalString(mem, 'ORACLE_COMP_LINE', oracle_comp_line)
state.SetGlobalString(mem, 'ORACLE_COMP_POINT', oracle_comp_point)
state.SetGlobalArray(mem, 'ORACLE_words', oracle_words)
state.SetGlobalString(mem, 'ORACLE_cur', oracle_cur)
state.SetGlobalString(mem, 'ORACLE_prev', oracle_prev)
state.SetGlobalString(mem, 'ORACLE_cword', oracle_cword)
state.SetGlobalString(mem, 'ORACLE_split', oracle_split)
comp_lookup = completion.Lookup()
ex = test_lib.EvalCode(init_code, comp_lookup=comp_lookup, arena=arena,
mem=mem)
#print(ex.comp_state)
r = _MakeRootCompleter(comp_lookup=comp_lookup)
#print(r)
comp = MockApi(code_str[:-1])
m = list(r.Matches(comp))
log('matches = %s', m)
# Unterminated quote in case 5. Nothing to complete.
# TODO: use a label
if i == 5:
continue
# Our test shell script records what passed in an array.
val = ex.mem.GetVar('PASSED')
self.assertEqual(value_e.StrArray, val.tag, "Expected array, got %s" % val)
actually_passed = val.strs
should_pass = [
'COMP_WORDS', 'COMP_CWORD', 'COMP_LINE', 'COMP_POINT', # old API
'words', 'cur', 'prev', 'cword', 'split' # new API
]
#should_pass = ['COMP_LINE', 'COMP_POINT', 'words', 'cur', 'prev', 'split']
if i == 4:
should_pass.remove('COMP_WORDS')
should_pass.remove('COMP_CWORD')
should_pass.remove('cword')
should_pass.remove('words') # double quotes aren't the same
for t in should_pass:
self.assert_(
t in actually_passed, "%r was expected to pass (case %d)" % (t, i))
log('Ran %d cases', len(bash_oracle.CASES))
if __name__ == '__main__':
unittest.main()
| 28.848802
| 83
| 0.607908
|
436347cb2850b6ac4f0c344355ed4f12f76cb185
| 20,429
|
py
|
Python
|
pyscf/fci/test/test_addons.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 2
|
2019-05-28T05:25:56.000Z
|
2019-11-09T02:16:43.000Z
|
pyscf/fci/test/test_addons.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 2
|
2019-09-16T17:58:31.000Z
|
2019-09-22T17:26:01.000Z
|
pyscf/fci/test/test_addons.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 1
|
2019-11-09T02:13:16.000Z
|
2019-11-09T02:13:16.000Z
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import fci
mol = gto.Mole()
mol.verbose = 0
mol.atom = '''
H 1 -1. 0
H 0 -1. -1
H 0 -0.5 -0
H 0 -0. -1
H 1 -0.5 0
H 0 1. 1'''
mol.basis = 'sto-3g'
mol.build()
m = scf.RHF(mol)
m.conv_tol = 1e-15
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
g2e = ao2mo.incore.general(m._eri, (m.mo_coeff,)*4, compact=False)
na = fci.cistring.num_strings(norb, nelec//2)
e, ci0 = fci.direct_spin1.kernel(h1e, g2e, norb, nelec, tol=1e-15)
def tearDownModule():
global mol, m, h1e, g2e, ci0
del mol, m, h1e, g2e, ci0
class KnownValues(unittest.TestCase):
def test_large_ci(self):
res = fci.addons.large_ci(ci0, norb, nelec, tol=.1)
refstr =[('0b111' , '0b111' ),
('0b111' , '0b1011' ),
('0b1011' , '0b111' ),
('0b1011' , '0b1011' ),
('0b10101', '0b10101')]
refci = [0.86848550920009038, 0.15130668599599939, 0.15130668599597297,
0.36620088911284837, 0.10306162063159749]
self.assertTrue(numpy.allclose([abs(x[0]) for x in res], refci))
self.assertEqual([x[1:] for x in res], refstr)
res = fci.addons.large_ci(ci0, norb, nelec, tol=.1, return_strs=False)
refa = numpy.array(((0,1,2), (0,1,2), (0,1,3), (0,1,3), (0,2,4)))
refb = numpy.array(((0,1,2), (0,1,3), (0,1,2), (0,1,3), (0,2,4)))
self.assertTrue(numpy.all([x[1] for x in res] == refa))
self.assertTrue(numpy.all([x[2] for x in res] == refb))
na = fci.cistring.num_strings(6, 3)
numpy.random.seed(9)
ci1 = numpy.random.random((na,na))
ci1 /= numpy.linalg.norm(ci1)
res = fci.addons.large_ci(ci1, 6, (3,3), tol=.2)
self.assertEqual(res[0][1:], ('0b110100', '0b1101'))
def test__init__file(self):
c1 = fci.FCI(mol, m.mo_coeff)
self.assertAlmostEqual(c1.kernel()[0], -2.8227809167209683, 9)
c1 = fci.FCI(m)
self.assertAlmostEqual(c1.kernel()[0], -2.8227809167209683, 9)
def test_init_triplet(self):
ci1 = fci.addons.initguess_triplet(norb, nelec, '0b1011')
self.assertAlmostEqual(abs(ci1 + ci1.T).sum(), 0)
self.assertTrue(ci1[0,1] < 0)
def test_credes_ab(self):
a4 = 10*numpy.arange(4)[:,None]
a6 = 10*numpy.arange(6)[:,None]
b4 = numpy.arange(4)
b6 = numpy.arange(6)
self.assertTrue(numpy.allclose(fci.addons.des_a(a4+b4, 4, (3,3), 0),
[[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 1., 2., 3.],
[ 0., 0., 0., 0.],
[ 10., 11., 12., 13.],
[ 20., 21., 22., 23.]]))
self.assertTrue(numpy.allclose(fci.addons.des_a(a4+b4, 4, (3,3), 1),
[[ 0., 0., 0., 0.],
[ 0., -1., -2., -3.],
[ 0., 0., 0., 0.],
[-10.,-11.,-12.,-13.],
[ 0., 0., 0., 0.],
[ 30., 31., 32., 33.]]))
self.assertTrue(numpy.allclose(fci.addons.des_a(a4+b4, 4, (3,3), 2),
[[ 0., 1., 2., 3.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[-20.,-21.,-22.,-23.],
[-30.,-31.,-32.,-33.],
[ 0., 0., 0., 0.]]))
self.assertTrue(numpy.allclose(fci.addons.des_a(a4+b4, 4, (3,3), 3),
[[ 10., 11., 12., 13.],
[ 20., 21., 22., 23.],
[ 30., 31., 32., 33.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]]))
self.assertTrue(numpy.allclose(fci.addons.des_b(a6+b4, 4, (2,3), 0),
[[ 0., 0., 0., 0., 1., 2.],
[ 0., 0., 10., 0., 11., 12.],
[ 0., 0., 20., 0., 21., 22.],
[ 0., 0., 30., 0., 31., 32.],
[ 0., 0., 40., 0., 41., 42.],
[ 0., 0., 50., 0., 51., 52.]]))
self.assertTrue(numpy.allclose(fci.addons.des_b(a6+b4, 4, (2,3), 1),
[[ 0., 0., 0., -1., 0., 3.],
[ 0.,-10., 0.,-11., 0., 13.],
[ 0.,-20., 0.,-21., 0., 23.],
[ 0.,-30., 0.,-31., 0., 33.],
[ 0.,-40., 0.,-41., 0., 43.],
[ 0.,-50., 0.,-51., 0., 53.]]))
self.assertTrue(numpy.allclose(fci.addons.des_b(a6+b4, 4, (2,3), 2),
[[ 0., 0., 0., -2., -3., 0.],
[ 10., 0., 0.,-12.,-13., 0.],
[ 20., 0., 0.,-22.,-23., 0.],
[ 30., 0., 0.,-32.,-33., 0.],
[ 40., 0., 0.,-42.,-43., 0.],
[ 50., 0., 0.,-52.,-53., 0.]]))
self.assertTrue(numpy.allclose(fci.addons.des_b(a6+b4, 4, (2,3), 3),
[[ 1., 2., 3., 0., 0., 0.],
[ 11., 12., 13., 0., 0., 0.],
[ 21., 22., 23., 0., 0., 0.],
[ 31., 32., 33., 0., 0., 0.],
[ 41., 42., 43., 0., 0., 0.],
[ 51., 52., 53., 0., 0., 0.]]))
self.assertTrue(numpy.allclose(fci.addons.cre_a(a6+b4, 4, (2,3), 0),
[[ 20., 21., 22., 23.],
[ 40., 41., 42., 43.],
[ 50., 51., 52., 53.],
[ 0., 0., 0., 0.]]))
self.assertTrue(numpy.allclose(fci.addons.cre_a(a6+b4, 4, (2,3), 1),
[[-10.,-11.,-12.,-13.],
[-30.,-31.,-32.,-33.],
[ 0., 0., 0., 0.],
[ 50., 51., 52., 53.]]))
self.assertTrue(numpy.allclose(fci.addons.cre_a(a6+b4, 4, (2,3), 2),
[[ 0., 1., 2., 3.],
[ 0., 0., 0., 0.],
[-30.,-31.,-32.,-33.],
[-40.,-41.,-42.,-43.]]))
self.assertTrue(numpy.allclose(fci.addons.cre_a(a6+b4, 4, (2,3), 3),
[[ 0., 0., 0., 0.],
[ 0., 1., 2., 3.],
[ 10., 11., 12., 13.],
[ 20., 21., 22., 23.]]))
self.assertTrue(numpy.allclose(fci.addons.cre_b(a6+b6, 4, (2,2), 0),
[[ 2., 4., 5., 0.],
[ 12., 14., 15., 0.],
[ 22., 24., 25., 0.],
[ 32., 34., 35., 0.],
[ 42., 44., 45., 0.],
[ 52., 54., 55., 0.]]))
self.assertTrue(numpy.allclose(fci.addons.cre_b(a6+b6, 4, (2,2), 1),
[[ -1., -3., 0., 5.],
[-11.,-13., 0., 15.],
[-21.,-23., 0., 25.],
[-31.,-33., 0., 35.],
[-41.,-43., 0., 45.],
[-51.,-53., 0., 55.]]))
self.assertTrue(numpy.allclose(fci.addons.cre_b(a6+b6, 4, (2,2), 2),
[[ 0., 0., -3., -4.],
[ 10., 0.,-13.,-14.],
[ 20., 0.,-23.,-24.],
[ 30., 0.,-33.,-34.],
[ 40., 0.,-43.,-44.],
[ 50., 0.,-53.,-54.]]))
self.assertTrue(numpy.allclose(fci.addons.cre_b(a6+b6, 4, (2,2), 3),
[[ 0., 0., 1., 2.],
[ 0., 10., 11., 12.],
[ 0., 20., 21., 22.],
[ 0., 30., 31., 32.],
[ 0., 40., 41., 42.],
[ 0., 50., 51., 52.]]))
    def test_spin_square(self):
ss = fci.spin_op.spin_square(ci0, norb, nelec)
self.assertAlmostEqual(ss[0], 0, 9)
ss = fci.spin_op.spin_square0(ci0, norb, nelec)
self.assertAlmostEqual(ss[0], 0, 9)
def test_fix_spin(self):
mci = fci.FCI(mol, m.mo_coeff, False)
mci = fci.addons.fix_spin_(mci, .2, 0)
mci.kernel(nelec=(3,3))
self.assertAlmostEqual(mci.spin_square(mci.ci, mol.nao_nr(), (3,3))[0], 0, 7)
mci = fci.addons.fix_spin_(mci, .2, ss=2)
# Change initial guess to triplet state
ci0 = fci.addons.initguess_triplet(norb, (3,3), '0b10011')
mci.kernel(nelec=(3,3), ci0=ci0)
self.assertAlmostEqual(mci.spin_square(mci.ci, mol.nao_nr(), (3,3))[0], 2, 7)
def test_fix_spin_high_cost(self):
def check(mci):
mci = fci.addons.fix_spin_(mci, .2, 0)
mci.kernel(nelec=(8,8))
self.assertAlmostEqual(mci.spin_square(mci.ci, mol.nao_nr(), 16)[0], 0, 7)
mol = gto.M(atom='O 0 0 0; O 0 0 1.2', spin=2, basis='sto3g',
symmetry=1, verbose=0)
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff, False)
mci.wfnsym = 'A1g'
check(mci)
mci.wfnsym = 'A2g'
check(mci)
mci = fci.FCI(mol, mf.mo_coeff, True)
mci.wfnsym = 'A1g'
check(mci)
mci.wfnsym = 'A2g'
check(mci)
mol = gto.M(atom='O 0 0 0; O 0 0 1.2', spin=2, basis='sto3g',
verbose=0)
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff, False)
check(mci)
mci = fci.FCI(mol, mf.mo_coeff, True)
check(mci)
def test_transform_ci_for_orbital_rotation(self):
numpy.random.seed(12)
norb = 6
nelec = (4,2)
u = numpy.linalg.svd(numpy.random.random((norb,norb)))[0]
mo1 = m.mo_coeff.dot(u)
h1e_new = reduce(numpy.dot, (mo1.T, m.get_hcore(), mo1))
g2e_new = ao2mo.incore.general(m._eri, (mo1,)*4, compact=False)
e1ref, ci1ref = fci.direct_spin1.kernel(h1e_new, g2e_new, norb, nelec, tol=1e-15)
ci0 = fci.direct_spin1.kernel(h1e, g2e, norb, nelec)[1]
ci1 = fci.addons.transform_ci_for_orbital_rotation(ci0, norb, nelec, u)
e1 = fci.direct_spin1.energy(h1e_new, g2e_new, ci1, norb, nelec)
self.assertAlmostEqual(e1, e1ref, 9)
self.assertAlmostEqual(abs(abs(ci1ref)-abs(ci1)).sum(), 0, 9)
def test_overlap(self):
numpy.random.seed(12)
s = numpy.random.random((6,6))
s = s.dot(s.T) / 3
bra = numpy.random.random((15,15))
ket = numpy.random.random((15,15))
bra /= numpy.linalg.norm(bra)
ket /= numpy.linalg.norm(ket)
self.assertAlmostEqual(fci.addons.overlap(bra, ket, 6, 4), 0.7767249258737043, 9)
self.assertAlmostEqual(fci.addons.overlap(bra, ket, 6, 4, (s,s)), 0.025906419720918766, 9)
norb = 4
nelec = (1,0)
ua = numpy.linalg.svd(numpy.random.random((norb+1,norb+1)))[0]
ub = numpy.linalg.svd(numpy.random.random((norb+1,norb+1)))[0]
s = numpy.dot(ua[:,:norb].T, ub[:,:norb])
ci0 = numpy.random.random((norb,1))
ci0 /= numpy.linalg.norm(ci0)
ci1 = numpy.random.random((norb,1))
ci1 /= numpy.linalg.norm(ci1)
ovlp = fci.addons.overlap(ci0, ci1, norb, nelec, s)
self.assertAlmostEqual(ovlp, (ci0*ci1.T*s).sum(), 9)
def test_det_overlap(self):
numpy.random.seed(12)
norb = 4
nelec = (2,2)
ua = numpy.linalg.svd(numpy.random.random((norb+1,norb+1)))[0]
ub = numpy.linalg.svd(numpy.random.random((norb+1,norb+1)))[0]
s = numpy.dot(ua[:,:norb].T, ub[:,:norb])
strs = fci.cistring.make_strings(range(norb), nelec[0])
na = len(strs)
ci0 = numpy.random.random((na,na))
ci0 /= numpy.linalg.norm(ci0)
ci1 = numpy.random.random((na,na))
ci1 /= numpy.linalg.norm(ci1)
ovlpa = numpy.zeros((na,na))
ovlpb = numpy.zeros((na,na))
for ia in range(na):
for ja in range(na):
ovlpa[ia,ja] = fci.addons.det_overlap(strs[ia], strs[ja], norb, s)
for ib in range(na):
for jb in range(na):
ovlpb[ib,jb] = fci.addons.det_overlap(strs[ib], strs[jb], norb, s)
ovlp = numpy.einsum('ab,ij,ai,bj->', ci0, ci1, ovlpa, ovlpb)
ref = fci.addons.overlap(ci0, ci1, norb, nelec, s)
self.assertAlmostEqual(ovlp, ref, 9)
s1 = numpy.random.seed(1)
s1 = numpy.random.random((6,6))
s1 = s1 + s1.T
val = fci.addons.det_overlap(int('0b10011',2), int('0b011010',2), 6, s1)
self.assertAlmostEqual(val, -0.273996425116, 12)
def test_guess_wfnsym(self):
orbsym = [2,3,6,7]
wfnsym = fci.addons.guess_wfnsym(numpy.array([-.5,0,0,0,0,0]),
len(orbsym), (4,2), orbsym)
self.assertEqual(wfnsym, 1)
orbsym = [2,3,6,7]
wfnsym = fci.addons.guess_wfnsym(numpy.array([-.5,0,.5,0,0,0]),
len(orbsym), (4,2), orbsym)
self.assertEqual(wfnsym, 1)
def test_cylindrical_init_guess(self):
mol = gto.M(atom='O; O 1 1.2', spin=2, symmetry=True)
orbsym = [6,7,2,3]
ci0 = fci.addons.cylindrical_init_guess(mol, 4, (3,3), orbsym, wfnsym=10)
ci0 = ci0[0].reshape(4,4)
self.assertAlmostEqual(ci0[0,0], .5**.5, 12)
self.assertAlmostEqual(ci0[1,1], -.5**.5, 12)
ci0 = fci.addons.cylindrical_init_guess(mol, 4, (3,3), orbsym, wfnsym=10, singlet=False)
ci0 = ci0[0].reshape(4,4)
self.assertAlmostEqual(ci0[0,1], .5**.5, 12)
self.assertAlmostEqual(ci0[1,0], -.5**.5, 12)
def test_symmetrize_wfn(self):
def finger(ci1):
numpy.random.seed(1)
fact = numpy.random.random(ci1.shape).ravel()
return numpy.dot(ci1.ravel(), fact.ravel())
norb = 6
nelec = neleca, nelecb = 4,3
na = fci.cistring.num_strings(norb, neleca)
nb = fci.cistring.num_strings(norb, nelecb)
ci = numpy.ones((na,nb))
val = finger(fci.addons.symmetrize_wfn(ci, norb, nelec, [0,6,0,3,5,2], 2))
self.assertAlmostEqual(val, 3.010642818688976, 12)
def test_symm_initguess(self):
norb = 6
nelec = (4,2)
orbsym = [6,5,7,2,3,0]
ci1 = fci.addons.symm_initguess(norb, nelec, orbsym, wfnsym=0)
ci2 = fci.addons.symmetrize_wfn(ci1, norb, nelec, orbsym, wfnsym=0)
self.assertEqual(abs(ci1-ci2).max(), 0)
ci1 = fci.addons.symm_initguess(norb, nelec, orbsym, wfnsym=5)
ci2 = fci.addons.symmetrize_wfn(ci1, norb, nelec, orbsym, wfnsym=5)
self.assertEqual(abs(ci1-ci2).max(), 0)
ci1 = fci.addons.symm_initguess(norb, nelec, orbsym, wfnsym=3)
ci2 = fci.addons.symmetrize_wfn(ci1, norb, nelec, orbsym, wfnsym=3)
self.assertEqual(abs(ci1-ci2).max(), 0)
ci1 = fci.addons.symm_initguess(6, (4,3), [0,1,5,4,3,7], wfnsym=1, irrep_nelec=None)
self.assertEqual(numpy.argwhere(ci1!=0).tolist(), [[0,2]])
ci1 = fci.addons.symm_initguess(6, (4,3), [0,1,5,4,3,7], wfnsym=0, irrep_nelec={0:[3,2],3:2})
self.assertEqual(numpy.argwhere(ci1!=0).tolist(), [[2,5], [3,4]])
ci1 = fci.addons.symm_initguess(6, (3,3), [0,1,5,4,3,7], wfnsym=2, irrep_nelec={1:[0,1],3:[1,0]})
self.assertEqual(numpy.argwhere(ci1!=0).tolist(), [[5,0]])
ci1 = fci.addons.symm_initguess(6, (3,3), [0,1,5,4,3,7], wfnsym=3, irrep_nelec={5:[0,1],3:[1,0]})
self.assertEqual(numpy.argwhere(ci1!=0).tolist(), [[4,2], [7,0]])
self.assertRaises(RuntimeError, fci.addons.symm_initguess, 6, (3,2), [3,3,3,3,3,3], wfnsym=2)
ci1 = fci.addons.symm_initguess(6, (3,3), [0,1,5,4,3,7], wfnsym=3, irrep_nelec={5:[0,1],3:[1,0]})
self.assertEqual(fci.addons.guess_wfnsym(ci1, 6, (3,3), [0,1,5,4,3,7]), 3)
def test_des_and_cre(self):
a4 = 10*numpy.arange(4)[:,None]
a6 = 10*numpy.arange(6)[:,None]
b4 = numpy.arange(4)
b6 = numpy.arange(6)
self.assertAlmostEqual(lib.finger(fci.addons.des_a(a4+b4, 4, (3,3), 0)), -31.99739808931113, 12)
self.assertAlmostEqual(lib.finger(fci.addons.des_a(a4+b4, 4, (3,3), 1)), -68.97044878458135, 12)
self.assertAlmostEqual(lib.finger(fci.addons.des_a(a4+b4, 4, (3,3), 2)), -41.22836642162049, 12)
self.assertAlmostEqual(lib.finger(fci.addons.des_a(a4+b4, 4, (3,3), 3)), -29.88708752568659, 12)
self.assertAlmostEqual(lib.finger(fci.addons.des_b(a6+b4, 4, (2,3), 0)), -163.5210711323742, 12)
self.assertAlmostEqual(lib.finger(fci.addons.des_b(a6+b4, 4, (2,3), 1)), -187.1999296644511, 12)
self.assertAlmostEqual(lib.finger(fci.addons.des_b(a6+b4, 4, (2,3), 2)), 285.3422683187559 , 12)
self.assertAlmostEqual(lib.finger(fci.addons.des_b(a6+b4, 4, (2,3), 3)), 311.44080890546695, 12)
self.assertAlmostEqual(lib.finger(fci.addons.cre_a(a6+b4, 4, (2,3), 0)), -39.48915822224921, 12)
self.assertAlmostEqual(lib.finger(fci.addons.cre_a(a6+b4, 4, (2,3), 1)), 12.45125619610399 , 12)
self.assertAlmostEqual(lib.finger(fci.addons.cre_a(a6+b4, 4, (2,3), 2)), 12.016451871939289, 12)
self.assertAlmostEqual(lib.finger(fci.addons.cre_a(a6+b4, 4, (2,3), 3)), 4.44581041782693 , 12)
self.assertAlmostEqual(lib.finger(fci.addons.cre_b(a6+b6, 4, (2,2), 0)), -56.76161034968627, 12)
self.assertAlmostEqual(lib.finger(fci.addons.cre_b(a6+b6, 4, (2,2), 1)), 23.167401126371875, 12)
self.assertAlmostEqual(lib.finger(fci.addons.cre_b(a6+b6, 4, (2,2), 2)), 30.522245459279716, 12)
self.assertAlmostEqual(lib.finger(fci.addons.cre_b(a6+b6, 4, (2,2), 3)), -57.04404450083064, 12)
if __name__ == "__main__":
print("Full Tests for fci.addons")
unittest.main()
| 49.345411
| 105
| 0.453375
|
b124735ccffddf85fded8d9e629fa8b4cd8f024a
| 792
|
py
|
Python
|
tests/test_core_middlewares.py
|
stjordanis/datar
|
4e2b5db026ad35918954576badef9951928c0cb1
|
[
"MIT"
] | 110
|
2021-03-09T04:10:40.000Z
|
2022-03-13T10:28:20.000Z
|
tests/test_core_middlewares.py
|
sthagen/datar
|
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
|
[
"MIT"
] | 54
|
2021-06-20T18:53:44.000Z
|
2022-03-29T22:13:07.000Z
|
tests/test_core_middlewares.py
|
sthagen/datar
|
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
|
[
"MIT"
] | 11
|
2021-06-18T03:03:14.000Z
|
2022-02-25T11:48:26.000Z
|
import pytest
from datar.core.middlewares import *
from datar.all import *
# def test_inverted_repr():
# iv = Inverted('a')
# assert repr(iv) == f"Inverted(['a'])"
# def test_inverted_evaluate_series():
# df = tibble(x=1)
# out = Inverted(df.x).evaluate(['x'])
# assert out == []
# out = Inverted(1).evaluate(['x'])
# assert out == []
# def test_inverted_out_of_bounds():
# with pytest.raises(ColumnNotExistingError):
# Inverted(10).evaluate(['x'])
# with pytest.raises(ColumnNotExistingError):
# Inverted('y').evaluate(['x'])
# def test_negated_repr():
# ng = Negated([1,2,3])
# assert repr(ng) == f"Negated([1, 2, 3])"
def test_curcolumn():
out = CurColumn.replace_args([CurColumn()], 'cur')
assert out == ('cur', )
| 26.4
| 54
| 0.607323
|
8fc058bff8b025ba75d797b894c3131e385eb270
| 15,085
|
py
|
Python
|
cloudkitty/storage/v2/influx.py
|
elastx/cloudkitty
|
9654373f12daad606bfabac70a48b68279d522bd
|
[
"Apache-2.0"
] | 97
|
2015-10-18T02:53:17.000Z
|
2022-03-07T05:15:39.000Z
|
cloudkitty/storage/v2/influx.py
|
shanafang9/cloudkitty
|
911c90569ccb09ecf0d7aa11a5a707c8ebda09cf
|
[
"Apache-2.0"
] | 1
|
2017-11-29T15:39:27.000Z
|
2017-11-29T15:39:27.000Z
|
cloudkitty/storage/v2/influx.py
|
shanafang9/cloudkitty
|
911c90569ccb09ecf0d7aa11a5a707c8ebda09cf
|
[
"Apache-2.0"
] | 54
|
2015-10-27T10:55:02.000Z
|
2022-02-18T08:23:19.000Z
|
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import influxdb
from oslo_config import cfg
from oslo_log import log
from cloudkitty import dataframe
from cloudkitty.storage import v2 as v2_storage
from cloudkitty.utils import tz as tzutils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('period', 'cloudkitty.collector', 'collect')
INFLUX_STORAGE_GROUP = 'storage_influxdb'
influx_storage_opts = [
cfg.StrOpt('username', help='InfluxDB username'),
cfg.StrOpt('password', help='InfluxDB password', secret=True),
cfg.StrOpt('database', help='InfluxDB database'),
cfg.StrOpt('retention_policy', default='autogen',
help='Retention policy to use'),
cfg.StrOpt('host', help='InfluxDB host', default='localhost'),
cfg.IntOpt('port', help='InfluxDB port', default=8086),
cfg.BoolOpt(
'use_ssl',
help='Set to true to use ssl for influxDB connection. '
'Defaults to False',
default=False,
),
cfg.BoolOpt(
'insecure',
help='Set to true to authorize insecure HTTPS connections to '
'influxDB. Defaults to False',
default=False,
),
cfg.StrOpt(
'cafile',
help='Path of the CA certificate to trust for HTTPS connections',
default=None
),
]
CONF.register_opts(influx_storage_opts, INFLUX_STORAGE_GROUP)
PERIOD_FIELD_NAME = '__ck_collect_period'
def _sanitized_groupby(groupby):
forbidden = ('time',)
return [g for g in groupby if g not in forbidden] if groupby else []
class InfluxClient(object):
"""Classe used to ease interaction with InfluxDB"""
def __init__(self, chunk_size=500, autocommit=True, default_period=3600):
"""Creates an InfluxClient object.
:param chunk_size: Size after which points should be pushed.
:param autocommit: Set to false to disable autocommit
        :param default_period: Placeholder for the period in case it can't
be determined.
"""
self._conn = self._get_influx_client()
self._chunk_size = chunk_size
self._autocommit = autocommit
self._retention_policy = CONF.storage_influxdb.retention_policy
self._default_period = default_period
self._points = []
@staticmethod
def _get_influx_client():
verify = CONF.storage_influxdb.use_ssl and not \
CONF.storage_influxdb.insecure
if verify and CONF.storage_influxdb.cafile:
verify = CONF.storage_influxdb.cafile
return influxdb.InfluxDBClient(
username=CONF.storage_influxdb.username,
password=CONF.storage_influxdb.password,
host=CONF.storage_influxdb.host,
port=CONF.storage_influxdb.port,
database=CONF.storage_influxdb.database,
ssl=CONF.storage_influxdb.use_ssl,
verify_ssl=verify,
)
def retention_policy_exists(self, database, policy):
policies = self._conn.get_list_retention_policies(database)
return policy in [pol['name'] for pol in policies]
def commit(self):
total_points = len(self._points)
if len(self._points) < 1:
return
LOG.debug('Pushing {} points to InfluxDB'.format(total_points))
self._conn.write_points(self._points,
retention_policy=self._retention_policy)
self._points = []
def append_point(self,
metric_type,
start,
period,
point):
"""Adds a point to commit to InfluxDB.
:param metric_type: Name of the metric type
:type metric_type: str
:param start: Start of the period the point applies to
:type start: datetime.datetime
:param period: length of the period the point applies to (in seconds)
:type period: int
:param point: Point to push
:type point: dataframe.DataPoint
"""
measurement_fields = dict(point.metadata)
measurement_fields['qty'] = float(point.qty)
measurement_fields['price'] = float(point.price)
measurement_fields['unit'] = point.unit
# Unfortunately, this seems to be the fastest way: Having several
# measurements would imply a high client-side workload, and this allows
# us to filter out unrequired keys
measurement_fields['groupby'] = '|'.join(point.groupby.keys())
measurement_fields['metadata'] = '|'.join(point.metadata.keys())
measurement_fields[PERIOD_FIELD_NAME] = period
measurement_tags = dict(point.groupby)
measurement_tags['type'] = metric_type
self._points.append({
'measurement': 'dataframes',
'tags': measurement_tags,
'fields': measurement_fields,
'time': start,
})
if self._autocommit and len(self._points) >= self._chunk_size:
self.commit()
@staticmethod
def _get_filter(key, value):
format_string = ''
if isinstance(value, str):
format_string = """"{}"='{}'"""
elif isinstance(value, (int, float)):
format_string = """"{}"={}"""
return format_string.format(key, value)
@staticmethod
def _get_time_query(begin, end):
return " WHERE time >= '{}' AND time < '{}'".format(
begin.isoformat(), end.isoformat())
def _get_filter_query(self, filters):
if not filters:
return ''
return ' AND ' + ' AND '.join(
self._get_filter(k, v) for k, v in filters.items())
@staticmethod
def _get_type_query(types):
if not types:
return ''
type_query = ' OR '.join("type='{}'".format(mtype)
for mtype in types)
return ' AND (' + type_query + ')'
def get_total(self, types, begin, end, custom_fields,
groupby=None, filters=None):
self.validate_custom_fields(custom_fields)
query = 'SELECT %s FROM "dataframes"' % custom_fields
query += self._get_time_query(begin, end)
query += self._get_filter_query(filters)
query += self._get_type_query(types)
if groupby:
groupby_query = ''
if 'time' in groupby:
groupby_query += 'time(' + str(self._default_period) + 's)'
groupby_query += ',' if groupby else ''
if groupby:
groupby_query += '"' + '","'.join(
_sanitized_groupby(groupby)) + '"'
query += ' GROUP BY ' + groupby_query
query += ';'
total = self._conn.query(query)
LOG.debug(
"Data [%s] received when executing query [%s].", total, query)
return total
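    # Illustrative note (not part of the original module): with
    # custom_fields="SUM(qty) AS qty, SUM(price) AS rate", types=["instance"],
    # groupby=["time", "type"], no extra filters and the default period of
    # 3600s, the generated InfluxQL roughly reads (<begin>/<end> stand for the
    # isoformat timestamps):
    #   SELECT SUM(qty) AS qty, SUM(price) AS rate FROM "dataframes"
    #   WHERE time >= '<begin>' AND time < '<end>' AND (type='instance')
    #   GROUP BY time(3600s),"type";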
@staticmethod
def validate_custom_fields(custom_fields):
forbidden_clauses = ["select", "from", "drop", "delete", "create",
"alter", "insert", "update"]
for field in custom_fields.split(","):
if field.lower() in forbidden_clauses:
raise RuntimeError("Clause [%s] is not allowed in custom"
" fields summary get report. The following"
" clauses are not allowed [%s].",
field, forbidden_clauses)
def retrieve(self,
types,
filters,
begin, end,
offset=0, limit=1000, paginate=True):
query = 'SELECT * FROM "dataframes"'
query += self._get_time_query(begin, end)
query += self._get_filter_query(filters)
query += self._get_type_query(types)
if paginate:
query += ' LIMIT {} OFFSET {}'.format(limit, offset)
query += ';'
total_query = 'SELECT COUNT(groupby) FROM "dataframes"'
total_query += self._get_time_query(begin, end)
total_query += self._get_filter_query(filters)
total_query += self._get_type_query(types)
total_query += ';'
total, result = self._conn.query(total_query + query)
total = sum(point['count'] for point in total.get_points())
return total, result
@staticmethod
def _get_time_query_delete(begin, end):
output = ""
if begin:
output += " WHERE time >= '{}'".format(begin.isoformat())
if end:
output += " AND " if output else " WHERE "
output += "time < '{}'".format(end.isoformat())
return output
def delete(self, begin, end, filters):
query = 'DELETE FROM "dataframes"'
query += self._get_time_query_delete(begin, end)
filter_query = self._get_filter_query(filters)
if 'WHERE' not in query and filter_query:
query += " WHERE " + filter_query[5:]
else:
query += filter_query
query += ';'
self._conn.query(query)
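# Usage sketch (illustrative, not part of the original module): shows the
# append/commit flow of InfluxClient based only on the docstrings and helpers
# above. The DataPoint arguments mirror _point_to_dataframe_entry() below,
# the values are placeholders, and instantiating InfluxClient requires a valid
# [storage_influxdb] configuration.
def _influx_client_usage_sketch():  # pragma: no cover
    client = InfluxClient(chunk_size=500, autocommit=False)
    point = dataframe.DataPoint(
        'GiB',                           # unit
        1.0,                             # qty
        0.04,                            # price
        {'project_id': 'demo-project'},  # groupby tags
        {'flavor': 'm1.small'},          # metadata fields
    )
    start = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
    client.append_point('instance', start, 3600, point)
    client.commit()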
class InfluxStorage(v2_storage.BaseStorage):
def __init__(self, *args, **kwargs):
super(InfluxStorage, self).__init__(*args, **kwargs)
self._default_period = kwargs.get('period') or CONF.collect.period
self._conn = InfluxClient(default_period=self._default_period)
def init(self):
policy = CONF.storage_influxdb.retention_policy
database = CONF.storage_influxdb.database
if not self._conn.retention_policy_exists(database, policy):
LOG.error(
'Archive policy "{}" does not exist in database "{}"'.format(
policy, database)
)
def push(self, dataframes, scope_id=None):
for frame in dataframes:
period = tzutils.diff_seconds(frame.end, frame.start)
for type_, point in frame.iterpoints():
self._conn.append_point(type_, frame.start, period, point)
self._conn.commit()
@staticmethod
def _check_begin_end(begin, end):
if not begin:
begin = tzutils.get_month_start()
if not end:
end = tzutils.get_next_month()
return tzutils.local_to_utc(begin), tzutils.local_to_utc(end)
@staticmethod
def _point_to_dataframe_entry(point):
groupby = filter(bool, (point.pop('groupby', None) or '').split('|'))
metadata = filter(bool, (point.pop('metadata', None) or '').split('|'))
return dataframe.DataPoint(
point['unit'],
point['qty'],
point['price'],
{key: point.get(key, '') for key in groupby},
{key: point.get(key, '') for key in metadata},
)
def _build_dataframes(self, points):
dataframes = {}
for point in points:
point_type = point['type']
time = tzutils.dt_from_iso(point['time'])
period = point.get(PERIOD_FIELD_NAME) or self._default_period
timekey = (
time,
tzutils.add_delta(time, datetime.timedelta(seconds=period)))
if timekey not in dataframes.keys():
dataframes[timekey] = dataframe.DataFrame(
start=timekey[0],
end=timekey[1])
dataframes[timekey].add_point(
self._point_to_dataframe_entry(point), point_type)
output = list(dataframes.values())
output.sort(key=lambda frame: (frame.start, frame.end))
return output
def retrieve(self, begin=None, end=None,
filters=None,
metric_types=None,
offset=0, limit=1000, paginate=True):
begin, end = self._check_begin_end(begin, end)
total, resp = self._conn.retrieve(
metric_types, filters, begin, end, offset, limit, paginate)
# Unfortunately, a ResultSet has no values() method, so we need to
# get them manually
points = []
for _, item in resp.items():
points += list(item)
return {
'total': total,
'dataframes': self._build_dataframes(points)
}
def delete(self, begin=None, end=None, filters=None):
self._conn.delete(begin, end, filters)
def _get_total_elem(self, begin, end, groupby, series_groupby, point):
if groupby and 'time' in groupby:
begin = tzutils.dt_from_iso(point['time'])
period = point.get(PERIOD_FIELD_NAME) or self._default_period
end = tzutils.add_delta(begin, datetime.timedelta(seconds=period))
output = {
'begin': begin,
'end': end,
}
for key in point.keys():
if "time" != key:
output[key] = point[key]
if groupby:
for group in _sanitized_groupby(groupby):
output[group] = series_groupby.get(group, '')
return output
def total(self, groupby=None, begin=None, end=None, metric_types=None,
filters=None, offset=0, limit=1000, paginate=True,
custom_fields="SUM(qty) AS qty, SUM(price) AS rate"):
begin, end = self._check_begin_end(begin, end)
total = self._conn.get_total(metric_types, begin, end,
custom_fields, groupby, filters)
output = []
for (series_name, series_groupby), points in total.items():
for point in points:
# NOTE(peschk_l): InfluxDB returns all timestamps for a given
# period and interval, even those with no data. This filters
# out periods with no data
# NOTE (rafaelweingartner): the summary get API is allowing
# users to customize the report. Therefore, we only ignore
# data points, if all of the entries have None values.
# Otherwise, they are presented to the user.
if [k for k in point.keys() if point[k]]:
output.append(self._get_total_elem(
tzutils.utc_to_local(begin),
tzutils.utc_to_local(end),
groupby,
series_groupby,
point))
groupby = _sanitized_groupby(groupby)
if groupby:
output.sort(key=lambda x: [x[group] for group in groupby])
return {
'total': len(output),
'results': output[offset:offset + limit] if paginate else output,
}
| 36.262019
| 79
| 0.590255
|
a22eeccc759c78131ff8403061e08b4526577288
| 5,704
|
py
|
Python
|
examples/sector-coupling/biomass-synthetic-fuels-carbon-management.py
|
hertelm/PyPSA
|
7c0561ebaa104a264ca6229c72c82370cbab66f5
|
[
"MIT"
] | 594
|
2017-10-20T19:02:15.000Z
|
2022-03-31T10:16:23.000Z
|
examples/sector-coupling/biomass-synthetic-fuels-carbon-management.py
|
hertelm/PyPSA
|
7c0561ebaa104a264ca6229c72c82370cbab66f5
|
[
"MIT"
] | 271
|
2017-10-23T15:12:03.000Z
|
2022-03-29T10:20:36.000Z
|
examples/sector-coupling/biomass-synthetic-fuels-carbon-management.py
|
hertelm/PyPSA
|
7c0561ebaa104a264ca6229c72c82370cbab66f5
|
[
"MIT"
] | 286
|
2017-10-23T09:45:15.000Z
|
2022-03-28T15:23:40.000Z
|
## Biomass, synthetic fuels and carbon management
#
#In this example we show how to manage different biomass stocks with different potentials and costs, carbon dioxide hydrogenation from biogas, direct air capture (DAC) and carbon capture and usage/sequestration/cycling (CCU/S/C).
#
#Demand for electricity and diesel transport has to be met from various biomass sources, natural gas with the possibility of carbon capture, electrolysis for hydrogen production, direct air capture of CO2, and diesel synthesis via Fischer-Tropsch.
#
#The system has to reach a target of net negative emissions over the period.
#
#All numbers/costs/efficiencies are fictitious to allow easy analysis.
#
#
#This Jupyter Notebook is also available to download at: http://www.pypsa.org/examples/biomass-synthetic-fuels-carbon-management.ipynb.
#
#It demonstrates features of the energy system modelling tool PyPSA : https://github.com/PyPSA/PyPSA.
import pypsa
import numpy as np
#First tell PyPSA that links can have multiple outputs by
#overriding the component_attrs. This can be done for
#as many buses as you need with format busi for i = 2,3,4,5,....
#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs
override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()})
override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"]
override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"]
override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"]
override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"]
n = pypsa.Network(override_component_attrs=override_component_attrs)
n.set_snapshots(range(10))
#add a constant electrical load
n.add("Bus","bus")
n.add("Load","load",bus="bus",
p_set=1.)
#add a constant demand for transport
n.add("Bus","transport")
n.add("Load","transport",bus="transport",
p_set=1.)
n.add("Bus","diesel")
n.add("Store","diesel",bus="diesel",
e_cyclic=True,
e_nom=1000.)
n.add("Bus","hydrogen")
n.add("Store","hydrogen",bus="hydrogen",
e_cyclic=True,
e_nom=1000.)
#n.add("Load","hydrogen",
# bus="hydrogen",
# p_set=1.)
n.add("Link","electrolysis",
p_nom=2.,
efficiency=0.8,
bus0="bus",
bus1="hydrogen")
#Allow production of diesel from H2 and CO2 using Fischer-Tropsch
n.add("Link","FT",
p_nom=4,
bus0="hydrogen",
bus1="diesel",
bus2="co2 stored",
efficiency=1.,
efficiency2=-1)
#minus sign because opposite to how fossil fuels used:
#CH4 burning puts CH4 down, atmosphere up
n.add("Carrier","co2",
co2_emissions=-1.)
#this tracks CO2 in the atmosphere
n.add("Bus","co2 atmosphere",
carrier="co2")
#NB: can also be negative
n.add("Store","co2 atmosphere",
e_nom=1000,
e_min_pu=-1,
bus="co2 atmosphere")
#this tracks CO2 stored, e.g. underground
n.add("Bus","co2 stored")
#NB: can also be negative
n.add("Store","co2 stored",
e_nom = 1000,
e_min_pu=-1,
bus="co2 stored")
#direct air capture consumes electricity to take CO2 from the air to the underground store
n.add("Link","DAC",
bus0="bus",
bus1="co2 stored",
bus2 = "co2 atmosphere",
efficiency=1,
efficiency2=-1,
p_nom=5.)
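#Sign convention recap for the multi-output links above (FT and DAC), following
#the PyPSA link documentation referenced earlier: the flow delivered to bus2 is
#efficiency2 times the link input p0, so efficiency2=-1 means the link withdraws
#from that bus (FT consumes "co2 stored", DAC removes CO2 from the atmosphere
#while depositing it underground).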
#meet transport with diesel
n.add("Link","diesel car",
bus0="diesel",
bus1="transport",
bus2="co2 atmosphere",
efficiency=1.,
efficiency2=1.,
p_nom=2.)
n.add("Bus","gas")
n.add("Store","gas",
e_initial=50,
e_nom=50,
marginal_cost=20,
bus="gas")
n.add("Link","OCGT",
bus0 = "gas",
bus1 = "bus",
bus2 = "co2 atmosphere",
p_nom_extendable=True,
efficiency = 0.5,
efficiency2 = 1)
n.add("Link","OCGT+CCS",
bus0 = "gas",
bus1 = "bus",
bus2 = "co2 stored",
bus3 = "co2 atmosphere",
p_nom_extendable=True,
efficiency = 0.4,
efficiency2 = 0.9,
efficiency3 = 0.1)
#Cheap and expensive biomass
biomass_marginal_cost = [20.,50.]
biomass_stored = [40.,15.]
for i in range(2):
n.add("Bus","biomass"+str(i))
n.add("Store","biomass"+str(i),
bus="biomass"+str(i),
e_nom_extendable=True,
marginal_cost=biomass_marginal_cost[i],
e_nom=biomass_stored[i],
e_initial=biomass_stored[i])
#simultaneously empties and refills co2 atmosphere
n.add("Link","biomass"+str(i),
bus0 = "biomass"+str(i),
bus1 = "bus",
p_nom_extendable=True,
efficiency = 0.5)
n.add("Link","biomass+CCS"+str(i),
bus0 = "biomass"+str(i),
bus1 = "bus",
bus2 = "co2 stored",
bus3 = "co2 atmosphere",
p_nom_extendable=True,
efficiency = 0.4,
efficiency2 = 1.,
efficiency3 = -1)
#can go to -50, but at some point can't generate enough electricity for DAC and demand
target = -50
n.add("GlobalConstraint","co2_limit",
sense="<=",
carrier_attribute="co2_emissions",
constant=target)
n.lopf()
n.stores_t.e.plot()
n.links_t.p0[["biomass+CCS0","biomass+CCS1","OCGT+CCS","DAC"]].plot()
#at all times, the amount of carbon is constant
n.stores_t.e[["co2 stored","co2 atmosphere","gas","diesel"]].sum(axis=1)
| 28.098522
| 245
| 0.653401
|
0605cffb2e645e794b613ef54a83047cc45b39a7
| 2,077
|
py
|
Python
|
handler.py
|
dujinle/AccountByTornado
|
ef76be1d8cfffea2797bf024dcb0eaa887ca0aff
|
[
"Apache-2.0"
] | null | null | null |
handler.py
|
dujinle/AccountByTornado
|
ef76be1d8cfffea2797bf024dcb0eaa887ca0aff
|
[
"Apache-2.0"
] | null | null | null |
handler.py
|
dujinle/AccountByTornado
|
ef76be1d8cfffea2797bf024dcb0eaa887ca0aff
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sys, os, uuid
import json, time
import tornado.web
import tornado_mysql.pools
import redis
import logging
reload(sys)
sys.setdefaultencoding('utf8')
import common
import config
logger = logging.getLogger('web')
class RequestHandler(tornado.web.RequestHandler):
def write(self, trunk):
if type(trunk) == int:
trunk = str(trunk)
super(RequestHandler, self).write(trunk)
def gen_result(self, code, message, result):
# TODO JWT
res = '{ '
res += '"code": %s, ' % code
res += '"message": "%s"' % message
if result is None:
res += ' }'
return res
if not isinstance(result, basestring) and type(result) <> int:
result = json.dumps(result, sort_keys=True)
res += '",result": %s' % result
res += ' }'
return res
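	# Example (illustrative, not in the original): gen_result(0, 'ok', {'uid': 1})
	# returns '{ "code": 0, "message": "ok", "result": {"uid": 1} }'.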
def exception_handle(self, message):
# TODO missing code
logger.error(message)
self.write(self.gen_result(-1, message, '{}'))
return
def session_set(self, uid):
uu = str(uuid.uuid1())
r = common.get_redis_1()
if r is None:
logger.error('Invalid Redis connection')
return None
try:
r.set(uu, uid, ex=config.Cookie_ExpireTime)
self.set_secure_cookie('session_id', uu)
except Exception, e:
logger.error('The database operation failed (Redis.Set)')
return None
return uu
def session_rm(self):
uu = self.get_secure_cookie('session_id')
if uu is None:
return
r = common.get_redis_1()
if r is None:
logger.error('Invalid Redis connection')
return None
try:
r.delete(uu)
self.set_secure_cookie('session_id', '')
except Exception, e:
logger.error('The database operation failed (Redis.Set)')
return None
def session_get(self):
		return '111'  # NOTE: hard-coded debug stub; the Redis lookup below is never reached
uu = self.get_secure_cookie('session_id')
if uu is None:
return
r = common.get_redis_1()
if r is None:
logger.error('Invalid Redis connection')
return None
try:
return r.get(uu)
except Exception, e:
logger.error('The database operation failed (Redis.Set)')
return None
def get_cur_time(self):
return time.strftime('%Y-%m-%d %X',time.localtime(time.time()))
| 22.576087
| 65
| 0.681271
|
75b07c9242d4c686bb99844ca4e7c5abee0e9ee2
| 1,624
|
py
|
Python
|
jebpy/__init__.py
|
JebBarbas/jebpy
|
547386bf09153efee1dabd315df865cffb00aa45
|
[
"MIT"
] | 1
|
2021-01-06T21:11:01.000Z
|
2021-01-06T21:11:01.000Z
|
jebpy/__init__.py
|
JebBarbas/jebpy
|
547386bf09153efee1dabd315df865cffb00aa45
|
[
"MIT"
] | null | null | null |
jebpy/__init__.py
|
JebBarbas/jebpy
|
547386bf09153efee1dabd315df865cffb00aa45
|
[
"MIT"
] | null | null | null |
"""
jebpy
This module contains submodules that help you with difficult tasks
installation
pip install jebpy
input_types
Different types of input() to capture data in the console: capture the
type of data that you want without writing conversions or try/except blocks to prevent the user
from entering a different type of data than the one you want.
Contains:
- input_str (function)
- input_str_required (function)
- input_int (function)
- input_float (function)
- input_bool (function)
- input_yesno (function)
is_string
Check if the given string is an email, password, #HEX color and more without
writing any conditionals or regex expressions. (Uses module re)
Contains:
- is_custom (function)
- is_phone_number (function)
- is_email (function)
- is_password (function)
- is_hex_color_3 (function)
- is_hex_color_6 (function)
password_generator
Generate a random string of the length that you want. (Uses module random)
Contains:
- generate_password (function)
percentage
Introduces the PercentageVar, a special type of variable for percentages and probabilities.
Also includes the WeightOption to use in the WeightSelector to make random choices based on weights; you can
also roll a dice() or flip a coin(). (Uses module random)
Contains:
- PerVar (class)
- WeightOption (class)
- WeightSelector (class)
- dice (function)
- coin (function)
"""
import jebpy
__version__ = 'v1.0.0'
__all__ = ['input_types','is_strings','password_generator','percentage']
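# Usage sketch (illustrative, not part of the original module). The calls below
# are inferred only from the docstring above; module and function signatures are
# assumptions (note __all__ lists 'is_strings' while the docstring says
# is_string), so check the submodules before relying on them.
#
#   from jebpy.input_types import input_int, input_yesno
#   from jebpy.password_generator import generate_password
#   from jebpy.percentage import dice, coin
#
#   age = input_int("How old are you? ")   # re-asks until an integer is typed
#   ok = input_yesno("Continue? ")          # True/False from a yes/no answer
#   token = generate_password(16)           # random 16-character string
#   print(dice(), coin())                   # random die roll / coin flip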
| 31.230769
| 109
| 0.698892
|
1be10a15aa6adbaff4d0f0144bb4b2d9989c37d5
| 2,475
|
py
|
Python
|
server.py
|
Towerthousand/tvhack
|
080363e90be7c5ab9308481437d6b8c64bd8030a
|
[
"MIT"
] | null | null | null |
server.py
|
Towerthousand/tvhack
|
080363e90be7c5ab9308481437d6b8c64bd8030a
|
[
"MIT"
] | 1
|
2015-09-12T22:38:32.000Z
|
2015-09-21T23:23:31.000Z
|
server.py
|
Towerthousand/tvhack
|
080363e90be7c5ab9308481437d6b8c64bd8030a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from flask import Flask
from flask import jsonify
from flask import render_template
from flask.ext.cors import CORS
from firebase import firebase
app = Flask(__name__)
CORS(app)
firebase = firebase.FirebaseApplication('https://tvhack.firebaseio.com', None)
_calling = False
@app.route('/')
def render_index():
""" Returns the DirectTV UI page """
return render_template('index.html')
@app.route('/call')
def render_call():
""" Returns the DirectTV UI page """
return render_template('call.html')
@app.route('/message')
def render_message():
""" Returns the message page """
return render_template('message.html')
@app.route('/api/isCaredBy/<uid>/', methods=['GET'])
def usersCaredBy(uid):
""" Returns the user objects taking care of a given user """
users = firebase.get('/users-tv/' + uid + '/isCaredBy', None)
users_info = []
for user in users:
users_info.append(firebase.get('/users-carer', user))
return jsonify({'users': users_info})
@app.route('/api/user-tv/<uid>/', methods=['GET'])
def user_tv(uid):
""" Returns the requested tv user object """
user = firebase.get('/users-tv', uid)
return jsonify(user)
@app.route('/api/user-carer/<uid>/', methods=['GET'])
def user_carer(uid):
""" Returns the requested carer user object """
user = firebase.get('/users-carer', uid)
return jsonify(user)
@app.route('/api/stayOnline/<uid>/', methods=['GET'])
def stay_alive(uid):
""" Notifies the server that uid is still online """
global _calling
return 'OK'
@app.route('/api/call/<uid>/', methods=['GET'])
def call(uid):
""" calls a given uid """
global _calling
_calling = True
return 'OK'
@app.route('/api/uncall/<uid>/', methods=['GET'])
def uncall(uid):
""" uncalls a given uid """
global _calling
_calling = False
return 'OK'
@app.route('/api/isCalling/<uid>/', methods=['GET'])
def is_calling(uid):
""" Returns true if uid is being called """
return jsonify({"isCalling": _calling})
@app.route('/api/remind/<uid>/', methods=['GET'])
def remind(uid):
return 'OK'
@app.route('/api/notify/<id>/', methods=['GET'])
def notify(id):
firebase.patch('/push-notifications', {0: True})
return 'OK'
@app.route('/api/denotify/<id>/', methods=['GET'])
def denotify(id):
firebase.patch('/push-notifications', {0: False})
return 'OK'
if __name__ == "__main__":
app.run(debug=True)
| 22.916667
| 78
| 0.646061
|
8cbc7fe662a97ce0721d35cdec61ec314b529c77
| 5,479
|
py
|
Python
|
stencil_benchmarks/benchmarks_collection/stream/cuda_hip.py
|
MeteoSwiss-APN/stencil_benchmarks
|
055e1d82fd0d95ffb9633b350fe167f1f1241325
|
[
"BSD-3-Clause"
] | 5
|
2020-05-21T23:20:43.000Z
|
2021-01-12T11:52:05.000Z
|
stencil_benchmarks/benchmarks_collection/stream/cuda_hip.py
|
MeteoSwiss-APN/stencil_benchmarks
|
055e1d82fd0d95ffb9633b350fe167f1f1241325
|
[
"BSD-3-Clause"
] | null | null | null |
stencil_benchmarks/benchmarks_collection/stream/cuda_hip.py
|
MeteoSwiss-APN/stencil_benchmarks
|
055e1d82fd0d95ffb9633b350fe167f1f1241325
|
[
"BSD-3-Clause"
] | 7
|
2017-08-22T07:11:02.000Z
|
2021-06-30T11:31:20.000Z
|
# Stencil Benchmarks
#
# Copyright (c) 2017-2021, ETH Zurich and MeteoSwiss
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import re
import warnings
from ...benchmark import Benchmark, Parameter, ExecutionError
from ...tools import compilation, cpphelpers, template
class Native(Benchmark):
array_size = Parameter('number of elements in arrays', 10000000)
ntimes = Parameter('number of runs', 10)
block_size = Parameter('threads per block', 1024)
dtype = Parameter('data type in NumPy format, e.g. float32 or float64',
'float64')
compiler = Parameter('compiler path', dtype=str, nargs=1)
compiler_flags = Parameter('compiler flags', '')
axis = Parameter('compute grid dimension to use',
'x',
choices=['x', 'y', 'z'])
vector_size = Parameter('vector size', 1)
explicit_vectorization = Parameter(
'use float2, float3, float4 types, '
'otherwise just add a loop and let the compiler vectorize', True)
unroll_factor = Parameter(
'loop unroll factor (in addition to vectorization)', 1)
launch_bounds = Parameter('specify launch bounds', True)
index_type = Parameter('index data type', 'std::size_t')
streaming_stores = Parameter('use streaming store instructions', False)
streaming_loads = Parameter('use streaming load instructions', False)
print_code = Parameter('print code', False)
verify = Parameter('verify results', True)
def setup(self):
super().setup()
elements_per_block = (self.block_size * self.vector_size *
self.unroll_factor)
if self.array_size % elements_per_block:
warnings.warn(
'adapting array size to match block and vector sizes')
self.array_size = ((self.array_size + elements_per_block - 1) //
elements_per_block) * elements_per_block
template_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'cuda_hip.j2')
code = template.render(template_file, **self.template_args())
if self.print_code:
print(cpphelpers.format_code(code))
self.compiled = compilation.GnuLibrary(code,
self.compile_command(),
extension='.cu')
def compile_command(self):
command = [self.compiler]
if self.compiler_flags:
command += self.compiler_flags.split()
return command
def template_args(self):
return dict(array_size=self.array_size,
axis=self.axis,
block_size=self.block_size,
ctype=compilation.dtype_cname(self.dtype),
ntimes=self.ntimes,
vector_size=self.vector_size,
explicit_vectorization=self.explicit_vectorization,
unroll_factor=self.unroll_factor,
launch_bounds=self.launch_bounds,
index_type=self.index_type,
streaming_loads=self.streaming_loads,
streaming_stores=self.streaming_stores,
verify=self.verify)
def run(self):
try:
output = self.compiled.run()
except compilation.ExecutionError as error:
raise ExecutionError(*error.args) from error
regex = re.compile(r'(Copy|Scale|Add|Triad): +'
r'([0-9.]+) +([0-9.]+) +'
r'([0-9.]+) +([0-9.]+)')
results = []
for match in regex.finditer(output):
results.append({
'name': match.group(1).lower(),
'bandwidth': float(match.group(2)),
'avg-time': float(match.group(3)),
'time': float(match.group(4)),
'max-time': float(match.group(5))
})
return results
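    # Illustrative note (not part of the original file): run() expects the
    # compiled benchmark to print lines matching the regex above, e.g.
    #   "Copy:  812345.6  0.00197  0.00195  0.00201"
    # which would be reported as {'name': 'copy', 'bandwidth': 812345.6,
    # 'avg-time': 0.00197, 'time': 0.00195, 'max-time': 0.00201}.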
| 43.484127
| 79
| 0.638073
|
6cf613bc89e61907940ff91c398160bb64330f63
| 4,552
|
py
|
Python
|
tests/parsers/test_header.py
|
torressa/grblogtools
|
6a7783ed6514b3a60cc4cc041ee922e538571701
|
[
"Apache-2.0"
] | null | null | null |
tests/parsers/test_header.py
|
torressa/grblogtools
|
6a7783ed6514b3a60cc4cc041ee922e538571701
|
[
"Apache-2.0"
] | 39
|
2021-12-14T05:01:39.000Z
|
2022-03-01T23:05:46.000Z
|
tests/parsers/test_header.py
|
torressa/grblogtools
|
6a7783ed6514b3a60cc4cc041ee922e538571701
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase, main
from grblogtools.parsers.header import HeaderParser
from grblogtools.parsers.util import parse_lines
example_log_0 = """
Gurobi Optimizer version 9.5.0 build v9.5.0rc5 (mac64[arm])
Copyright (c) 2021, Gurobi Optimization, LLC
Read MPS format model from file /Library/gurobi950/macos_universal2/examples/data/glass4.mps
Reading time = 0.00 seconds
glass4: 396 rows, 322 columns, 1815 nonzeros
Thread count: 8 physical cores, 8 logical processors, using up to 8 threads
"""
expected_summary_0 = {
"Version": "9.5.0",
"ModelFilePath": "/Library/gurobi950/macos_universal2/examples/data/glass4.mps",
"ReadingTime": 0.0,
"PhysicalCores": 8,
"LogicalProcessors": 8,
"Threads": 8,
"ModelName": "glass4",
"Rows": 396,
"Columns": 322,
"Nonzeros": 1815,
}
expected_parameters_0 = {}
example_log_1 = """
Set parameter Presolve to value 0
Set parameter NonConvex to value 2
Gurobi Optimizer version 9.5.0 build v9.5.0rc5 (mac64[rosetta2])
Thread count: 8 physical cores, 8 logical processors, using up to 8 threads
"""
expected_summary_1 = {
"Version": "9.5.0",
"PhysicalCores": 8,
"LogicalProcessors": 8,
"Threads": 8,
}
expected_parameters_1 = {
"Presolve": 0,
"NonConvex": 2,
}
example_log_2 = """
Set parameter CSManager to value "localhost:61000"
Set parameter CSAuthToken
Compute Server job ID: 4e90605d-8ec1-4b56-8351-d8a5355ff641
Capacity available on 'localhost' - connecting...
Established HTTP unencrypted connection
Set parameter ConcurrentMIP to value 2
Set parameter FuncPieces to value 1
Set parameter FuncPieceLength to value 0.001
Gurobi Optimizer version 9.5.0 build v9.5.0rc5 (mac64[rosetta2])
Gurobi Compute Server Worker version 9.5.0 build v9.5.0rc5 (mac64[arm])
Thread count: 8 physical cores, 8 logical processors, using up to 8 threads
"""
expected_summary_2 = {
"JobID": "4e90605d-8ec1-4b56-8351-d8a5355ff641",
"Version": "9.5.0",
"Platform": "mac64[arm]",
"PhysicalCores": 8,
"LogicalProcessors": 8,
"Threads": 8,
}
expected_parameters_2 = {
"CSManager": '"localhost:61000"',
"FuncPieces": 1,
"FuncPieceLength": 0.001,
"ConcurrentMIP": 2,
}
class TestHeader(TestCase):
def setUp(self):
pass
def test_first_line_matched(self):
expected_start_lines = [
"Gurobi Optimizer version 9.5.0 build v9.5.0rc5 (mac64[arm])",
"Set parameter Presolve to value 0",
'Set parameter CSManager to value "localhost:61000"',
]
for i, example_log in enumerate([example_log_0, example_log_1, example_log_2]):
with self.subTest(example_log=example_log):
header_parser = HeaderParser()
for line in example_log.strip().split("\n"):
if header_parser.parse(line):
self.assertEqual(line, expected_start_lines[i])
break
else:
                    self.fail("No start line found.")
def test_get_summary(self):
example_logs = [example_log_0, example_log_1, example_log_2]
expected_summaries = [
expected_summary_0,
expected_summary_1,
expected_summary_2,
]
expected_parameter_sets = [
expected_parameters_0,
expected_parameters_1,
expected_parameters_2,
]
for example_log, expected_summary, expected_parameters in zip(
example_logs, expected_summaries, expected_parameter_sets
):
with self.subTest(example_log=example_log):
header_parser = HeaderParser()
lines = example_log.strip().split("\n")
parse_lines(header_parser, lines)
self.assertEqual(header_parser.get_summary(), expected_summary)
self.assertEqual(header_parser.get_parameters(), expected_parameters)
def test_start_patterns(self):
"""Check the header parser properly guards later patterns. This is
important so that when parsing multiple logs, the header parser does
not interrupt.
The below presolve line can be caught by the model name/size parser
in the header, but it should only be picked up if the HeaderParser
has seen a proper log start line."""
parser = HeaderParser()
parse_lines(parser, ["Presolved: 390 rows, 316 columns, 1803 nonzeros"])
assert not parser.get_summary()
if __name__ == "__main__":
main()
| 32.514286
| 92
| 0.664982
|
12d4df3102d2ac84fb72a7e2f01037b2d4262a63
| 6,993
|
py
|
Python
|
sdk/python/pulumi_okta/deprecated/oauth_app_redirect_uri.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2019-10-29T21:59:22.000Z
|
2021-11-08T12:00:24.000Z
|
sdk/python/pulumi_okta/deprecated/oauth_app_redirect_uri.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2020-01-06T10:28:09.000Z
|
2022-03-25T19:52:40.000Z
|
sdk/python/pulumi_okta/deprecated/oauth_app_redirect_uri.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-11T16:31:04.000Z
|
2020-11-24T12:23:17.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['OauthAppRedirectUriArgs', 'OauthAppRedirectUri']
@pulumi.input_type
class OauthAppRedirectUriArgs:
def __init__(__self__, *,
app_id: pulumi.Input[str],
uri: pulumi.Input[str]):
"""
The set of arguments for constructing a OauthAppRedirectUri resource.
:param pulumi.Input[str] uri: Redirect URI to append to Okta OIDC application.
"""
pulumi.set(__self__, "app_id", app_id)
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: pulumi.Input[str]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter
def uri(self) -> pulumi.Input[str]:
"""
Redirect URI to append to Okta OIDC application.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: pulumi.Input[str]):
pulumi.set(self, "uri", value)
@pulumi.input_type
class _OauthAppRedirectUriState:
def __init__(__self__, *,
app_id: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering OauthAppRedirectUri resources.
:param pulumi.Input[str] uri: Redirect URI to append to Okta OIDC application.
"""
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
Redirect URI to append to Okta OIDC application.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
class OauthAppRedirectUri(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_id: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a OauthAppRedirectUri resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] uri: Redirect URI to append to Okta OIDC application.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: OauthAppRedirectUriArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a OauthAppRedirectUri resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param OauthAppRedirectUriArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(OauthAppRedirectUriArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_id: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OauthAppRedirectUriArgs.__new__(OauthAppRedirectUriArgs)
if app_id is None and not opts.urn:
raise TypeError("Missing required property 'app_id'")
__props__.__dict__["app_id"] = app_id
if uri is None and not opts.urn:
raise TypeError("Missing required property 'uri'")
__props__.__dict__["uri"] = uri
super(OauthAppRedirectUri, __self__).__init__(
'okta:deprecated/oauthAppRedirectUri:OauthAppRedirectUri',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
app_id: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None) -> 'OauthAppRedirectUri':
"""
Get an existing OauthAppRedirectUri resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] uri: Redirect URI to append to Okta OIDC application.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _OauthAppRedirectUriState.__new__(_OauthAppRedirectUriState)
__props__.__dict__["app_id"] = app_id
__props__.__dict__["uri"] = uri
return OauthAppRedirectUri(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "app_id")
@property
@pulumi.getter
def uri(self) -> pulumi.Output[str]:
"""
Redirect URI to append to Okta OIDC application.
"""
return pulumi.get(self, "uri")
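# Usage sketch (illustrative, not part of the generated file). The resource and
# argument names come from the class above; the import path is assumed from the
# file location and the app ID / URI are placeholders:
#
#   import pulumi_okta as okta
#
#   redirect_uri = okta.deprecated.OauthAppRedirectUri(
#       "example",
#       app_id="0oa1exampleAppId",
#       uri="https://example.com/oauth/callback",
#   )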
| 38.423077
| 134
| 0.630345
|
c91eb0c3ab3c63d9d3720ffa55ee708947cbacbe
| 4,333
|
py
|
Python
|
cogs/roles.py
|
Drowrin/Weeabot
|
c919e9cd928bbb22f79a130bb3e8279c20b9a577
|
[
"MIT"
] | 5
|
2017-02-05T06:28:03.000Z
|
2021-04-23T13:19:54.000Z
|
cogs/roles.py
|
Drowrin/Weeabot
|
c919e9cd928bbb22f79a130bb3e8279c20b9a577
|
[
"MIT"
] | 7
|
2016-08-11T15:19:22.000Z
|
2018-09-17T14:36:01.000Z
|
cogs/roles.py
|
Drowrin/Weeabot
|
c919e9cd928bbb22f79a130bb3e8279c20b9a577
|
[
"MIT"
] | 1
|
2016-06-06T00:05:07.000Z
|
2016-06-06T00:05:07.000Z
|
from collections import defaultdict
import discord
from discord.ext import commands
import checks
from cogs.requestsystem import request
from Weeabot import Weeabot
class Roles:
def __init__(self, bot: Weeabot):
self.bot = bot
async def check_config(self, ctx):
if ctx.message.server.id not in self.bot.server_configs:
self.bot.server_configs[ctx.message.server.id] = {}
if 'hidden_channels' not in self.bot.server_configs[ctx.message.server.id]:
self.bot.server_configs[ctx.message.server.id]['hidden_channels'] = {}
async def get_roles_list(self, ctx):
await self.check_config(ctx)
await self.update_roles(ctx)
roles_list = defaultdict(list)
for chan, r in self.bot.server_configs[ctx.message.server.id]["hidden_channels"].items():
chann = ctx.message.server.get_channel(chan)
for role in r:
roles_list[role].append(chann)
return roles_list
async def update_roles(self, ctx):
for chan_id, r in self.bot.server_configs[ctx.message.server.id]['hidden_channels'].items():
rs = [t[0].id for t in ctx.message.server.get_channel(chan_id).overwrites if t[1].read_messages]
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][chan_id] = rs
self.bot.dump_server_configs()
@commands.command(pass_context=True)
@checks.is_server_owner()
async def hide(self, ctx):
await self.check_config(ctx)
await self.bot.edit_channel_permissions(
channel=ctx.message.channel,
target=ctx.message.server.default_role,
overwrite=discord.PermissionOverwrite(read_messages=False)
)
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][ctx.message.channel.id] = []
await self.update_roles(ctx)
@commands.command(pass_context=True)
@checks.is_server_owner()
async def unhide(self, ctx):
await self.check_config(ctx)
for t in ctx.message.channel.overwrites:
await self.bot.delete_channel_permissions(
channel=ctx.message.channel,
target=t[0]
)
del self.bot.server_configs[ctx.message.server.id]['hidden_channels'][ctx.message.channel.id]
await self.update_roles(ctx)
@commands.command(pass_context=True)
@request()
@checks.is_server_owner()
async def make_channel(self, ctx, channel_name, role_name):
await self.check_config(ctx)
try:
everyone_perms = discord.PermissionOverwrite(read_messages=False)
everyone = discord.ChannelPermissions(target=ctx.message.server.default_role, overwrite=everyone_perms)
can_read = discord.PermissionOverwrite(read_messages=True)
new_role = await self.bot.create_role(ctx.message.server, name=role_name)
channel = await self.bot.create_channel(ctx.message.server, channel_name, everyone, (new_role, can_read))
await self.bot.add_roles(ctx.message.author, new_role)
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][channel.id] = [new_role.id]
except discord.errors.HTTPException:
await self.bot.say("Invalid name or that name is taken. Names must be alphanumeric.")
@commands.command(pass_context=True)
async def roles(self, ctx):
roles = await self.get_roles_list(ctx)
e: discord.Embed = discord.Embed()
for role, channels in roles.items():
try:
role_name = commands.RoleConverter(ctx, role).convert().name
message = '\n'.join([f'__{channel.name}__\n\t{channel.topic}' for channel in channels])
e.add_field(name=role_name, value=message, inline=False)
except commands.BadArgument:
pass
await self.bot.say('**Opt-in Roles**', embed=e)
@commands.command(pass_context=True)
async def makeme(self, ctx, *, role: discord.Role):
roles = await self.get_roles_list(ctx)
if role.id not in roles:
await self.bot.say("Sorry, that role isn't an opt-in role.")
return
await self.bot.add_roles(ctx.message.author, role)
def setup(bot):
bot.add_cog(Roles(bot))
| 42.067961
| 117
| 0.658897
|
807e838365301f5a822d25a7b1057091162ace1f
| 11,583
|
py
|
Python
|
train.py
|
KevinLL218/Mydatabase
|
6bf48aed67a1b7cd3b847c9e54caf0406e1cea40
|
[
"MIT"
] | 2
|
2021-07-15T06:59:14.000Z
|
2021-07-19T01:34:47.000Z
|
train.py
|
KevinLL218/Mydatabase
|
6bf48aed67a1b7cd3b847c9e54caf0406e1cea40
|
[
"MIT"
] | 2
|
2021-06-10T08:09:44.000Z
|
2021-07-19T02:01:46.000Z
|
train.py
|
KevinLL218/Underwater-Image-Segmentation
|
6bf48aed67a1b7cd3b847c9e54caf0406e1cea40
|
[
"MIT"
] | null | null | null |
"""Training script for the DeepLab-ResNet network on the PASCAL VOC dataset
for semantic image segmentation.
This script trains the model using augmented PASCAL VOC,
which contains approximately 10000 images for training and 1500 images for validation.
"""
from __future__ import print_function
import argparse
from datetime import datetime
import os
import sys
import time
import tensorflow as tf
import numpy as np
from deeplab_resnet import DeepLabResNetModel, ImageReader, decode_labels, inv_preprocess, prepare_label
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
BATCH_SIZE = 2
DATA_DIRECTORY = './dataset'
DATA_LIST_PATH = './dataset/train_tf.txt'
IGNORE_LABEL = 255
INPUT_SIZE = '256,256'
LEARNING_RATE = 1e-4
MOMENTUM = 0.9
NUM_CLASSES = 3
NUM_STEPS = 2000
POWER = 0.9
RANDOM_SEED = 1234
RESTORE_FROM = './deeplab_resnet.ckpt'
SAVE_NUM_IMAGES = 2
SAVE_PRED_EVERY = 10
SNAPSHOT_DIR = './snapshots/'
WEIGHT_DECAY = 0.0005
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Number of images sent to the network in one step.")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the PASCAL VOC dataset.")
parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
help="Comma-separated string with height and width of images.")
parser.add_argument("--is-training", action="store_true",
help="Whether to updates the running means and variances during the training.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Base learning rate for training with polynomial decay.")
parser.add_argument("--momentum", type=float, default=MOMENTUM,
help="Momentum component of the optimiser.")
parser.add_argument("--not-restore-last", action="store_true",
help="Whether to not restore last (FC) layers.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
help="Number of training steps.")
parser.add_argument("--power", type=float, default=POWER,
help="Decay parameter to compute the learning rate.")
parser.add_argument("--random-mirror", action="store_true",
help="Whether to randomly mirror the inputs during the training.")
parser.add_argument("--random-scale", action="store_true",
help="Whether to randomly scale the inputs during the training.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random seed to have reproducible results.")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--save-num-images", type=int, default=SAVE_NUM_IMAGES,
help="How many images to save.")
parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
help="Save summaries and checkpoint every often.")
parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR,
help="Where to save snapshots of the model.")
parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
help="Regularisation parameter for L2-loss.")
return parser.parse_args()
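# Example invocation (illustrative; the flags are those defined above and the
# paths are placeholders):
#   python train.py --data-dir ./dataset --data-list ./dataset/train_tf.txt \
#       --num-classes 3 --batch-size 2 --random-mirror --random-scale --is-training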
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def main():
"""Create the model and start the training."""
args = get_arguments()
h, w = map(int, args.input_size.split(','))
input_size = (h, w)
tf.set_random_seed(args.random_seed)
# Create queue coordinator.
coord = tf.train.Coordinator()
# Load reader.
with tf.name_scope("create_inputs"):
reader = ImageReader(
args.data_dir,
args.data_list,
input_size,
args.random_scale,
args.random_mirror,
args.ignore_label,
IMG_MEAN,
coord)
image_batch, label_batch = reader.dequeue(args.batch_size)
# Create network.
net = DeepLabResNetModel({'data': image_batch}, is_training=args.is_training, num_classes=args.num_classes)
# For a small batch size, it is better to keep
# the statistics of the BN layers (running means and variances)
# frozen, and to not update the values provided by the pre-trained model.
# If is_training=True, the statistics will be updated during the training.
# Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)
# if they are presented in var_list of the optimiser definition.
# Predictions.
raw_output = net.layers['fc1_voc12']
# Which variables to load. Running means and variances are not trainable,
# thus all_variables() should be restored.
restore_var = [v for v in tf.global_variables() if 'fc' not in v.name or not args.not_restore_last]
all_trainable = [v for v in tf.trainable_variables() if 'beta' not in v.name and 'gamma' not in v.name]
fc_trainable = [v for v in all_trainable if 'fc' in v.name]
conv_trainable = [v for v in all_trainable if 'fc' not in v.name] # lr * 1.0
fc_w_trainable = [v for v in fc_trainable if 'weights' in v.name] # lr * 10.0
fc_b_trainable = [v for v in fc_trainable if 'biases' in v.name] # lr * 20.0
assert(len(all_trainable) == len(fc_trainable) + len(conv_trainable))
assert(len(fc_trainable) == len(fc_w_trainable) + len(fc_b_trainable))
# Predictions: ignoring all predictions with labels greater or equal than n_classes
raw_prediction = tf.reshape(raw_output, [-1, args.num_classes])
label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False) # [batch_size, h, w]
raw_gt = tf.reshape(label_proc, [-1,])
indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, args.num_classes - 1)), 1)
gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
prediction = tf.gather(raw_prediction, indices)
# Pixel-wise softmax loss.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]
reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)
# Processed predictions: for visualisation.
raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])
raw_output_up = tf.argmax(raw_output_up, dimension=3)
pred = tf.expand_dims(raw_output_up, dim=3)
# Image summary.
images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)
labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes], tf.uint8)
preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes], tf.uint8)
total_summary = tf.summary.image('images',
tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]),
max_outputs=args.save_num_images) # Concatenate row-wise.
summary_writer = tf.summary.FileWriter(args.snapshot_dir,
graph=tf.get_default_graph())
# Define loss and optimisation parameters.
base_lr = tf.constant(args.learning_rate)
step_ph = tf.placeholder(dtype=tf.float32, shape=())
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - step_ph / args.num_steps), args.power))
opt_conv = tf.train.MomentumOptimizer(learning_rate, args.momentum)
opt_fc_w = tf.train.MomentumOptimizer(learning_rate * 10.0, args.momentum)
opt_fc_b = tf.train.MomentumOptimizer(learning_rate * 20.0, args.momentum)
grads = tf.gradients(reduced_loss, conv_trainable + fc_w_trainable + fc_b_trainable)
grads_conv = grads[:len(conv_trainable)]
grads_fc_w = grads[len(conv_trainable) : (len(conv_trainable) + len(fc_w_trainable))]
grads_fc_b = grads[(len(conv_trainable) + len(fc_w_trainable)):]
train_op_conv = opt_conv.apply_gradients(zip(grads_conv, conv_trainable))
train_op_fc_w = opt_fc_w.apply_gradients(zip(grads_fc_w, fc_w_trainable))
train_op_fc_b = opt_fc_b.apply_gradients(zip(grads_fc_b, fc_b_trainable))
train_op = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
# Saver for storing checkpoints of the model.
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=10)
# Load variables if the checkpoint is provided.
if args.restore_from is not None:
loader = tf.train.Saver(var_list=restore_var)
load(loader, sess, args.restore_from)
# Start queue threads.
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
# Iterate over training steps.
for step in range(args.num_steps):
start_time = time.time()
feed_dict = { step_ph : step }
if step % args.save_pred_every == 0:
loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)
summary_writer.add_summary(summary, step)
save(saver, sess, args.snapshot_dir, step)
else:
loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)
duration = time.time() - start_time
print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
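# Usage sketch (added for illustration, not part of the original script): training is
# driven entirely by the argparse flags defined in get_arguments() above. A typical
# invocation could look like the following; the checkpoint/snapshot paths and script
# name are placeholders, and --data-dir / --data-list are assumed to be declared
# earlier in the parser (main() reads them from args).
#
#   python train.py --snapshot-dir ./snapshots --restore-from ./deeplab_resnet.ckpt \
#       --num-classes 21 --num-steps 20001 --save-pred-every 1000 \
#       --random-mirror --random-scale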
| 45.602362
| 162
| 0.670206
|
371a90920f867f7d0ac2903e48835d2529dd1339
| 529
|
py
|
Python
|
tree/apps.py
|
d42/django-tree
|
687c01c02d91cada9ca1912e34e482da9e73e27a
|
[
"BSD-3-Clause"
] | null | null | null |
tree/apps.py
|
d42/django-tree
|
687c01c02d91cada9ca1912e34e482da9e73e27a
|
[
"BSD-3-Clause"
] | null | null | null |
tree/apps.py
|
d42/django-tree
|
687c01c02d91cada9ca1912e34e482da9e73e27a
|
[
"BSD-3-Clause"
] | 1
|
2018-09-26T17:51:37.000Z
|
2018-09-26T17:51:37.000Z
|
from django.apps import AppConfig
from .fields import PathField
from .lookups import DescendantOf, AncestorOf, Match, MatchAny, Search
from .transforms import Level
class TreeAppConfig(AppConfig):
name = 'tree'
verbose_name = 'Tree'
def ready(self):
PathField.register_lookup(DescendantOf)
PathField.register_lookup(AncestorOf)
PathField.register_lookup(Match)
PathField.register_lookup(MatchAny)
PathField.register_lookup(Search)
PathField.register_lookup(Level)
| 26.45
| 70
| 0.73724
|
5a960b1823166658b6fd65e78aed0e9c2181f86d
| 8,579
|
py
|
Python
|
pytorch_lightning/loggers/test_tube.py
|
Code-Cornelius/pytorch-lightning
|
ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057
|
[
"Apache-2.0"
] | 4
|
2021-12-10T01:30:35.000Z
|
2022-02-12T17:25:36.000Z
|
pytorch_lightning/loggers/test_tube.py
|
Code-Cornelius/pytorch-lightning
|
ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057
|
[
"Apache-2.0"
] | 4
|
2021-11-07T02:22:34.000Z
|
2021-11-15T12:58:43.000Z
|
pytorch_lightning/loggers/test_tube.py
|
Code-Cornelius/pytorch-lightning
|
ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057
|
[
"Apache-2.0"
] | 2
|
2021-12-08T22:29:39.000Z
|
2022-03-26T04:46:09.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Tube Logger
----------------
"""
from argparse import Namespace
from typing import Any, Dict, Optional, Union
import pytorch_lightning as pl
from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
from pytorch_lightning.utilities import _module_available, rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.distributed import rank_zero_only
_TESTTUBE_AVAILABLE = _module_available("test_tube")
if _TESTTUBE_AVAILABLE:
from test_tube import Experiment
else:
Experiment = None
class TestTubeLogger(LightningLoggerBase):
r"""
Log to local file system in `TensorBoard <https://www.tensorflow.org/tensorboard>`_ format
but using a nicer folder structure (see `full docs <https://williamfalcon.github.io/test-tube>`_).
Warning:
        The test-tube package is no longer maintained and PyTorch Lightning will remove the :class:`TestTubeLogger`
in v1.7.0.
Install it with pip:
.. code-block:: bash
pip install test_tube
.. code-block:: python
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TestTubeLogger
logger = TestTubeLogger("tt_logs", name="my_exp_name")
trainer = Trainer(logger=logger)
Use the logger anywhere in your :class:`~pytorch_lightning.core.lightning.LightningModule` as follows:
.. code-block:: python
from pytorch_lightning import LightningModule
class LitModel(LightningModule):
def training_step(self, batch, batch_idx):
# example
self.logger.experiment.whatever_method_summary_writer_supports(...)
def any_lightning_module_function_or_hook(self):
self.logger.experiment.add_histogram(...)
Args:
save_dir: Save directory
name: Experiment name. Defaults to ``'default'``.
description: A short snippet about this experiment
debug: If ``True``, it doesn't log anything.
version: Experiment version. If version is not specified the logger inspects the save
directory for existing versions, then automatically assigns the next available version.
create_git_tag: If ``True`` creates a git tag to save the code used in this experiment.
log_graph: Adds the computational graph to tensorboard. This requires that
the user has defined the `self.example_input_array` attribute in their
model.
prefix: A string to put at the beginning of metric keys.
Raises:
ModuleNotFoundError:
If required TestTube package is not installed on the device.
"""
__test__ = False
LOGGER_JOIN_CHAR = "-"
def __init__(
self,
save_dir: str,
name: str = "default",
description: Optional[str] = None,
debug: bool = False,
version: Optional[int] = None,
create_git_tag: bool = False,
log_graph: bool = False,
prefix: str = "",
):
rank_zero_deprecation(
"The TestTubeLogger is deprecated since v1.5 and will be removed in v1.7. We recommend switching to the"
" `pytorch_lightning.loggers.TensorBoardLogger` as an alternative."
)
if Experiment is None:
raise ModuleNotFoundError(
"You want to use `test_tube` logger which is not installed yet,"
" install it with `pip install test-tube`."
)
super().__init__()
self._save_dir = save_dir
self._name = name
self.description = description
self.debug = debug
self._version = version
self.create_git_tag = create_git_tag
self._log_graph = log_graph
self._prefix = prefix
self._experiment = None
@property
@rank_zero_experiment
def experiment(self) -> Experiment:
r"""
Actual TestTube object. To use TestTube features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
self.logger.experiment.some_test_tube_function()
"""
if self._experiment is not None:
return self._experiment
self._experiment = Experiment(
save_dir=self.save_dir,
name=self._name,
debug=self.debug,
version=self.version,
description=self.description,
create_git_tag=self.create_git_tag,
rank=rank_zero_only.rank,
)
return self._experiment
@rank_zero_only
def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
# TODO: HACK figure out where this is being set to true
self.experiment.debug = self.debug
params = self._convert_params(params)
params = self._flatten_dict(params)
self.experiment.argparse(Namespace(**params))
@rank_zero_only
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
# TODO: HACK figure out where this is being set to true
metrics = self._add_prefix(metrics)
self.experiment.debug = self.debug
self.experiment.log(metrics, global_step=step)
@rank_zero_only
def log_graph(self, model: "pl.LightningModule", input_array=None):
if self._log_graph:
if input_array is None:
input_array = model.example_input_array
if input_array is not None:
self.experiment.add_graph(model, model._apply_batch_transfer_handler(input_array))
else:
rank_zero_warn(
"Could not log computational graph since neither the"
" `model.example_input_array` attribute is set nor"
" `input_array` was given",
UserWarning,
)
@rank_zero_only
def save(self) -> None:
super().save()
# TODO: HACK figure out where this is being set to true
self.experiment.debug = self.debug
self.experiment.save()
@rank_zero_only
def finalize(self, status: str) -> None:
super().finalize(status)
# TODO: HACK figure out where this is being set to true
self.experiment.debug = self.debug
self.save()
self.close()
@rank_zero_only
def close(self) -> None:
super().save()
# TODO: HACK figure out where this is being set to true
self.experiment.debug = self.debug
if not self.debug:
exp = self.experiment
exp.close()
@property
def save_dir(self) -> Optional[str]:
"""Gets the save directory.
Returns:
The path to the save directory.
"""
return self._save_dir
@property
def name(self) -> str:
"""Gets the experiment name.
Returns:
The experiment name if the experiment exists, else the name specified in the constructor.
"""
if self._experiment is None:
return self._name
return self.experiment.name
@property
def version(self) -> int:
"""Gets the experiment version.
Returns:
The experiment version if the experiment exists, else the next version.
"""
if self._experiment is None:
return self._version
return self.experiment.version
# Test tube experiments are not pickleable, so we need to override a few
# methods to get DDP working. See
# https://docs.python.org/3/library/pickle.html#handling-stateful-objects
# for more info.
def __getstate__(self) -> Dict[Any, Any]:
state = self.__dict__.copy()
state["_experiment"] = self.experiment.get_meta_copy()
return state
def __setstate__(self, state: Dict[Any, Any]):
self._experiment = state["_experiment"].get_non_ddp_exp()
del state["_experiment"]
self.__dict__.update(state)
| 34.043651
| 116
| 0.645064
|
16dfc5bba77aa0744e9e171e6d6ee27cc026a1c5
| 133
|
py
|
Python
|
geekshop/authapp/admin.py
|
tortilla1310/Django_shop
|
b61bea6a7f09eeb445321d4d3f508b1e8b88d18d
|
[
"MIT"
] | null | null | null |
geekshop/authapp/admin.py
|
tortilla1310/Django_shop
|
b61bea6a7f09eeb445321d4d3f508b1e8b88d18d
|
[
"MIT"
] | null | null | null |
geekshop/authapp/admin.py
|
tortilla1310/Django_shop
|
b61bea6a7f09eeb445321d4d3f508b1e8b88d18d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import ShopUser
# Register your models here.
admin.site.register(ShopUser)
| 19
| 32
| 0.81203
|
03f5d28d29e61dbb2ba1329ed2c53e9e35395401
| 35,319
|
py
|
Python
|
bilby/gw/result.py
|
tdalford1/bilby_relative_binning
|
a74123bf8ca5d66cbf07141b971d0ea32b71d9ec
|
[
"MIT"
] | null | null | null |
bilby/gw/result.py
|
tdalford1/bilby_relative_binning
|
a74123bf8ca5d66cbf07141b971d0ea32b71d9ec
|
[
"MIT"
] | null | null | null |
bilby/gw/result.py
|
tdalford1/bilby_relative_binning
|
a74123bf8ca5d66cbf07141b971d0ea32b71d9ec
|
[
"MIT"
] | null | null | null |
from __future__ import division
import json
import pickle
import os
import matplotlib.pyplot as plt
from matplotlib import rcParams
import numpy as np
from ..core.result import Result as CoreResult
from ..core.utils import (
infft, logger, check_directory_exists_and_if_not_mkdir,
latex_plot_format, safe_save_figure
)
from .utils import plot_spline_pos, spline_angle_xform, asd_from_freq_series
from .detector import get_empty_interferometer, Interferometer
class CompactBinaryCoalescenceResult(CoreResult):
def __init__(self, **kwargs):
super(CompactBinaryCoalescenceResult, self).__init__(**kwargs)
def __get_from_nested_meta_data(self, *keys):
dictionary = self.meta_data
try:
item = None
for k in keys:
item = dictionary[k]
dictionary = item
return item
except KeyError:
raise AttributeError(
"No information stored for {}".format('/'.join(keys)))
@property
def sampling_frequency(self):
""" Sampling frequency in Hertz"""
return self.__get_from_nested_meta_data(
'likelihood', 'sampling_frequency')
@property
def duration(self):
""" Duration in seconds """
return self.__get_from_nested_meta_data(
'likelihood', 'duration')
@property
def start_time(self):
""" Start time in seconds """
return self.__get_from_nested_meta_data(
'likelihood', 'start_time')
@property
def time_marginalization(self):
""" Boolean for if the likelihood used time marginalization """
return self.__get_from_nested_meta_data(
'likelihood', 'time_marginalization')
@property
def phase_marginalization(self):
""" Boolean for if the likelihood used phase marginalization """
return self.__get_from_nested_meta_data(
'likelihood', 'phase_marginalization')
@property
def distance_marginalization(self):
""" Boolean for if the likelihood used distance marginalization """
return self.__get_from_nested_meta_data(
'likelihood', 'distance_marginalization')
@property
def interferometers(self):
""" List of interferometer names """
return [name for name in self.__get_from_nested_meta_data(
'likelihood', 'interferometers')]
@property
def waveform_approximant(self):
""" String of the waveform approximant """
return self.__get_from_nested_meta_data(
'likelihood', 'waveform_arguments', 'waveform_approximant')
@property
def waveform_generator_class(self):
""" Dict of waveform arguments """
return self.__get_from_nested_meta_data(
'likelihood', 'waveform_generator_class')
@property
def waveform_arguments(self):
""" Dict of waveform arguments """
return self.__get_from_nested_meta_data(
'likelihood', 'waveform_arguments')
@property
def reference_frequency(self):
""" Float of the reference frequency """
return self.__get_from_nested_meta_data(
'likelihood', 'waveform_arguments', 'reference_frequency')
@property
def frequency_domain_source_model(self):
""" The frequency domain source model (function)"""
return self.__get_from_nested_meta_data(
'likelihood', 'frequency_domain_source_model')
@property
def parameter_conversion(self):
""" The frequency domain source model (function)"""
return self.__get_from_nested_meta_data(
'likelihood', 'parameter_conversion')
def detector_injection_properties(self, detector):
""" Returns a dictionary of the injection properties for each detector
The injection properties include the parameters injected, and
information about the signal to noise ratio (SNR) given the noise
properties.
Parameters
----------
detector: str [H1, L1, V1]
Detector name
Returns
-------
injection_properties: dict
A dictionary of the injection properties
"""
try:
return self.__get_from_nested_meta_data(
'likelihood', 'interferometers', detector)
except AttributeError:
logger.info("No injection for detector {}".format(detector))
return None
@latex_plot_format
def plot_calibration_posterior(self, level=.9, format="png"):
""" Plots the calibration amplitude and phase uncertainty.
Adapted from the LALInference version in bayespputils
Plot is saved to {self.outdir}/{self.label}_calibration.{format}
Parameters
----------
level: float
Quantile for confidence levels, default=0.9, i.e., 90% interval
format: str
Format to save the plot, default=png, options are png/pdf
"""
if format not in ["png", "pdf"]:
raise ValueError("Format should be one of png or pdf")
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(15, 15), dpi=500)
posterior = self.posterior
font_size = 32
outdir = self.outdir
parameters = posterior.keys()
ifos = np.unique([param.split('_')[1] for param in parameters if 'recalib_' in param])
if ifos.size == 0:
logger.info("No calibration parameters found. Aborting calibration plot.")
return
for ifo in ifos:
if ifo == 'H1':
color = 'r'
elif ifo == 'L1':
color = 'g'
elif ifo == 'V1':
color = 'm'
else:
color = 'c'
# Assume spline control frequencies are constant
freq_params = np.sort([param for param in parameters if
'recalib_{0}_frequency_'.format(ifo) in param])
logfreqs = np.log([posterior[param].iloc[0] for param in freq_params])
# Amplitude calibration model
plt.sca(ax1)
amp_params = np.sort([param for param in parameters if
'recalib_{0}_amplitude_'.format(ifo) in param])
if len(amp_params) > 0:
amplitude = 100 * np.column_stack([posterior[param] for param in amp_params])
plot_spline_pos(logfreqs, amplitude, color=color, level=level,
label="{0} (mean, {1}$\%$)".format(ifo.upper(), int(level * 100)))
# Phase calibration model
plt.sca(ax2)
phase_params = np.sort([param for param in parameters if
'recalib_{0}_phase_'.format(ifo) in param])
if len(phase_params) > 0:
phase = np.column_stack([posterior[param] for param in phase_params])
plot_spline_pos(logfreqs, phase, color=color, level=level,
label="{0} (mean, {1}$\%$)".format(ifo.upper(), int(level * 100)),
xform=spline_angle_xform)
ax1.tick_params(labelsize=.75 * font_size)
ax2.tick_params(labelsize=.75 * font_size)
plt.legend(loc='upper right', prop={'size': .75 * font_size}, framealpha=0.1)
ax1.set_xscale('log')
ax2.set_xscale('log')
ax2.set_xlabel('Frequency [Hz]', fontsize=font_size)
ax1.set_ylabel('Amplitude [$\%$]', fontsize=font_size)
ax2.set_ylabel('Phase [deg]', fontsize=font_size)
filename = os.path.join(outdir, self.label + '_calibration.' + format)
fig.tight_layout()
safe_save_figure(
fig=fig, filename=filename,
format=format, dpi=600, bbox_inches='tight'
)
logger.debug("Calibration figure saved to {}".format(filename))
plt.close()
def plot_waveform_posterior(
self, interferometers=None, level=0.9, n_samples=None,
format='png', start_time=None, end_time=None):
"""
Plot the posterior for the waveform in the frequency domain and
whitened time domain for all detectors.
If the strain data is passed that will be plotted.
If injection parameters can be found, the injection will be plotted.
Parameters
----------
interferometers: (list, bilby.gw.detector.InterferometerList, optional)
level: float, optional
symmetric confidence interval to show, default is 90%
n_samples: int, optional
number of samples to use to calculate the median/interval
default is all
format: str, optional
format to save the figure in, default is png
start_time: float, optional
the amount of time before merger to begin the time domain plot.
the merger time is defined as the mean of the geocenter time
posterior. Default is - 0.4
end_time: float, optional
the amount of time before merger to end the time domain plot.
the merger time is defined as the mean of the geocenter time
posterior. Default is 0.2
"""
if interferometers is None:
interferometers = self.interferometers
elif not isinstance(interferometers, list):
raise TypeError(
'interferometers must be a list or InterferometerList')
for ifo in interferometers:
self.plot_interferometer_waveform_posterior(
interferometer=ifo, level=level, n_samples=n_samples,
save=True, format=format, start_time=start_time,
end_time=end_time)
@latex_plot_format
def plot_interferometer_waveform_posterior(
self, interferometer, level=0.9, n_samples=None, save=True,
format='png', start_time=None, end_time=None):
"""
Plot the posterior for the waveform in the frequency domain and
whitened time domain.
If the strain data is passed that will be plotted.
If injection parameters can be found, the injection will be plotted.
Parameters
----------
interferometer: (str, bilby.gw.detector.interferometer.Interferometer)
detector to use, if an Interferometer object is passed the data
will be overlaid on the posterior
level: float, optional
symmetric confidence interval to show, default is 90%
n_samples: int, optional
number of samples to use to calculate the median/interval
default is all
save: bool, optional
whether to save the image, default=True
if False, figure handle is returned
format: str, optional
format to save the figure in, default is png
start_time: float, optional
the amount of time before merger to begin the time domain plot.
the merger time is defined as the mean of the geocenter time
posterior. Default is - 0.4
end_time: float, optional
the amount of time before merger to end the time domain plot.
the merger time is defined as the mean of the geocenter time
posterior. Default is 0.2
Returns
-------
        fig: figure handle, only returned if save=False
Notes
-----
To reduce the memory footprint we decimate the frequency domain
waveforms to have ~4000 entries. This should be sufficient for decent
resolution.
"""
DATA_COLOR = "#ff7f0e"
WAVEFORM_COLOR = "#1f77b4"
INJECTION_COLOR = "#000000"
if format == "html":
try:
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.subplots import make_subplots
except ImportError:
logger.warning(
"HTML plotting requested, but plotly cannot be imported, "
"falling back to png format for waveform plot.")
format = "png"
if isinstance(interferometer, str):
interferometer = get_empty_interferometer(interferometer)
interferometer.set_strain_data_from_zero_noise(
sampling_frequency=self.sampling_frequency,
duration=self.duration, start_time=self.start_time)
PLOT_DATA = False
elif not isinstance(interferometer, Interferometer):
raise TypeError(
'interferometer must be either str or Interferometer')
else:
PLOT_DATA = True
logger.info("Generating waveform figure for {}".format(
interferometer.name))
if n_samples is None:
n_samples = len(self.posterior)
elif n_samples > len(self.posterior):
logger.debug(
"Requested more waveform samples ({}) than we have "
"posterior samples ({})!".format(
n_samples, len(self.posterior)
)
)
n_samples = len(self.posterior)
if start_time is None:
start_time = - 0.4
start_time = np.mean(self.posterior.geocent_time) + start_time
if end_time is None:
end_time = 0.2
end_time = np.mean(self.posterior.geocent_time) + end_time
if format == "html":
start_time = - np.inf
end_time = np.inf
time_idxs = (
(interferometer.time_array >= start_time) &
(interferometer.time_array <= end_time)
)
frequency_idxs = np.where(interferometer.frequency_mask)[0]
logger.debug("Frequency mask contains {} values".format(
len(frequency_idxs))
)
frequency_idxs = frequency_idxs[::max(1, len(frequency_idxs) // 4000)]
logger.debug("Downsampling frequency mask to {} values".format(
len(frequency_idxs))
)
plot_times = interferometer.time_array[time_idxs]
plot_times -= interferometer.strain_data.start_time
start_time -= interferometer.strain_data.start_time
end_time -= interferometer.strain_data.start_time
plot_frequencies = interferometer.frequency_array[frequency_idxs]
waveform_generator = self.waveform_generator_class(
duration=self.duration, sampling_frequency=self.sampling_frequency,
start_time=self.start_time,
frequency_domain_source_model=self.frequency_domain_source_model,
parameter_conversion=self.parameter_conversion,
waveform_arguments=self.waveform_arguments)
if format == "html":
fig = make_subplots(
rows=2, cols=1,
row_heights=[0.5, 0.5],
)
fig.update_layout(
template='plotly_white',
font=dict(
family="Computer Modern",
)
)
else:
old_font_size = rcParams["font.size"]
rcParams["font.size"] = 20
fig, axs = plt.subplots(
2, 1,
gridspec_kw=dict(height_ratios=[1.5, 1]),
figsize=(16, 12.5)
)
if PLOT_DATA:
if format == "html":
fig.add_trace(
go.Scatter(
x=plot_frequencies,
y=asd_from_freq_series(
interferometer.frequency_domain_strain[frequency_idxs],
1 / interferometer.strain_data.duration
),
fill=None,
mode='lines', line_color=DATA_COLOR,
opacity=0.5,
name="Data",
legendgroup='data',
),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=plot_frequencies,
y=interferometer.amplitude_spectral_density_array[frequency_idxs],
fill=None,
mode='lines', line_color=DATA_COLOR,
opacity=0.8,
name="ASD",
legendgroup='asd',
),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=plot_times,
y=infft(
interferometer.whitened_frequency_domain_strain *
np.sqrt(2. / interferometer.sampling_frequency),
sampling_frequency=interferometer.strain_data.sampling_frequency)[time_idxs],
fill=None,
mode='lines', line_color=DATA_COLOR,
opacity=0.5,
name="Data",
legendgroup='data',
showlegend=False,
),
row=2,
col=1,
)
else:
axs[0].loglog(
plot_frequencies,
asd_from_freq_series(
interferometer.frequency_domain_strain[frequency_idxs],
1 / interferometer.strain_data.duration),
color=DATA_COLOR, label='Data', alpha=0.3)
axs[0].loglog(
plot_frequencies,
interferometer.amplitude_spectral_density_array[frequency_idxs],
color=DATA_COLOR, label='ASD')
axs[1].plot(
plot_times, infft(
interferometer.whitened_frequency_domain_strain *
np.sqrt(2. / interferometer.sampling_frequency),
sampling_frequency=interferometer.strain_data.sampling_frequency)[time_idxs],
color=DATA_COLOR, alpha=0.3)
logger.debug('Plotted interferometer data.')
fd_waveforms = list()
td_waveforms = list()
for ii in range(n_samples):
params = dict(self.posterior.iloc[ii])
wf_pols = waveform_generator.frequency_domain_strain(params)
fd_waveform = interferometer.get_detector_response(wf_pols, params)
fd_waveforms.append(fd_waveform[frequency_idxs])
td_waveform = infft(
fd_waveform * np.sqrt(2. / interferometer.sampling_frequency) /
interferometer.amplitude_spectral_density_array,
self.sampling_frequency)[time_idxs]
td_waveforms.append(td_waveform)
fd_waveforms = asd_from_freq_series(
fd_waveforms,
1 / interferometer.strain_data.duration)
td_waveforms = np.array(td_waveforms)
delta = (1 + level) / 2
upper_percentile = delta * 100
lower_percentile = (1 - delta) * 100
logger.debug(
'Plotting posterior between the {} and {} percentiles'.format(
lower_percentile, upper_percentile
)
)
if format == "html":
fig.add_trace(
go.Scatter(
x=plot_frequencies, y=np.median(fd_waveforms, axis=0),
fill=None,
mode='lines', line_color=WAVEFORM_COLOR,
opacity=1,
name="Median reconstructed",
legendgroup='median',
),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=plot_frequencies, y=np.percentile(fd_waveforms, lower_percentile, axis=0),
fill=None,
mode='lines',
line_color=WAVEFORM_COLOR,
opacity=0.1,
name="{:.2f}% credible interval".format(upper_percentile - lower_percentile),
legendgroup='uncertainty',
),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=plot_frequencies, y=np.percentile(fd_waveforms, upper_percentile, axis=0),
fill='tonexty',
mode='lines',
line_color=WAVEFORM_COLOR,
opacity=0.1,
name="{:.2f}% credible interval".format(upper_percentile - lower_percentile),
legendgroup='uncertainty',
showlegend=False,
),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=plot_times, y=np.median(td_waveforms, axis=0),
fill=None,
mode='lines', line_color=WAVEFORM_COLOR,
opacity=1,
name="Median reconstructed",
legendgroup='median',
showlegend=False,
),
row=2,
col=1,
)
fig.add_trace(
go.Scatter(
x=plot_times, y=np.percentile(td_waveforms, lower_percentile, axis=0),
fill=None,
mode='lines',
line_color=WAVEFORM_COLOR,
opacity=0.1,
name="{:.2f}% credible interval".format(upper_percentile - lower_percentile),
legendgroup='uncertainty',
showlegend=False,
),
row=2,
col=1,
)
fig.add_trace(
go.Scatter(
x=plot_times, y=np.percentile(td_waveforms, upper_percentile, axis=0),
fill='tonexty',
mode='lines',
line_color=WAVEFORM_COLOR,
opacity=0.1,
name="{:.2f}% credible interval".format(upper_percentile - lower_percentile),
legendgroup='uncertainty',
showlegend=False,
),
row=2,
col=1,
)
else:
lower_limit = np.mean(fd_waveforms, axis=0)[0] / 1e3
axs[0].loglog(
plot_frequencies,
np.mean(fd_waveforms, axis=0), color=WAVEFORM_COLOR, label='Mean reconstructed')
axs[0].fill_between(
plot_frequencies,
np.percentile(fd_waveforms, lower_percentile, axis=0),
np.percentile(fd_waveforms, upper_percentile, axis=0),
color=WAVEFORM_COLOR, label='{}\% credible interval'.format(
int(upper_percentile - lower_percentile)),
alpha=0.3)
axs[1].plot(
plot_times, np.mean(td_waveforms, axis=0),
color=WAVEFORM_COLOR)
axs[1].fill_between(
plot_times, np.percentile(
td_waveforms, lower_percentile, axis=0),
np.percentile(td_waveforms, upper_percentile, axis=0),
color=WAVEFORM_COLOR,
alpha=0.3)
if self.injection_parameters is not None:
try:
hf_inj = waveform_generator.frequency_domain_strain(
self.injection_parameters)
hf_inj_det = interferometer.get_detector_response(
hf_inj, self.injection_parameters)
ht_inj_det = infft(
hf_inj_det * np.sqrt(2. / interferometer.sampling_frequency) /
interferometer.amplitude_spectral_density_array,
self.sampling_frequency)[time_idxs]
if format == "html":
fig.add_trace(
go.Scatter(
x=plot_frequencies,
y=asd_from_freq_series(
hf_inj_det[frequency_idxs],
1 / interferometer.strain_data.duration),
fill=None,
mode='lines',
line=dict(color=INJECTION_COLOR, dash='dot'),
name="Injection",
legendgroup='injection',
),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=plot_times, y=ht_inj_det,
fill=None,
mode='lines',
line=dict(color=INJECTION_COLOR, dash='dot'),
name="Injection",
legendgroup='injection',
showlegend=False,
),
row=2,
col=1,
)
else:
axs[0].loglog(
plot_frequencies,
asd_from_freq_series(
hf_inj_det[frequency_idxs],
1 / interferometer.strain_data.duration),
color=INJECTION_COLOR, label='Injection', linestyle=':')
axs[1].plot(
plot_times, ht_inj_det,
color=INJECTION_COLOR, linestyle=':')
logger.debug('Plotted injection.')
except IndexError as e:
logger.info('Failed to plot injection with message {}.'.format(e))
f_domain_x_label = "$f [\\mathrm{Hz}]$"
f_domain_y_label = "$\\mathrm{ASD} \\left[\\mathrm{Hz}^{-1/2}\\right]$"
t_domain_x_label = "$t - {} [s]$".format(interferometer.strain_data.start_time)
t_domain_y_label = "Whitened Strain"
if format == "html":
fig.update_xaxes(title_text=f_domain_x_label, type="log", row=1)
fig.update_yaxes(title_text=f_domain_y_label, type="log", row=1)
fig.update_xaxes(title_text=t_domain_x_label, type="linear", row=2)
fig.update_yaxes(title_text=t_domain_y_label, type="linear", row=2)
else:
axs[0].set_xlim(interferometer.minimum_frequency,
interferometer.maximum_frequency)
axs[1].set_xlim(start_time, end_time)
axs[0].set_ylim(lower_limit)
axs[0].set_xlabel(f_domain_x_label)
axs[0].set_ylabel(f_domain_y_label)
axs[1].set_xlabel(t_domain_x_label)
axs[1].set_ylabel(t_domain_y_label)
axs[0].legend(loc='lower left', ncol=2)
if save:
filename = os.path.join(
self.outdir,
self.label + '_{}_waveform.{}'.format(
interferometer.name, format))
if format == 'html':
plot(fig, filename=filename, include_mathjax='cdn', auto_open=False)
else:
plt.tight_layout()
safe_save_figure(
fig=fig, filename=filename,
format=format, dpi=600
)
plt.close()
logger.debug("Waveform figure saved to {}".format(filename))
rcParams["font.size"] = old_font_size
else:
rcParams["font.size"] = old_font_size
return fig
def plot_skymap(
self, maxpts=None, trials=5, jobs=1, enable_multiresolution=True,
objid=None, instruments=None, geo=False, dpi=600,
transparent=False, colorbar=False, contour=[50, 90],
annotate=True, cmap='cylon', load_pickle=False):
""" Generate a fits file and sky map from a result
Code adapted from ligo.skymap.tool.ligo_skymap_from_samples and
ligo.skymap.tool.plot_skymap. Note, the use of this additionally
        requires the installation of ligo.skymap.
Parameters
----------
maxpts: int
Maximum number of samples to use, if None all samples are used
trials: int
Number of trials at each clustering number
jobs: int
Number of multiple threads
enable_multiresolution: bool
Generate a multiresolution HEALPix map (default: True)
objid: str
Event ID to store in FITS header
instruments: str
Name of detectors
geo: bool
Plot in geographic coordinates (lat, lon) instead of RA, Dec
dpi: int
            Resolution of figure in dots per inch
transparent: bool
Save image with transparent background
colorbar: bool
Show colorbar
contour: list
List of contour levels to use
annotate: bool
Annotate image with details
cmap: str
Name of the colormap to use
load_pickle: bool, str
If true, load the cached pickle file (default name), or the
            pickle file given as a path.
"""
try:
from astropy.time import Time
from ligo.skymap import io, version, plot, postprocess, bayestar, kde
import healpy as hp
except ImportError as e:
logger.info("Unable to generate skymap: error {}".format(e))
return
check_directory_exists_and_if_not_mkdir(self.outdir)
logger.info('Reading samples for skymap')
data = self.posterior
if maxpts is not None and maxpts < len(data):
logger.info('Taking random subsample of chain')
data = data.sample(maxpts)
default_obj_filename = os.path.join(self.outdir, '{}_skypost.obj'.format(self.label))
if load_pickle is False:
try:
pts = data[['ra', 'dec', 'luminosity_distance']].values
confidence_levels = kde.Clustered2Plus1DSkyKDE
distance = True
except KeyError:
logger.warning("The results file does not contain luminosity_distance")
pts = data[['ra', 'dec']].values
confidence_levels = kde.Clustered2DSkyKDE
distance = False
logger.info('Initialising skymap class')
skypost = confidence_levels(pts, trials=trials, jobs=jobs)
logger.info('Pickling skymap to {}'.format(default_obj_filename))
with open(default_obj_filename, 'wb') as out:
pickle.dump(skypost, out)
else:
if isinstance(load_pickle, str):
obj_filename = load_pickle
else:
obj_filename = default_obj_filename
logger.info('Reading from pickle {}'.format(obj_filename))
with open(obj_filename, 'rb') as file:
skypost = pickle.load(file)
skypost.jobs = jobs
distance = isinstance(skypost, kde.Clustered2Plus1DSkyKDE)
logger.info('Making skymap')
hpmap = skypost.as_healpix()
if not enable_multiresolution:
hpmap = bayestar.rasterize(hpmap)
hpmap.meta.update(io.fits.metadata_for_version_module(version))
hpmap.meta['creator'] = "bilby"
hpmap.meta['origin'] = 'LIGO/Virgo'
hpmap.meta['gps_creation_time'] = Time.now().gps
hpmap.meta['history'] = ""
if objid is not None:
hpmap.meta['objid'] = objid
if instruments:
hpmap.meta['instruments'] = instruments
if distance:
hpmap.meta['distmean'] = np.mean(data['luminosity_distance'])
hpmap.meta['diststd'] = np.std(data['luminosity_distance'])
try:
time = data['geocent_time']
hpmap.meta['gps_time'] = time.mean()
except KeyError:
logger.warning('Cannot determine the event time from geocent_time')
fits_filename = os.path.join(self.outdir, "{}_skymap.fits".format(self.label))
logger.info('Saving skymap fits-file to {}'.format(fits_filename))
io.write_sky_map(fits_filename, hpmap, nest=True)
skymap, metadata = io.fits.read_sky_map(fits_filename, nest=None)
nside = hp.npix2nside(len(skymap))
# Convert sky map from probability to probability per square degree.
deg2perpix = hp.nside2pixarea(nside, degrees=True)
probperdeg2 = skymap / deg2perpix
if geo:
obstime = Time(metadata['gps_time'], format='gps').utc.isot
ax = plt.axes(projection='geo degrees mollweide', obstime=obstime)
else:
ax = plt.axes(projection='astro hours mollweide')
ax.grid()
# Plot sky map.
vmax = probperdeg2.max()
img = ax.imshow_hpx(
(probperdeg2, 'ICRS'), nested=metadata['nest'], vmin=0., vmax=vmax,
cmap=cmap)
# Add colorbar.
if colorbar:
cb = plot.colorbar(img)
cb.set_label(r'prob. per deg$^2$')
if contour is not None:
confidence_levels = 100 * postprocess.find_greedy_credible_levels(skymap)
contours = ax.contour_hpx(
(confidence_levels, 'ICRS'), nested=metadata['nest'],
colors='k', linewidths=0.5, levels=contour)
fmt = r'%g\%%' if rcParams['text.usetex'] else '%g%%'
plt.clabel(contours, fmt=fmt, fontsize=6, inline=True)
# Add continents.
if geo:
geojson_filename = os.path.join(
os.path.dirname(plot.__file__), 'ne_simplified_coastline.json')
with open(geojson_filename, 'r') as geojson_file:
geoms = json.load(geojson_file)['geometries']
verts = [coord for geom in geoms
for coord in zip(*geom['coordinates'])]
plt.plot(*verts, color='0.5', linewidth=0.5,
transform=ax.get_transform('world'))
# Add a white outline to all text to make it stand out from the background.
plot.outline_text(ax)
if annotate:
text = []
try:
objid = metadata['objid']
except KeyError:
pass
else:
text.append('event ID: {}'.format(objid))
if contour:
pp = np.round(contour).astype(int)
ii = np.round(np.searchsorted(np.sort(confidence_levels), contour) *
deg2perpix).astype(int)
for i, p in zip(ii, pp):
text.append(
u'{:d}% area: {:d} deg$^2$'.format(p, i))
ax.text(1, 1, '\n'.join(text), transform=ax.transAxes, ha='right')
filename = os.path.join(self.outdir, "{}_skymap.png".format(self.label))
logger.info("Generating 2D projected skymap to {}".format(filename))
safe_save_figure(fig=plt.gcf(), filename=filename, dpi=dpi)
CBCResult = CompactBinaryCoalescenceResult
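# Usage sketch (added for illustration, not part of the original module): the plotting
# helpers above act on a completed CBC run. The path below is a placeholder, and it is
# assumed that the stored result deserialises to this class via the inherited
# Result.from_json classmethod.
#
#   result = CBCResult.from_json("outdir/label_result.json")
#   result.plot_calibration_posterior(level=0.9)
#   result.plot_waveform_posterior(level=0.9, n_samples=500)
#   result.plot_skymap(maxpts=5000, contour=[50, 90])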
| 40.044218
| 105
| 0.549081
|
1491a1a60c94fd425b23434fc020d3623b6c265d
| 450
|
py
|
Python
|
src/test/utils/oob/test_dial.py
|
narnikgamarnikus/program-y
|
777b9a8a75ec787c037de9f11a8527875ff450b1
|
[
"MIT"
] | null | null | null |
src/test/utils/oob/test_dial.py
|
narnikgamarnikus/program-y
|
777b9a8a75ec787c037de9f11a8527875ff450b1
|
[
"MIT"
] | null | null | null |
src/test/utils/oob/test_dial.py
|
narnikgamarnikus/program-y
|
777b9a8a75ec787c037de9f11a8527875ff450b1
|
[
"MIT"
] | null | null | null |
import unittest
import xml.etree.ElementTree as ET
from programy.utils.oob.dial import DialOutOfBoundsProcessor
class DefaultOutOfBoundsProcessorTests(unittest.TestCase):
def test_processor(self):
oob_processor = DialOutOfBoundsProcessor()
self.assertIsNotNone(oob_processor)
oob_content = ET.fromstring("<dial>911</dial>")
self.assertEqual("", oob_processor.process_out_of_bounds(None, "console", oob_content))
| 37.5
| 95
| 0.766667
|
108e5ee48b8e327de1ceeab97de6866989ae0db6
| 1,467
|
py
|
Python
|
apps/emoter/models.py
|
iapain/smartwebapps
|
19dcd2440cab5675708ddc0c3e1dad037e9f90cb
|
[
"MIT"
] | 1
|
2018-10-31T03:12:30.000Z
|
2018-10-31T03:12:30.000Z
|
apps/emoter/models.py
|
iapain/smartwebapps
|
19dcd2440cab5675708ddc0c3e1dad037e9f90cb
|
[
"MIT"
] | null | null | null |
apps/emoter/models.py
|
iapain/smartwebapps
|
19dcd2440cab5675708ddc0c3e1dad037e9f90cb
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from apps.ai.bayes import BayesianClassifier
from contrib.feedparser import feedparser
STATUS_DEFAULT = 'N'
STATUS_POSITIVE = 'H'
STATUS_NEGATIVE = 'S'
STATUS_CHOICES = (
(STATUS_DEFAULT, _("Neutral")),
(STATUS_POSITIVE, _("Positive")),
(STATUS_NEGATIVE, _("Negative"))
)
class SearchTerm(models.Model):
search = models.CharField(_("Search Term"), max_length=100)
def __unicode__(self):
return self.search
class Tweet(models.Model):
text = models.CharField(_("Message"), max_length=140)
polarity = models.CharField(_("Polarity"), choices=STATUS_CHOICES, max_length=1, default=STATUS_DEFAULT)
def save(self, *args, **kwargs):
if not self.id:
classifier = BayesianClassifier()
self.polarity = classifier.classify(self.text)
super(Tweet, self).save()
def __unicode__(self):
return self.text
@receiver(post_save, sender=SearchTerm)
def create_tweets(sender, instance, created, **kwargs):
"""Create a news items from source"""
#if created:
classifier = BayesianClassifier()
url = "feed://search.twitter.com/search.atom?q=" + instance.search
feed = feedparser.parse(url)
for item in feed.entries:
news = Tweet(text=item.summary)
news.save()
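# Usage sketch (added for illustration, not part of the original app): because of the
# post_save receiver above, creating a SearchTerm fetches the Twitter Atom feed for that
# term and stores each entry as a Tweet, whose polarity is classified in Tweet.save().
#
#   term = SearchTerm.objects.create(search="django")      # triggers create_tweets()
#   Tweet.objects.filter(polarity=STATUS_POSITIVE).count() # positive tweets so far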
| 29.34
| 108
| 0.689162
|
422d6a5ffe6533e05489ac92f7738d2493453490
| 1,782
|
py
|
Python
|
cases/0011/0011.py
|
marcusrosenblatt/petab_test_suite
|
5b69c0dccf38867d47331303c01f5e935952f502
|
[
"BSD-3-Clause"
] | null | null | null |
cases/0011/0011.py
|
marcusrosenblatt/petab_test_suite
|
5b69c0dccf38867d47331303c01f5e935952f502
|
[
"BSD-3-Clause"
] | null | null | null |
cases/0011/0011.py
|
marcusrosenblatt/petab_test_suite
|
5b69c0dccf38867d47331303c01f5e935952f502
|
[
"BSD-3-Clause"
] | null | null | null |
from petabtests import *
from petab.C import *
import petab
import pandas as pd
test_id = 11
# problem --------------------------------------------------------------------
model = DEFAULT_MODEL_FILE
condition_df = pd.DataFrame(data={
CONDITION_ID: ['c0'],
'B': [2]
}).set_index([CONDITION_ID])
measurement_df = pd.DataFrame(data={
OBSERVABLE_ID: ['obs_a', 'obs_a'],
SIMULATION_CONDITION_ID: ['c0', 'c0'],
TIME: [0, 10],
MEASUREMENT: [0.7, 0.1]
})
observable_df = pd.DataFrame(data={
OBSERVABLE_ID: ['obs_a'],
OBSERVABLE_FORMULA: ['A'],
NOISE_FORMULA: [0.5]
}).set_index([OBSERVABLE_ID])
parameter_df = pd.DataFrame(data={
PARAMETER_ID: ['k1', 'k2'],
PARAMETER_SCALE: [LIN] * 2,
LOWER_BOUND: [0] * 2,
UPPER_BOUND: [10] * 2,
NOMINAL_VALUE: [0.8, 0.6],
ESTIMATE: [1] * 2,
}).set_index(PARAMETER_ID)
# write files
write_problem(test_id=test_id,
parameter_df=parameter_df,
condition_dfs=[condition_df],
observable_dfs=[observable_df],
measurement_dfs=[measurement_df],
sbml_files=['conversion_modified.xml'])
# solutions ------------------------------------------------------------------
simulation_df = measurement_df.copy(deep=True).rename(
columns={MEASUREMENT: SIMULATION})
simulation_df[SIMULATION] = [analytical_a(t, 1, 2, 0.8, 0.6)
for t in simulation_df[TIME]]
chi2 = petab.calculate_chi2(
measurement_df, simulation_df, observable_df, parameter_df)
llh = petab.calculate_llh(
measurement_df, simulation_df, observable_df, parameter_df)
print(llh)
# write files
write_solution(test_id=test_id,
chi2=chi2,
llh=llh,
simulation_dfs=[simulation_df])
| 25.457143
| 78
| 0.59596
|
91dde65e6f45b713ef63d2f77806ca0d80d7f86d
| 209
|
py
|
Python
|
Parte 02/Projeto 27.py
|
andrewyamagata/Python
|
ac9baf16cd142156829ec6e977ecfcac8a4e3965
|
[
"MIT"
] | null | null | null |
Parte 02/Projeto 27.py
|
andrewyamagata/Python
|
ac9baf16cd142156829ec6e977ecfcac8a4e3965
|
[
"MIT"
] | null | null | null |
Parte 02/Projeto 27.py
|
andrewyamagata/Python
|
ac9baf16cd142156829ec6e977ecfcac8a4e3965
|
[
"MIT"
] | null | null | null |
#Several numbers, terminated by a flag (sentinel value)
c = s = 0
while True:
    n = int(input("Enter a number: "))
    if n == 999:
        break
    c += 1
    s += n
print(f"You entered {c} numbers and their sum is {s}")
| 23.222222
| 68
| 0.564593
|
c7bae8c8b8699dfb4248b7763cb41c8fe2e3efa3
| 7,236
|
py
|
Python
|
model_zoo/official/gnn/gat/src/utils.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
model_zoo/official/gnn/gat/src/utils.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
model_zoo/official/gnn/gat/src/utils.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for training gat"""
from mindspore import nn
from mindspore.common.parameter import ParameterTuple
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P
class MaskedSoftMaxLoss(nn.Cell):
"""Calculate masked softmax loss with l2 loss"""
def __init__(self, num_class, label, mask, l2_coeff, params):
super(MaskedSoftMaxLoss, self).__init__()
self.num_class = num_class
self.label = label
self.mask = mask
self.softmax = P.SoftmaxCrossEntropyWithLogits()
self.reduce_mean = P.ReduceMean()
self.cast = P.Cast()
self.l2_coeff = l2_coeff
self.params = ParameterTuple(list(param for param in params if param.name[-4:] != 'bias'))
self.reduce_sum = P.ReduceSum()
self.num_params = len(self.params)
def construct(self, logits):
"""calc l2 loss"""
l2_loss = 0
for i in range(self.num_params):
l2_loss = l2_loss + self.l2_coeff * P.L2Loss()(self.params[i])
logits = P.Reshape()(logits, (-1, self.num_class))
label = P.Reshape()(self.label, (-1, self.num_class))
mask = P.Reshape()(self.mask, (-1,))
logits = self.cast(logits, mstype.float32)
loss = self.softmax(logits, label)[0]
mask /= self.reduce_mean(mask)
loss *= mask
loss = self.reduce_mean(loss)
l2_loss = P.Cast()(l2_loss, mstype.float32)
return loss+l2_loss
class MaskedAccuracy(nn.Cell):
"""Calculate accuracy with mask"""
def __init__(self, num_class, label, mask):
super(MaskedAccuracy, self).__init__()
self.argmax = P.Argmax(axis=1)
self.cast = P.Cast()
self.reduce_mean = P.ReduceMean()
self.equal = P.Equal()
self.num_class = num_class
self.label = Tensor(label, dtype=mstype.float32)
self.mask = Tensor(mask, dtype=mstype.float32)
def construct(self, logits):
"""Calculate accuracy"""
logits = P.Reshape()(logits, (-1, self.num_class))
labels = P.Reshape()(self.label, (-1, self.num_class))
mask = P.Reshape()(self.mask, (-1,))
labels = self.cast(labels, mstype.float32)
correct_prediction = self.equal(self.argmax(logits), self.argmax(labels))
accuracy_all = self.cast(correct_prediction, mstype.float32)
mask = self.cast(mask, mstype.float32)
mask /= self.reduce_mean(mask)
accuracy_all *= mask
return self.reduce_mean(accuracy_all)
class LossAccuracyWrapper(nn.Cell):
"""
    Wrap GAT model with loss calculation and accuracy calculation; loss is calculated with an l2 penalty.
Args:
network (Cell): GAT network with logits calculation as output.
num_class (int): num of class for classification.
label (numpy.ndarray): Train Dataset label.
mask (numpy.ndarray): Train Dataset mask.
l2_coeff (float): l2 loss discount rate.
"""
def __init__(self, network, num_class, label, mask, l2_coeff):
super(LossAccuracyWrapper, self).__init__()
self.network = network
label = Tensor(label, dtype=mstype.float32)
mask = Tensor(mask, dtype=mstype.float32)
self.loss_func = MaskedSoftMaxLoss(num_class, label, mask, l2_coeff, self.network.trainable_params())
self.acc_func = MaskedAccuracy(num_class, label, mask)
def construct(self, feature, biases):
logits = self.network(feature, biases, training=False)
loss = self.loss_func(logits)
accuracy = self.acc_func(logits)
return loss, accuracy
class LossNetWrapper(nn.Cell):
"""Wrap GAT model with loss calculation"""
def __init__(self, network, num_class, label, mask, l2_coeff):
super(LossNetWrapper, self).__init__()
self.network = network
label = Tensor(label, dtype=mstype.float32)
mask = Tensor(mask, dtype=mstype.float32)
params = list(param for param in self.network.trainable_params() if param.name[-4:] != 'bias')
self.loss_func = MaskedSoftMaxLoss(num_class, label, mask, l2_coeff, params)
def construct(self, feature, biases):
logits = self.network(feature, biases)
loss = self.loss_func(logits)
return loss
class TrainOneStepCell(nn.Cell):
"""
    For network training. Wrap the loss net with an optimizer.
Args:
network (Cell): GAT network with loss calculation as the output.
optimizer (Cell): Optimizer for minimize the loss.
sens (Float): Backpropagation input number, default 1.0.
"""
def __init__(self, network, optimizer, sens=1.0):
super(TrainOneStepCell, self).__init__(auto_prefix=True)
self.network = network
self.network.set_grad()
self.network.add_flags(defer_inline=True)
self.weights = ParameterTuple(network.trainable_params())
self.optimizer = optimizer
self.grad = C.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
def construct(self, feature, biases):
weights = self.weights
loss = self.network(feature, biases)
sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
grads = self.grad(self.network, weights)(feature, biases, sens)
return F.depend(loss, self.optimizer(grads))
class TrainGAT(nn.Cell):
"""
    Wrap GAT model with everything needed for training, including loss, optimizer, etc.
Args:
network (Cell): GAT network.
num_class (int): num of class for classification.
label (numpy.ndarray): Train Dataset label.
mask (numpy.ndarray): Train Dataset mask.
learning_rate (float): Learning rate.
l2_coeff (float): l2 loss discount rate.
"""
def __init__(self, network, num_class, label, mask, learning_rate, l2_coeff):
super(TrainGAT, self).__init__(auto_prefix=False)
self.network = network
loss_net = LossNetWrapper(network, num_class, label, mask, l2_coeff)
optimizer = nn.Adam(loss_net.trainable_params(),
learning_rate=learning_rate)
self.loss_train_net = TrainOneStepCell(loss_net, optimizer)
self.accuracy_func = MaskedAccuracy(num_class, label, mask)
def construct(self, feature, biases):
loss = self.loss_train_net(feature, biases)
accuracy = self.accuracy_func(self.network(feature, biases))
return loss, accuracy
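# Usage sketch (added for illustration, not part of the original file): TrainGAT bundles
# the loss, optimizer and accuracy cells defined above. The `gat_net` network instance
# and the preprocessed arrays (features, biases, y_train, train_mask) are assumptions,
# expected to come from the rest of the GAT model zoo code.
#
#   train_net = TrainGAT(gat_net, num_class, y_train, train_mask,
#                        learning_rate=0.005, l2_coeff=5e-4)
#   train_net.set_train()
#   for epoch in range(num_epochs):
#       loss, acc = train_net(Tensor(features), Tensor(biases))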
| 39.977901
| 109
| 0.657546
|
2e53c201d20277f6cde46f396117735ff128d431
| 520
|
py
|
Python
|
services/web/src/extensions.py
|
dmenezesgabriel/flask-notes
|
6a18f23e841b7d8ff641c0840f4cb9814dbcccb7
|
[
"MIT"
] | null | null | null |
services/web/src/extensions.py
|
dmenezesgabriel/flask-notes
|
6a18f23e841b7d8ff641c0840f4cb9814dbcccb7
|
[
"MIT"
] | null | null | null |
services/web/src/extensions.py
|
dmenezesgabriel/flask-notes
|
6a18f23e841b7d8ff641c0840f4cb9814dbcccb7
|
[
"MIT"
] | null | null | null |
from flask_moment import Moment
from flask_babel import Babel
from flask_babel import lazy_gettext as _l
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail
babel = Babel()
db = SQLAlchemy()
mail = Mail()
migrate = Migrate()
moment = Moment()
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.login_message_category = 'info'
login_manager.login_message = _l('Please log in to access this page.')
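# Usage sketch (added for illustration, not part of the original module): these
# extension objects are created unbound and are expected to be attached to the Flask
# app elsewhere, typically in an application factory. Assuming a standard factory:
#
#   def create_app(config_object):
#       app = Flask(__name__)                 # from flask import Flask
#       app.config.from_object(config_object)
#       db.init_app(app)
#       migrate.init_app(app, db)
#       mail.init_app(app)
#       babel.init_app(app)
#       moment.init_app(app)
#       login_manager.init_app(app)
#       return app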
| 27.368421
| 70
| 0.809615
|
9029a57e3da710216edd616020bf0c3743de98b8
| 1,640
|
py
|
Python
|
printing.py
|
zimolzak/wav-in-python
|
dcbfa0a126299f29a223b9078f4f8a9ccfd6456f
|
[
"MIT"
] | null | null | null |
printing.py
|
zimolzak/wav-in-python
|
dcbfa0a126299f29a223b9078f4f8a9ccfd6456f
|
[
"MIT"
] | null | null | null |
printing.py
|
zimolzak/wav-in-python
|
dcbfa0a126299f29a223b9078f4f8a9ccfd6456f
|
[
"MIT"
] | null | null | null |
from typing import Generator
def pretty_hex_string(hs: str, bytes_space: int = 2, bytes_newline: int = 16) -> Generator[str, None, None]:
"""Prepare hexadecimal text for easier reading.
"abcdefgh" -> ['a', 'b', 'c', 'd ', 'e', 'f', 'g', 'h ']
Note the spaces. Often you do ''.join(list()) to get this:
'abcd efgh '
:param hs: Any string. Usually hexadecimal letters/numbers.
:param bytes_newline: How many bytes until insert newline
:param bytes_space: How many bytes until insert space
:return: Yield a stream of chars with spaces and newlines added every so often.
"""
characters_per_space = bytes_space * 2 # chars per space (4)
characters_per_newline = bytes_newline * 2 # chars per newline (32)
for n, c in enumerate(hs):
# every 16 bytes add a newline
if n % characters_per_newline == characters_per_newline - 1:
yield c + "\n"
elif n % characters_per_space == characters_per_space - 1:
# Every 4 char (2 bytes), add a space.
yield c + " "
else:
yield c
def ints2dots(ints: list, max_int: int = 65535, max_spaces: int = 75) -> Generator[str, None, None]:
"""Prepare a text bar graph of numeric data. Usually a few values of WAV file samples. If they look a bit like a
sine wave, we probably decoded them properly.
list(ints2dots([1000,2000,4000])) ->
['.X', '..X', '....X']
:param max_spaces:
:param max_int:
:param ints: List of numbers. Negative means no
"""
for x in ints:
n_spaces = int(x / max_int * max_spaces)
yield '.' * n_spaces + 'X'
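if __name__ == "__main__":
    # Small demo added for illustration; the values mirror the docstring examples above.
    print(''.join(pretty_hex_string("abcdefgh", bytes_space=2, bytes_newline=16)))
    # -> 'abcd efgh '
    print('\n'.join(ints2dots([1000, 2000, 4000])))
    # -> '.X', '..X', '....X' on separate lines (bar length proportional to each value)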
| 39.047619
| 116
| 0.62622
|
a3bc61f0a8e8557bb2af4d102059f75449017aa0
| 22,703
|
py
|
Python
|
cxs/config/lib_ini_files.py
|
ajvazquez/CXS338
|
fca77807073f75670f2e7a99011dc198cc7c9829
|
[
"MIT"
] | 1
|
2021-11-05T02:38:45.000Z
|
2021-11-05T02:38:45.000Z
|
cxs/config/lib_ini_files.py
|
ajvazquez/CXS338
|
fca77807073f75670f2e7a99011dc198cc7c9829
|
[
"MIT"
] | null | null | null |
cxs/config/lib_ini_files.py
|
ajvazquez/CXS338
|
fca77807073f75670f2e7a99011dc198cc7c9829
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
#!/usr/bin/env python
#
#The MIT CorrelX Correlator
#
#https://github.com/MITHaystack/CorrelX
#Contact: correlX@haystack.mit.edu
#Project leads: Victor Pankratius, Pedro Elosegui Project developer: A.J. Vazquez Alvarez
#
#Copyright 2017 MIT Haystack Observatory
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
#------------------------------
#------------------------------
#Project: CorrelX.
#File: lib_ini_files.py.
#Author: A.J. Vazquez Alvarez (ajvazquez@haystack.mit.edu)
#Description:
"""
Routines for generating/accessing the .ini files with the configuration of the scenario for the correlation.
Notes
-----
Parameters and values are case sensitive!
| Experiment files:
| Every experiment requires the following files:
| correlation.ini: configuration of the correlation (start and end time, FFT length, etc.)
| stations.ini: information about stations clocks (polynomials).
| sources.ini: information about sources.
| media.ini: information about media (input file names, formats, etc)
    | delay_model.ini: information about delay model (polynomials).
|
| Additionally, the following file is generated during initialization:
| delays.ini: re-processed polynomials for delay model.
Regarding initialization files
------------------------------
Initialization files follow this format:
|
| [section]
| param: value
"""
#History:
#initial version: 2015.12 ajva
#MIT Haystack Observatory
from __future__ import print_function
import numpy as np
import os
if os.environ.get("is_legacy"):
from const_ini_files import *
else:
from cxs.config.const_ini_files import *
try:
import configparser
except ImportError:
import ConfigParser as configparser
def get_seconds_ini_str(seconds):
return "{:.3f}".format(seconds)
def serialize_config(sources_file='media.ini'):
"""
Converts configuration file into string.
Parameters
----------
sources_file : str
name of initialization [.ini] file (e.g.: media.ini, stations.ini, delays.ini, etc.)
Returns
-------
serial_str : str
serialized contents of initialization file.
Notes
-----
| **Format configuration:**
|
| See const_ini_files.py:
| SEPARATOR_ELEMENTS
| SEPARATOR_VECTOR
| SEP_VALUES
|
|
    | **Notes:**
    |
    | Avoid using the reserved separators inside values (e.g. in lists of files);
    | a comma-separated list of files is written with SEP_VALUES instead:
    | file1,file2 -> file1:file2
|
|
| **Example:**
|
| >>> serial_str=serialize_config()
| >>> print(serial_str)
| VF-0.vt,polarizations/L:R:L:R,station/At,channels/0:0:1:1;VF-1.vt,polarizations/L:R:L:R,station/At,channels/0:0:1:1
"""
s = configparser.ConfigParser()
s.optionxform=str
s.read(sources_file)
serial_str=""
for section in s.sections():
serial_str+=section
for (each_key, each_val) in s.items(section):
each_val_mod=""
for i in each_val:
if (i==SEPARATOR_ELEMENTS)or(i==SEPARATOR_VECTOR):
each_val_mod+=SEP_VALUES
else:
each_val_mod+=i
serial_str+= SEPARATOR_ELEMENTS + each_key + SEPARATOR_PARAM_VAL + each_val_mod
serial_str+= SEPARATOR_VECTOR
serial_str=serial_str[:-1]
return(serial_str)
def serial_params_to_array(read_str=""):
"""
Converts string with serialized configuration file into array.
Parameters
----------
read_str : str
serialized configuration [created with serialize_config()].
Returns
-------
files_param : list of lists
list of lists based on serialized configuration.
Notes
-----
|
| **Example:**
|
| >>> params_array=serial_params_to_array(serial_str)
| >>> print(params_array)
| [['VF-0.vt', 'polarizations/L:R:L:R', 'station/At', 'channels/0:0:1:1'], ['VF-1.vt', 'polarizations/L:R:L:R', 'station/At', 'channels/0:0:1:1']]
"""
read_split = read_str.split(SEPARATOR_VECTOR)
files_param =[]
for i in read_split:
files_param += [i.split(SEPARATOR_ELEMENTS)]
return(files_param)
def get_param_serial(params_array,section,param):
"""
Retrieves value given an array with parameters, the filename and the parameter.
Parameters
----------
params_array : list
configuration [created with serial_params_to_array()].
section : str
section to be looked up.
param : str
parameter to be looked up.
Returns
-------
value : str
value corresponding to requested section and param.
Notes
-----
|
| **Example:**
|
| >>> value = get_param_serial(params_array,'VF-0.vt','channels')
| >>> print(value)
| 0:0:1:1
"""
    will_break=0
    value=""
    for vector in params_array:
        if will_break==1:
            break
        if vector[0]==section:
            for i in vector[1:]:
                if will_break==1:
                    break
                if param in i:
                    value=i.split(SEPARATOR_PARAM_VAL)[1]
                    will_break=1
    # Unused alternative lookup (kept from the original source for reference):
    #   value=""
    #   id_section = list(zip(*params_array))[0].index(section)
    #   for i in params_array[id_section][1:]:
    #       if param in i:
    #           value=i.split(SEPARATOR_PARAM_VAL)[1]
    return(value)
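# Editor's sketch (illustrative): chaining the helpers above on the serialized
# string shown in the serialize_config() docstring. The separators (',' between
# elements, ';' between sections, '/' between param and value, ':' between values)
# are inferred from that example and are assumptions about the const_ini_files
# constants, so this is left as a doctest-style comment rather than live code.
# >>> serial_str = "VF-0.vt,polarizations/L:R:L:R,station/At,channels/0:0:1:1"
# >>> params = serial_params_to_array(serial_str)
# >>> get_param_serial(params, 'VF-0.vt', 'channels')
# '0:0:1:1'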
def get_param_total(params_array,section,param,separator_values=SEP_VALUES):
"""
Returns the number of different values for a parameter.
This is e.g. for getting the number of polarizations in a media.ini file for a station.
Parameters
----------
params_array : list
configuration [created with serial_params_to_array()].
section : str
section to be looked up.
param : str
parameter to be looked up.
separator_values
separator for values [SEP_VALUES from const_ini_files.py by default].
Returns
-------
total : int
        number of different values separated by separator_values.
Notes
-----
|
| **Example:**
|
    | >>> tot = get_param_total(params_array,'VF-0.vt','channels')
| >>> print(tot)
| 2
"""
value = get_param_serial(params_array,section,param)
total = len(set(value.split(SEP_VALUES)))
return(total)
def get_param_eq_vector(params_array,section,param,separator_values=SEP_VALUES,modein="int"):
"""
Returns the vector with the mapped values for the specified parameters.
Parameters
----------
params_array : list
configuration [created with serial_params_to_array()].
section : str
section to be looked up.
param : str
parameter to be looked up.
separator_values
separator for values [SEP_VALUES from const_ini_files.py by default].
modein : str
selector of format for output:
| "int" : convert values in output list to integer.
| else: return output list as is (strings).
Returns
-------
eq_vector
equivalent vector with indices reported at the first sections of the ini file.
Notes
-----
|
| **Example:**
|
| Given the file media.ini:
| [channels]
| CH2 = 1
| CH1 = 0
| [polarizations]
| L = 0
| R = 1
| [VDF-0.vt]
| polarizations = L:R:L:R
| channels = CH1:CH1:CH2:CH2
| station = At
| ...
| >>>params_array=serial_params_to_array(serialize_config(sources_file='media.ini'))
| >>>eq_vector=get_param_eq_vector(params_array,'VDF-0.vt','polarizations')
| >>>print(eq_vector)
| [0, 1, 0, 1]
"""
value = get_param_serial(params_array,section,param)
values = value.split(SEP_VALUES)
eq_vector=[]
if modein=="int":
for i in values:
eq_vector+=[int(get_param_serial(params_array,param,i))]
else:
for i in values:
eq_vector+=[get_param_serial(params_array,param,i)]
return(eq_vector)
#def get_param_eq_vector_list(params_array,section,param,separator_values=SEP_VALUES):
# """
# Returns the vector with the mapped values for the specified parameters for separated elements.
#
# Example:
# --------
# Given the file media.ini:
# [channels]
# P15 = 4,5
#
#
# """
# value = get_param_serial(params_array,section,param)
# values = value.split(SEP_VALUES)
# eq_vector=[]
# for i in values:
# eq_vector+=[list(map(int,get_val_vector(params_array,param,i)))]
# return(eq_vector)
def get_val_vector(params_array,section,param):
"""
Returns vector with list of values.
Parameters
----------
params_array : list
configuration [created with serial_params_to_array()].
section : str
string with section to be looked up.
param : str
parameter to be looked up.
Notes
-----
|
| **Example:**
| Given the file media.ini:
| ...
| [file1]
| sidebands = U:U:U:U
| ...
    | >>> sidebands=get_val_vector(params_media,"file1",C_INI_MEDIA_SIDEBANDS)
    | >>> print(sidebands)
| ['U', 'U', 'U', 'U']
"""
value = get_param_serial(params_array,section,param)
values = value.split(SEP_VALUES)
return(values)
def get_all_params_serial(params_array,section):
"""
Retrieves list with all the parameters for a section.
Parameters
----------
params_array : list
list with configuration [created with serial_params_to_array()].
section : str
section to be looked up.
Returns
-------
values : str
lists of strings corresponding to the parameters in the section.
"""
values=[]
for vector in params_array:
if vector[0]==section:
for i in vector[1:]:
values+=[i.split(SEPARATOR_PARAM_VAL)[0]]
return(values)
def get_all_values_serial(params_array,param):
"""
    Retrieves a list with all the values of a parameter across all sections.
Parameters
----------
params_array : list
configuration [created with serial_params_to_array()].
param : str
parameter to be looked up through all sections.
Returns
-------
values
        list of strings with all the values corresponding to the requested parameter.
"""
values=[]
for vector in params_array:
for i in vector[1:]:
if param in i:
values+=[i.split(SEPARATOR_PARAM_VAL)[1]]
return(values)
def get_all_sections(params_array):
"""
Retrieves list with all the section names.
Parameters
----------
params_array
list with configuration [created with serial_params_to_array()].
Returns
-------
sections
list of strings with the names of all the sections in the ini file.
"""
sections=[]
for vector in params_array:
sections.append(vector[0])
return(sections)
##################################################################
#
# Specific routines
#
##################################################################
# Specific for delay model
def get_pair_st_so(st_id,so_id):
"""
Get string st<st_id>-so<so_id> (e.g. st0-so0).
Parameters
----------
st_id : int
station id.
so_id : int
source id.
Returns
-------
pair_st_so : str
output.
"""
return("st"+str(st_id)+"-"+"so"+str(so_id))
def get_section_delay_model(params_array,mjd_in,seconds,source_id_in,station_id_in,v=0):
"""
Get section from delay model ini file.
Parameters
----------
params_array : list
information from delay model ini file (see lib_ini_files.py).
mjd_in
MJD for the start of the experiment.
seconds
number of seconds for the start of the experiment.
source_id_in : int
source identifier in sources ini file.
station_id_in : int
stations identifier in stations ini file.
v : int
0 by default, 1 for verbose mode.
Returns
-------
section_found : str
section found in the delay model ini file, otherwise "".
start_s : int
seconds for this section.
Notes
-----
|
| **TO DO:**
|
| Move to lib_ini_files.py
"""
section_found = ""
sections = get_all_sections(params_array)
start_s=0
for i in sections:
if v==1:
print(i)
[mjd_str,start_str,end_str,so_str,st_str]=i.split('-')
mjd=int(mjd_str)
start_s=int(start_str)
end_s=int(end_str)
so_id=int(so_str[2:])
st_id=int(st_str[2:])
if (mjd_in==mjd)and(start_s<=seconds)and(seconds<end_s)and(source_id_in==so_id)and(station_id_in==st_id):
section_found=i
break
return([section_found,start_s])
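# Editor's sketch (illustrative, hypothetical section name): sections in
# delay_model.ini are expected to be named "<mjd>-<start_s>-<end_s>-so<source>-st<station>",
# so a frame at MJD 57089, t=3.5 s, source 0 and station 1 falls into the section below.
assert get_section_delay_model([["57089-0-120-so0-st1"]], 57089, 3.5, 0, 1) == ["57089-0-120-so0-st1", 0]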
def get_vector_delay_ref(vector_params_delay):
"""
Get seconds corresponding to elements in delays.ini.
Parameters
----------
vector_params_delay : str
serialized representation of the delay model ini file.
Returns
-------
sv : list of floats
seconds for delay information (start time polynomials).
"""
sv=[]
for i in vector_params_delay:
if DELAY_MODEL_REL_MARKER in i[:len(DELAY_MODEL_REL_MARKER)]:
#sv+=[int(i.split(DELAY_MODEL_REL_MARKER)[1])]
sv+=[float(i.split(DELAY_MODEL_REL_MARKER)[1])]
sv=np.array(sv)
return(sv)
def find_nearest_seconds(vector_seconds_ref,seconds_fr):
"""
Find second which is nearest to seconds_fr in delay param vector. In other words, find the timestamp
(start time) of the accumulation period to which the timestamp of the frame corresponds.
Parameters
----------
vector_seconds_ref : list of floats
seconds for delay information (start time polynomials).
seconds_fr
seconds to be searched for in the previous item.
Returns
-------
seconds_out
        value in vector_seconds_ref nearest to seconds_fr, or -1 if the vector is empty
"""
sv = vector_seconds_ref # get_vector_delay_ref(vector_params_delay)
sv_sub=np.array([seconds_fr]*len(sv))
difference=np.subtract(sv,sv_sub)
if np.abs(difference) is None or np.abs(difference).size==0:
# TO DO: internal log? if so argument to function
#if INTERNAL_LOG==1:
#print("zM"+KEY_SEP+"Failed find_nearest="+str(seconds_fr)+ " in ("+','.join(vector_params_delay)+")")
if DEBUG_GENERAL_M:
print("zM\tFailed find_nearest="+str(seconds_fr)+ " in ("+','.join(vector_params_delay)+")")
seconds_out = -1
else:
seconds_out = sv[np.argmin(np.abs(difference))]
return(seconds_out)
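# Editor's sketch (illustrative numbers): the frame timestamp 1.4 s is closest to
# the 1.0 s accumulation-period start below, which is what gets returned.
assert find_nearest_seconds(np.array([0.0, 1.0, 2.0]), 1.4) == 1.0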
#def get_rates_delays(seconds_fr_nearest,pair_st_so,params_delays,cache_rates=[]):
def get_rates_cache(seconds_fr_nearest,pair_st_so,params_delays,cache_rates=[]):
"""
Get rates from delays.ini. It allows to keep a cache (currently only one element) to
reduce the number of look-ups.
"""
found=None
if cache_rates!=[]:
#i = cache_rates[0]
if seconds_fr_nearest==cache_rates[0] and pair_st_so==cache_rates[1]:
#found=i[2][0]
found=0
#found = check_delay_cache(seconds_fr_nearest,pair_st_so,cache_rates)
if found is not None:
[rate_delay,ref_delay,abs_delay,delay]= cache_rates[2][0]#found_rates
else:
str_seconds = get_seconds_ini_str(seconds_fr_nearest)
#if VERBOSE_INI_DELAYS:
rr0_epoch=DELAY_MODEL_RR0_MARKER+str_seconds
rr1_epoch=DELAY_MODEL_RR1_MARKER+str_seconds
rr2_epoch=DELAY_MODEL_RR2_MARKER+str_seconds
rrr_epoch=DELAY_MODEL_RRR_MARKER+str_seconds
rc0_epoch=DELAY_MODEL_RC0_MARKER+str_seconds
rc1_epoch=DELAY_MODEL_RC1_MARKER+str_seconds
zc0_epoch=DELAY_MODEL_ZC0_MARKER+str_seconds
zc1_epoch=DELAY_MODEL_ZC1_MARKER+str_seconds
rcr_epoch=DELAY_MODEL_RCR_MARKER+str_seconds
rcm_epoch=DELAY_MODEL_RCM_MARKER+str_seconds
rcc_epoch=DELAY_MODEL_RCC_MARKER+str_seconds
ddd_epoch=DELAY_MODEL_DDD_MARKER+str_seconds
try:
rate_delay = [float(get_param_serial(params_delays,pair_st_so,rr0_epoch)),\
float(get_param_serial(params_delays,pair_st_so,rr1_epoch)),\
float(get_param_serial(params_delays,pair_st_so,rr2_epoch)),\
float(get_param_serial(params_delays,pair_st_so,rrr_epoch)),\
float(get_param_serial(params_delays,pair_st_so,rc0_epoch)),\
float(get_param_serial(params_delays,pair_st_so,rc1_epoch)),\
float(get_param_serial(params_delays,pair_st_so,zc0_epoch)),\
float(get_param_serial(params_delays,pair_st_so,zc1_epoch)),\
float(get_param_serial(params_delays,pair_st_so,rcr_epoch)),\
float(get_param_serial(params_delays,pair_st_so,rcm_epoch)),\
float(get_param_serial(params_delays,pair_st_so,rcc_epoch)),\
float(get_param_serial(params_delays,pair_st_so,ddd_epoch))]
except:
print(params_delays)
print(pair_st_so)
print(rr0_epoch)
print("==")
print("==")
print("==")
print("==")
print("==")
raise
rel_epoch=DELAY_MODEL_REL_MARKER+str_seconds
delay = float(get_param_serial(params_delays,pair_st_so,rel_epoch))
f_epoch=DELAY_MODEL_REF_MARKER+str_seconds
ref_delay=float(get_param_serial(params_delays,pair_st_so,f_epoch))
abs_epoch=DELAY_MODEL_ABS_MARKER+str_seconds
abs_delay = float(get_param_serial(params_delays,pair_st_so,abs_epoch))
#else:
# di_epoch=DELAY_MODEL_DI_MARKER+str(seconds_fr_nearest)
# rate_delay=map(float,get_param_serial(params_delays,pair_st_so,di_epoch).split(SEP_VALUES))
#if rate_delay!=rate_delay2:
# print("Diff")
new_rates = [rate_delay,ref_delay,abs_delay,delay]
# Simply store last element
#cache_rates=[]
#cache_rates.append([seconds_fr_nearest,pair_st_so,[new_rates]])
cache_rates = [seconds_fr_nearest,pair_st_so,[new_rates]]
rates_out = [rate_delay,ref_delay,abs_delay,cache_rates]
#delays_out = [delay,cache_rates]
#return([rates_out,delays_out])
return(rates_out)
# [rates_out,delays_out] = get_rates_delays(seconds_fr_nearest,pair_st_so,params_delays,cache_rates)
# return(rates_out)
def get_delay_cache(seconds_fr_nearest,pair_st_so,params_delays,cache_delays=[]):
"""
Get the delays from the cache.
"""
#print("get: "+str(seconds_fr_nearest))
# First try to find in delay cache, then complete cache, then fetch from ini info.
found = None
#if 1==0: #cache_delays!=[]:
if cache_delays!=[]:
#i = cache_rates[0]
if seconds_fr_nearest==cache_delays[0] and pair_st_so==cache_delays[1]:
delay_new=cache_delays[2]
found=1
if found is None:
#[rates_out,delays_out] = get_rates_delays(seconds_fr_nearest,pair_st_so,params_delays,cache_rates)
sec = get_seconds_ini_str(seconds_fr_nearest)
rel_epoch=DELAY_MODEL_REL_MARKER+sec
delay = float(get_param_serial(params_delays,pair_st_so,rel_epoch))
cache_delays = [seconds_fr_nearest,pair_st_so,delay]
delays_out= [delay,cache_delays]
else:
#cache_delays = [seconds_fr_nearest,pair_st_so,delay_new]
delays_out=[delay_new,cache_delays]
#print(delays_out)
return(delays_out)
def extract_data_media(params_media, current_file_name):
forced_frame_length = int(get_param_serial(params_media, current_file_name, C_INI_MEDIA_FRAMEBYTES))
forced_format = get_param_serial(params_media, current_file_name, C_INI_MEDIA_FORMAT)
forced_version = get_param_serial(params_media, current_file_name, C_INI_MEDIA_VERSION)
tot_pols = get_param_total(params_media, current_file_name, C_INI_MEDIA_POLARIZATIONS)
pols_assoc_vector = [int(val) for val in
get_param_eq_vector(params_media, current_file_name, C_INI_MEDIA_POLARIZATIONS)]
channels_assoc_vector = [int(val) for val in get_param_eq_vector(params_media, current_file_name, C_INI_MEDIA_CHANNELS)]
freqs_assoc_vector = [float(val) for val in
get_param_eq_vector(params_media, current_file_name, C_INI_MEDIA_FREQUENCIES, modein="str")]
sidebands_assoc_vector = get_val_vector(params_media, current_file_name, C_INI_MEDIA_SIDEBANDS)
freq_sample_in = int(float(get_param_serial(params_media, current_file_name, C_INI_MEDIA_FREQ_SAMPLE)))
station_name = get_param_serial(params_media, current_file_name, C_INI_MEDIA_STATION)
return [forced_frame_length, forced_format, forced_version, tot_pols, pols_assoc_vector, \
channels_assoc_vector, freqs_assoc_vector, sidebands_assoc_vector, freq_sample_in, station_name]
| 31.619777
| 461
| 0.634277
|
d28c60a24870f65453ba47f4bbe3fe82a71acdb9
| 1,222
|
py
|
Python
|
allauth/account/urls.py
|
sssbox/django-allauth
|
421ca0b6767ed9368775a722762ac5f9bebb9689
|
[
"MIT"
] | null | null | null |
allauth/account/urls.py
|
sssbox/django-allauth
|
421ca0b6767ed9368775a722762ac5f9bebb9689
|
[
"MIT"
] | null | null | null |
allauth/account/urls.py
|
sssbox/django-allauth
|
421ca0b6767ed9368775a722762ac5f9bebb9689
|
[
"MIT"
] | null | null | null |
from django.conf.urls.defaults import patterns, url
import views
urlpatterns = patterns("",
url(r"^email/$", views.email, name="account_email"),
url(r"^signup/$", views.signup, name="account_signup"),
url(r"^login/$", views.login, name="account_login"),
url(r"^password/change/$", views.password_change, name="account_change_password"),
url(r"^password/set/$", views.password_set, name="account_set_password"),
# url(r"^password_delete/$", views.password_delete, name="acct_passwd_delete"),
# url(r"^password_delete/done/$", "django.views.generic.simple.direct_to_template", {
# "template": "account/password_delete_done.html",
# }, name="acct_passwd_delete_done"),
url(r"^logout/$", views.logout, name="account_logout"),
url(r"^confirm_email/(?P<key>\w+)/$", views.confirm_email, name="account_confirm_email"),
# password reset
url(r"^password/reset/$", views.password_reset, name="account_reset_password"),
url(r"^password/reset/done/$", views.password_reset_done, name="account_reset_password_done"),
url(r"^password/reset/key/(?P<uidb36>[0-9A-Za-z]+)-(?P<key>.+)/$", views.password_reset_from_key, name="account_reset_password_from_key"),
)
| 48.88
| 142
| 0.697218
|
2b6f4765b3b6bfff847a5a79bdccc013522a23f7
| 390
|
py
|
Python
|
observable.py
|
NonExistentUsername/explosive-kittens-bot
|
d96fd068bb1b11b0f2409f85c59c20fa68593c69
|
[
"Apache-2.0"
] | null | null | null |
observable.py
|
NonExistentUsername/explosive-kittens-bot
|
d96fd068bb1b11b0f2409f85c59c20fa68593c69
|
[
"Apache-2.0"
] | null | null | null |
observable.py
|
NonExistentUsername/explosive-kittens-bot
|
d96fd068bb1b11b0f2409f85c59c20fa68593c69
|
[
"Apache-2.0"
] | null | null | null |
class Observable:
def _notify(self, notification, **data):
for observer in self.__observers:
observer.notify(notification, **data)
def __init__(self):
self.__observers = []
def add_observer(self, observer):
self.__observers.append(observer)
def remove_observer(self, observer):
self.__observers.remove(observer)
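# Editor's sketch (illustrative usage, not part of the original module): an
# observer only needs a notify(notification, **data) method; a subclass of
# Observable fires notifications through _notify(). All names below are made up.
if __name__ == "__main__":
    class Deck(Observable):
        def draw(self, card):
            self._notify("card_drawn", card=card)
    class PrintObserver:
        def notify(self, notification, **data):
            print("got", notification, data)
    deck = Deck()
    deck.add_observer(PrintObserver())
    deck.draw("exploding kitten")  # -> got card_drawn {'card': 'exploding kitten'}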
| 26
| 50
| 0.635897
|
ec20869b129f14e0f193cf8979542fad7ba99dfc
| 2,935
|
py
|
Python
|
forte/data/readers/ms_marco_passage_reader.py
|
jrxk/forte
|
bb88412a23efa8b4b22f636c1e8073c18fab8c18
|
[
"Apache-2.0"
] | 2
|
2021-01-01T12:07:27.000Z
|
2021-09-10T03:57:18.000Z
|
forte/data/readers/ms_marco_passage_reader.py
|
ha-lins/forte
|
4594f65f41a8dbfc822573d12fb9af58c37a83a4
|
[
"Apache-2.0"
] | null | null | null |
forte/data/readers/ms_marco_passage_reader.py
|
ha-lins/forte
|
4594f65f41a8dbfc822573d12fb9af58c37a83a4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A reader to read passages from `MS MARCO` dataset, pertaining to the
Passage Ranking task. Uses the document text for indexing.
Official webpage -
https://github.com/microsoft/MSMARCO-Passage-Ranking#data-information-and-formating
Dataset download link -
https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz
Dataset Paper -
Nguyen, Tri, et al. "MS MARCO: A Human-Generated MAchine Reading
COmprehension Dataset." (2016).
"""
import os
from typing import Iterator, Tuple
from forte.common.configuration import Config
from forte.common.resources import Resources
from forte.data.data_pack import DataPack
from forte.data.readers.base_reader import PackReader
from ft.onto.base_ontology import Document
__all__ = [
"MSMarcoPassageReader"
]
class MSMarcoPassageReader(PackReader):
def __init__(self):
super().__init__()
self.configs = None
def initialize(self, resources: Resources, configs: Config):
# pylint: disable = unused-argument
self.configs = configs
def _collect(self, *args, **kwargs) -> Iterator[Tuple[str, str]]:
# pylint: disable = unused-argument, undefined-variable
dir_path: str = args[0]
corpus_file_path = os.path.join(dir_path, 'collection.tsv')
with open(corpus_file_path, 'r') as file:
for line in file:
doc_id, doc_content = line.split('\t', 1)
yield doc_id, doc_content
def _parse_pack(self, doc_info: Tuple[str, str]) -> Iterator[DataPack]:
r"""Takes the `doc_info` returned by the `_collect` method and returns a
`data_pack` that either contains entry of the type `Query`, or contains
an entry of the type Document.
Args:
doc_info: document info to be populated in the data_pack.
Returns: query or document data_pack.
"""
data_pack: DataPack = self.new_pack()
doc_id, doc_text = doc_info
data_pack.pack_name = doc_id
data_pack.set_text(doc_text)
# add documents
Document(data_pack, 0, len(doc_text))
yield data_pack
def _cache_key_function(self, data_pack: DataPack) -> str:
if data_pack.pack_name is None:
raise ValueError("Data pack does not have a document id.")
return data_pack.pack_name
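# Editor's note (illustrative, not part of the original reader): each line of
# collection.tsv is expected to hold "<doc_id>\t<passage text>"; _collect() splits
# on the first tab only, so tabs inside the passage text are preserved.
_example_line = "42\tA hypothetical passage\twith an embedded tab.\n"
assert _example_line.split('\t', 1) == ["42", "A hypothetical passage\twith an embedded tab.\n"]
del _example_line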
| 33.735632
| 83
| 0.700852
|
553e58e46cdb19db9ec03f1a59f388c7521a02cb
| 8,942
|
py
|
Python
|
app.py
|
danrneal/restaurant-menu-app
|
16f0d1e89a137df5cf2342a3c5ddda8250c695a9
|
[
"MIT"
] | null | null | null |
app.py
|
danrneal/restaurant-menu-app
|
16f0d1e89a137df5cf2342a3c5ddda8250c695a9
|
[
"MIT"
] | null | null | null |
app.py
|
danrneal/restaurant-menu-app
|
16f0d1e89a137df5cf2342a3c5ddda8250c695a9
|
[
"MIT"
] | null | null | null |
"""A web app displaying various restaurants and their menus.
Usage: flask run
"""
from flask import (
Flask,
flash,
jsonify,
redirect,
render_template,
request,
url_for,
)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import Base, MenuItem, Restaurant
engine = create_engine("sqlite:///restaurant_menu.db")
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
app = Flask(__name__)
app.secret_key = "super_secret_key"
@app.route("/")
@app.route("/restaurants/")
def show_restaurants():
"""Route handler for viewing all restaurants.
Returns:
An html template showing all restaurants
"""
restaurants = session.query(Restaurant).all()
return render_template("restaurants.html", restaurants=restaurants)
@app.route("/restaurants/new/", methods=["GET", "POST"])
def new_restaurant():
"""Route handler for creating a new restaurant.
Returns:
An html template with a form to create a new restaurant
"""
if request.method == "GET":
return render_template("new_restaurant.html")
restaurant = Restaurant(name=request.form.get("name"))
session.add(restaurant)
session.commit()
flash("New Restaurant Created!")
return redirect(url_for("show_restaurants"))
@app.route("/restaurants/<int:restaurant_id>/edit/", methods=["GET", "POST"])
def edit_restaurant(restaurant_id):
"""Route handler for modifying an existing restaurant.
Args:
restaurant_id: An int representing the id of the restaurant to modify
Returns:
An html template with a form to modify the given restaurant
"""
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
if request.method == "GET":
return render_template("edit_restaurant.html", restaurant=restaurant)
for field in request.form:
if len(request.form.get(field)) > 0:
setattr(restaurant, field, request.form.get(field))
session.add(restaurant)
session.commit()
flash("Restaurant Updated!")
return redirect(url_for("show_restaurants"))
@app.route("/restaurants/<int:restaurant_id>/delete/", methods=["GET", "POST"])
def delete_restaurant(restaurant_id):
"""Route handler to delete and existing restaurant.
Args:
restaurant_id: An int representing the id of the restaurant to delete
Returns:
An html template with a confirmation to delete the given restaurant
"""
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
if request.method == "GET":
return render_template("delete_restaurant.html", restaurant=restaurant)
session.delete(restaurant)
session.commit()
flash("Restaurant Deleted!")
return redirect(url_for("show_restaurants"))
@app.route("/restaurants/<int:restaurant_id>/")
@app.route("/restaurants/<int:restaurant_id>/menu/")
def show_menu_items(restaurant_id):
"""Route handler for displaying the menu for a given restaurant.
Args:
restaurant_id: An int representing the id of the restaurant whose menu
is to be displayed
Returns:
An html template with the given restaurant's menu displayed
"""
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
menu_items = (
session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
)
appetizers = [
menu_item
for menu_item in menu_items
if menu_item.course == "Appetizer"
]
entrees = [
menu_item for menu_item in menu_items if menu_item.course == "Entree"
]
desserts = [
menu_item for menu_item in menu_items if menu_item.course == "Dessert"
]
beverages = [
menu_item for menu_item in menu_items if menu_item.course == "Beverage"
]
uncategorized = [
menu_item
for menu_item in menu_items
if menu_item.course
not in ("Appetizer", "Entree", "Dessert", "Beverage")
]
return render_template(
"menu_items.html",
restaurant=restaurant,
menu_items=len(menu_items) > 0,
appetizers=appetizers,
entrees=entrees,
desserts=desserts,
beverages=beverages,
uncategorized=uncategorized,
)
@app.route(
"/restaurants/<int:restaurant_id>/menu/new/", methods=["GET", "POST"]
)
def new_menu_item(restaurant_id):
"""Route handler for creating a new menu item for the given restaurant.
Args:
restaurant_id: An int representing the id of the restaurant to create
the menu item for
Returns:
An html template with a form to create a new menu item
"""
if request.method == "GET":
return render_template(
"new_menu_item.html", restaurant_id=restaurant_id
)
menu_item = MenuItem(
name=request.form.get("name"),
course=request.form.get("course"),
description=request.form.get("description"),
price=request.form.get("price"),
restaurant_id=restaurant_id,
)
session.add(menu_item)
session.commit()
flash("New Menu Item Created!")
return redirect(url_for("show_menu_items", restaurant_id=restaurant_id))
@app.route(
"/restaurants/<int:restaurant_id>/menu/<int:menu_item_id>/edit/",
methods=["GET", "POST"],
)
def edit_menu_item(restaurant_id, menu_item_id):
"""Route handler for modifying an existing menu item.
Args:
restaurant_id: An int representing the id of the restaurant the given
menu item belongs to
menu_item_id: An int representing the id of the menu item to modify
Returns:
An html template with a form to modify the given menu item
"""
menu_item = session.query(MenuItem).filter_by(id=menu_item_id).one()
if request.method == "GET":
return render_template("edit_menu_item.html", menu_item=menu_item)
for field in request.form:
if len(request.form.get(field)) > 0:
setattr(menu_item, field, request.form.get(field))
session.add(menu_item)
session.commit()
flash("Menu Item Updated!")
return redirect(url_for("show_menu_items", restaurant_id=restaurant_id))
@app.route(
"/restaurants/<int:restaurant_id>/menu/<int:menu_item_id>/delete/",
methods=["GET", "POST"],
)
def delete_menu_item(restaurant_id, menu_item_id):
"""Route handler for deleting an existing menu item.
Args:
restaurant_id: An int representing the id of the restaurant the given
menu item belongs to
menu_item_id: An int representing the id of the menu item to delete
Returns:
An html template with a confirmation to delete the given menu item
"""
menu_item = session.query(MenuItem).filter_by(id=menu_item_id).one()
if request.method == "GET":
return render_template("delete_menu_item.html", menu_item=menu_item)
session.delete(menu_item)
session.commit()
flash("Menu Item Deleted!")
return redirect(url_for("show_menu_items", restaurant_id=restaurant_id))
@app.route("/api/restaurants/")
def restaurants_api():
"""Route handler for api endpoint retreiving all restaurants.
Returns:
response: A json object containing all restaurants
"""
restaurants = session.query(Restaurant).all()
response = jsonify(
restaurants=[restaurant.serialize for restaurant in restaurants]
)
return response
@app.route("/api/restaurants/<int:restaurant_id>/")
@app.route("/api/restaurants/<int:restaurant_id>/menu/")
def menu_items_api(restaurant_id):
"""Route handler for api endpoint retreiving menu items for a restaurant.
Args:
restaurant_id: An int representing the id of the restaurant whose menu
items are to be retrieved
Returns:
response: A json object containing all menu items for a given
restaurant
"""
menu_items = (
session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
)
response = jsonify(
menu_items=[menu_item.serialize for menu_item in menu_items]
)
return response
@app.route("/api/restaurants/<int:restaurant_id>/menu/<int:menu_id>/")
def menu_item_api(restaurant_id, menu_id): # pylint: disable=unused-argument
"""Route handler for api endpoint retreiving a specific menu item.
Args:
restaurant_id: An int representing the id of the restaurant the given
menu item to be retrieved belongs to (unused)
        menu_id: An int representing the id of the menu item to be
retrieved
Returns:
response: A json object containing the given menu item
"""
menu_item = session.query(MenuItem).filter_by(id=menu_id).one()
response = jsonify(menu_item=menu_item.serialize)
return response
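def _demo_api_shapes():  # pragma: no cover
    """Editor's sketch (illustrative, not part of the original app): exercise the
    JSON endpoints with Flask's built-in test client instead of a live server."""
    with app.test_client() as client:
        print(client.get("/api/restaurants/").get_json())
        print(client.get("/api/restaurants/1/menu/").get_json())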
if __name__ == "__main__":
app.run(debug=True)
| 29.222222
| 79
| 0.681279
|
1bf1084afc48f5eec6732f56eb0583b435359ef6
| 23,785
|
py
|
Python
|
cogs/pokemon.py
|
s0hv/Not-a-bot
|
41a1172ebb8e66dbc9effb1f14e7b91212f572ea
|
[
"MIT"
] | 6
|
2019-11-15T00:57:15.000Z
|
2022-01-08T08:11:08.000Z
|
cogs/pokemon.py
|
s0hv/Not-a-bot
|
41a1172ebb8e66dbc9effb1f14e7b91212f572ea
|
[
"MIT"
] | null | null | null |
cogs/pokemon.py
|
s0hv/Not-a-bot
|
41a1172ebb8e66dbc9effb1f14e7b91212f572ea
|
[
"MIT"
] | 3
|
2018-12-25T14:42:08.000Z
|
2021-07-22T15:56:43.000Z
|
import csv
import json
import logging
import math
import os
import re
from functools import partial
import discord
from discord import utils, Embed
from discord.ext.commands import BucketType, guild_only
from discord.ext.commands.errors import BotMissingPermissions
from bot.bot import command, cooldown, has_permissions
from bot.exceptions import BotException
from bot.globals import POKESTATS
from cogs.cog import Cog
from utils.utilities import random_color, wait_for_yes, \
check_botperm
logger = logging.getLogger('terminal')
pokestats = re.compile(r'''Level (?P<level>\d+) "?(?P<name>.+?)"?
.+?
(Holding: .+?\n)?Nature: (?P<nature>\w+)
HP: (?P<hp>\d+)( - IV: (?P<hp_iv>\d+)/\d+)?
Attack: (?P<attack>\d+)( - IV: (?P<attack_iv>\d+)/\d+)?
Defense: (?P<defense>\d+)( - IV: (?P<defense_iv>\d+)/\d+)?
Sp. Atk: (?P<spattack>\d+)( - IV: (?P<spattack_iv>\d+)/\d+)?
Sp. Def: (?P<spdefense>\d+)( - IV: (?P<spdefense_iv>\d+)/\d+)?
Speed: (?P<speed>\d+)( - IV: (?P<speed_iv>\d+)/\d+)?''')
pokemon = {}
stat_names = ('hp', 'attack', 'defense', 'spattack', 'spdefense', 'speed')
MAX_IV = (31, 31, 31, 31, 31, 31)
MIN_IV = (0, 0, 0, 0, 0, 0)
legendary_detector = re.compile(r'Congratulations (<@!?\d+>|.+?)! You caught a level \d+ (Shiny )?(.+?)!\s*(These colors seem unusual)?.*', re.MULTILINE | re.I)
legendaries = ['arceus', 'articuno', 'azelf', 'blacephalon', 'buzzwole',
'celebi', 'celesteela', 'cobalion', 'cosmoem', 'cosmog',
'cresselia', 'darkrai', 'deoxys', 'dialga', 'diancie',
'entei', 'genesect', 'giratina', 'groudon', 'guzzlord',
'heatran', 'ho-oh', 'hoopa', 'jirachi', 'kartana', 'keldeo',
'kyogre', 'kyurem', 'landorus', 'latias', 'latios', 'lugia',
'lunala', 'magearna', 'manaphy', 'marshadow', 'meloetta',
'mesprit', 'mew', 'mewtwo', 'moltres', 'naganadel', 'necrozma',
'nihilego', 'palkia', 'pheromosa', 'phione', 'poipole', 'raikou',
'rayquaza', 'regice', 'regigigas', 'regirock', 'registeel',
'reshiram', 'shaymin', 'silvally', 'solgaleo', 'stakataka',
'suicune', 'tapu bulu', 'tapu fini', 'tapu koko', 'tapu lele',
'terrakion', 'thundurus', 'tornadus', 'type: null', 'uxie',
'victini', 'virizion', 'volcanion', 'xerneas', 'xurkitree',
'yveltal', 'zapdos', 'zekrom', 'zeraora', 'zygarde',
'meltan', 'melmetal', 'zacian', 'zamazenta', 'eternatus',
'kubfu', 'urshifu', 'calyrex']
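# Editor's sketch (illustrative): a quick sanity check of the catch-message pattern
# above; the mention and pokemon here are made up, not real log output.
_m = legendary_detector.match("Congratulations <@1234>! You caught a level 25 Shiny Mewtwo!")
assert _m is not None and _m.group(2) == "Shiny " and _m.group(3) == "Mewtwo"
del _m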
# Stats taken from https://www.kaggle.com/mylesoneill/pokemon-sun-and-moon-gen-7-stats
with open(os.path.join(POKESTATS, 'pokemon.csv'), 'r', encoding='utf-8') as f:
reader = csv.DictReader(f)
keys = ('ndex', 'hp', 'attack', 'defense', 'spattack', 'spdefense', 'speed')
for row in reader:
name = None
if '(Mega ' in row['forme']:
name = row['forme']
name = 'mega' + name.split('(Mega')[1].split(')')[0].lower()
elif '(Primal Reversion)' in row['forme']:
name = 'primal ' + row['species'].lower()
else:
name = row['species'].lower()
if name in pokemon:
continue
pokemon[name] = {k: int(row[k]) for k in keys}
with open(os.path.join(POKESTATS, 'natures.json'), 'r') as f:
natures = json.load(f)
# Good work from https://github.com/xKynn/PokecordCatcher
with open(os.path.join(POKESTATS, 'pokemonrefs.json'), 'r') as f:
pokemonrefs = json.load(f)
# Below functions ported from https://github.com/dalphyx/pokemon-stat-calculator
# Formulas from https://bulbapedia.bulbagarden.net/wiki/Statistic
def calc_stat(iv, base, ev=0, level=1, nature=1):
result = math.floor(((2 * base + iv + math.floor(ev / 4)) * level) / 100 + 5) * nature
result = math.floor(result)
return result
def calc_iv(value, base, ev=0, level=1, nature=1):
return max(math.floor((100 * value / nature - 500) / level) - math.floor(ev / 4) - 2 * base, 0)
def calc_hp_iv(hp, base, ev=0, level=1):
return max(math.floor((100 * hp - 1000) / level - 2 * base - math.floor(ev / 4) - 100), 0)
def iv_range(level, natures, stats, base):
ivs = []
def get_range(get_stat, get_iv, stat, base, nature=None):
iv_small = None
iv_big = 31
if nature is None:
iv_guess = get_iv(stat, base, ev=102, level=level)
else:
iv_guess = get_iv(stat, base, ev=102, level=level, nature=nature)
if nature is not None:
get_stat = partial(get_stat, nature=nature)
if get_stat(iv_guess, base, ev=102, level=level) != stat:
for iv in range(1, 32):
if get_stat(iv, base, ev=102, level=level) == stat:
iv_guess = iv
for iv in range(32):
stat_new = get_stat(iv, base, ev=102, level=level)
if stat_new == stat and iv_small is None:
iv_small = iv
continue
if stat_new != stat:
if iv_small is None:
continue
iv_big = iv - 1
break
if iv_small is None:
return 'N/A'
return list(range(iv_small, iv_big+1))
ivs.append(get_range(calc_hp_stats, calc_hp_iv, stats[0], base[0]))
for stat, base_, nature in zip(stats[1:], base[1:], natures):
ivs.append(get_range(calc_stat, calc_iv, stat, base_, nature=nature))
return ivs
def calc_hp_stats(iv, base, ev, level):
# No.292 Shedinja's HP always be 1.
if base == 1:
return 1
result = math.floor((2 * base + iv + math.floor(ev / 4)) * level / 100) + level + 10
return result
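# Editor's sketch (illustrative numbers): the Bulbapedia formulas above at level 50
# with base 100, 31 IVs, the flat 102 EVs used elsewhere in this module and a
# +10% (1.1) nature modifier.
#   HP:    floor((2*100 + 31 + 25) * 50 / 100) + 50 + 10        = 188
#   other: floor(floor((2*100 + 31 + 25) * 50 / 100 + 5) * 1.1) = 146
assert calc_hp_stats(31, 100, 102, 50) == 188
assert calc_stat(31, 100, ev=102, level=50, nature=1.1) == 146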
def get_base_stats(name: str):
poke = pokemon.get(name.lower())
if not poke:
raise BotException(f"Could not find pokemon `{name}`"
"Make sure you replace the nickname to the pokemons real name or this won't work")
return tuple(poke[stat] for stat in stat_names)
def calc_all_stats(name, level, nature, evs=(102, 102, 102, 102, 102, 102), ivs=MAX_IV, with_max_level=False):
if isinstance(nature, str):
nature_ = natures.get(nature.lower())
if not nature_:
raise BotException(f'Could not find nature `{nature}`')
nature = nature_
base_stats = get_base_stats(name)
def calc_stats(lvl):
st = [calc_hp_stats(ivs[0], base_stats[0], evs[0], lvl)]
for i in range(1, 6):
st.append(calc_stat(ivs[i], base_stats[i], evs[i], lvl, nature[i - 1]))
return st
stats = calc_stats(level)
if with_max_level and level != 100:
max_stats = calc_stats(100)
elif with_max_level:
max_stats = stats
if with_max_level:
return stats, max_stats
return stats
def from_max_stat(min: int, max: int, value: int) -> tuple:
"""
    Gets where the stat stands between the min and max
Args:
min: min value
max: max value
value: the current value of the stat
Returns: tuple
Percentage on how close the value is to the max and the actual diff to max
"""
d = value - min
from_max = max - value
diff = max - min
if diff == 0:
delta = 'N/A'
else:
delta = d/diff
return delta, from_max
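# Editor's note (illustrative check): a stat of 150 between a possible minimum of
# 100 and maximum of 200 is halfway there, with 50 points short of the maximum.
assert from_max_stat(100, 200, 150) == (0.5, 50)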
class Pokemon(Cog):
def __init__(self, bot):
super().__init__(bot)
self.poke_spawns = {}
'''
Not needed since ivs are shown
@command(aliases=['pstats', 'pstat'])
@cooldown(1, 3, BucketType.user)
async def poke_stats(self, ctx, *, stats=None):
"""
Calculate how good your pokemons stats are.
To be used in combination with pokecord
How to use:
Use p!info (use whatever prefix pokecord has on the server instead of p!)
Copy the fields from Level to Speed. Then use the command as follows
{prefix}pstats
or manually
{prefix}pstats Level 100 Pikachu
2305/2610XP
Nature: Hasty
HP: 300
Attack: 300
Defense: 300
Sp. Atk: 300
Sp. Def: 300
Speed: 300
"""
async def process_embed(embed):
stats = embed.title + '\n' + embed.description.replace('*', '')
match = pokestats.match(stats)
if not match:
await ctx.send("Failed to parse stats. Make sure it's the correct format")
return
pokemon_name = pokemonrefs.get(embed.image.url.split('/')[-1].split('.')[0])
if not pokemon_name:
await ctx.send('Could not get pokemon name from message. Please give the name of the pokemon')
msg = await self.bot.wait_for('message', check=basic_check(author=ctx.author, channel=ctx.channel),
timeout=30)
pokemon_name = msg.content
stats = match.groupdict()
stats['name'] = pokemon_name
return stats
def check_msg(msg):
embed = msg.embeds[0]
if embed.title != EmptyEmbed and embed.title.startswith('Level '):
return embed
author = None
try:
if stats:
author = await (UserConverter().convert(ctx, stats))
except UserInputError:
pass
if not author:
try:
stats = int(stats)
except (ValueError, TypeError):
author = ctx.author
not_found = 'Could not find p!info message'
accept_any = True
if stats:
match = pokestats.match(stats)
if not match:
await ctx.send("Failed to parse stats. Make sure it's the correct format")
return
stats = match.groupdict()
else:
stats = None
not_found = f'No p!info message found for user {author}'
accept_any = False
if not stats:
_embed = None
async for msg in ctx.channel.history():
if msg.author.id != 365975655608745985:
continue
if not msg.embeds:
continue
embed = msg.embeds[0]
if embed.title != EmptyEmbed and embed.title.startswith('Level '):
if author.avatar_url.startswith(embed.thumbnail.url):
_embed = embed
break
if accept_any and _embed is None:
_embed = embed
if _embed is None:
await ctx.send(not_found)
return
stats = await process_embed(_embed)
elif isinstance(stats, int):
try:
msg = await ctx.channel.get_message(stats)
except HTTPException as e:
return await ctx.send(f'Could not get message with id `{stats}` because of an error\n{e}')
embed = check_msg(msg)
stats = await process_embed(embed)
try:
level = int(stats['level'])
except ValueError:
raise BadArgument('Could not convert level to integer')
current_stats = []
try:
for name in stat_names:
iv = stats[name + '_iv']
if iv is not None:
stats[name + '_iv'] = int(iv)
i = int(stats[name])
current_stats.append(i)
stats[name] = i
except ValueError:
raise BadArgument(f'Failed to convert {name} to integer')
nature = stats['nature'].lower()
try:
max_stats = calc_all_stats(stats['name'], level, nature)
min_stats = calc_all_stats(stats['name'], level, nature, ivs=MIN_IV)
except KeyError as e:
return await ctx.send(f"{e}\nMake sure you replace the nickname to the pokemons real name in the message or this won't work")
s = f'```py\nLevel {stats["level"]} {stats["name"]}\nStat: Max value | Delta | Percentage | lvl 100 | iv\n'
nature_mod = natures[nature]
base_stats = get_base_stats(stats['name'])
if stats['hp_iv'] is not None:
iv_ranges = []
for name in stat_names:
iv_ranges.append((stats[name + '_iv'], ))
else:
iv_ranges = iv_range(level, nature_mod, current_stats, base_stats)
idx = 0
for min_val, max_val, name, ivs in zip(min_stats, max_stats, stat_names, iv_ranges):
diff, from_max = from_max_stat(min_val, max_val, stats[name])
fill = ' ' * (11 - len(name))
fill2 = ' ' * (4 - len(str(max_val)))
fill3 = ' ' * (6 - len(str(from_max)))
if isinstance(diff, float):
diff = f'{diff*100:.0f}%'
fill4 = ' ' * (11 - len(diff))
if ivs == 'N/A':
ivs = (0, 31)
iv = 'N/A'
elif len(ivs) == 1:
iv = str(ivs[0])
else:
iv = f'{ivs[0]}-{ivs[-1]}'
if idx == 0:
minimum = calc_hp_stats(ivs[0], base_stats[idx], 102, 100)
maximum = calc_hp_stats(ivs[-1], base_stats[idx], 102, 100)
else:
minimum = calc_stat(ivs[0], base_stats[idx], 102, 100, nature_mod[idx - 1])
maximum = calc_stat(ivs[-1], base_stats[idx], 102, 100, nature_mod[idx - 1])
if maximum == minimum:
stat_range = str(maximum)
else:
stat_range = f'{minimum}-{maximum}'
idx += 1
fill5 = ' ' * (8 - len(stat_range))
s += f'{name}:{fill}{max_val}{fill2}| {from_max}{fill3}| {diff}{fill4}| {stat_range}{fill5}| {iv}\n'
s += '```'
await ctx.send(s)
'''
"""
Command removed because pokecord didnt want it to exist.
Kept here for archival purposes
@command(ignore_extra=True, aliases=['gp'])
@cooldown(1, 5, BucketType.guild)
async def guess_pokemon(self, ctx, url=None):
if not self.bot.poke_model:
return await ctx.send('Not supported atm')
if not url:
url = self.poke_spawns.get(ctx.guild.id)
if not url:
async for msg in ctx.channel.history(limit=10):
if self._is_spawn(msg):
url = msg.embeds[0].image.url
break
if not url:
ctx.command.undo_use(ctx)
return await ctx.send('No image specified')
img = await image_from_url(url, self.bot.aiohttp_client)
if not img:
ctx.command.undo_use(ctx)
return await ctx.send(f'No image found from {url}')
img = self.bot.poke_model.process_image(img)
guess, accuracy = await self.bot.loop.run_in_executor(self.bot.threadpool, self.bot.poke_model.sample, img)
await ctx.send(f"That pokemon is `{guess}` I'm {accuracy*100:.01f}% sure of that")
"""
@staticmethod
async def create_pokelog(ctx):
guild = ctx.guild
overwrites = {
guild.default_role: discord.PermissionOverwrite(send_messages=False),
guild.me: discord.PermissionOverwrite(send_messages=True,
embed_links=True)
}
try:
channel = await guild.create_text_channel('pokelog', overwrites=overwrites,
reason=f'{ctx.author} created pokelog')
except discord.HTTPException as e:
return await ctx.send(f'Failed to create pokelog because of an error\n{e}')
await ctx.send(f'Pokelog created in {channel.mention}')
@command(no_pm=True)
@cooldown(1, 5, BucketType.guild)
async def pokelog(self, ctx):
"""
        To log caught pokecord legendaries and shinies you need a channel named pokelog
You can use this command to set one up with correct perms
To include more pokemon or exclude pokemon you need to edit the
channel description. The format is as follows
```
---
Phione
Shaymin
+++
Beldum
Metang
Metagross
```
where pokemon under the --- are excluded and pokemon under +++ are included
        in pokelog. The name of the pokemon must be the same as what p!info gives
of that pokemon. Excluding also overrides including so if you put a pokemon
to be excluded and included it will be excluded. Shinies are logged no matter the settings
"""
channel = utils.find(lambda c: c.name == 'pokelog' and isinstance(c, discord.TextChannel), ctx.guild.channels)
if not channel:
if not check_botperm('manage_channels', ctx=ctx, me=ctx.author):
return await ctx.send('Pokelog channel not present')
check_botperm('manage_channels', ctx=ctx, raise_error=BotMissingPermissions)
await ctx.send('Pokelog channel not present. Do you want to create one (Y/N)')
msg = await wait_for_yes(ctx, 30)
if not msg:
return
await self.create_pokelog(ctx)
return
await ctx.send(f'Current pokelog channel is {channel.mention}\n'
'Make sure this bot has send messages and embed links perms set to ✅')
@command()
@guild_only()
@has_permissions(manage_channels=True)
@cooldown(1, 1, BucketType.guild)
async def log_pokemon(self, ctx, message: discord.Message):
"""
This command can be used to force log any pokemon to the pokelog.
This is done by linking the message where the pokemon is caught
(usually in the format of "Congratulations @user! You caught a level 10 Magikarp!")
Usage:
{prefix}{name} https://discord.com/channels/353927534439825429/354712220761980939/826470021713625169
"""
link = f'https://discord.com/channels/{message.guild.id}/{message.channel.id}/{message.id}'
if not self._is_pokebot(message.author.id):
await ctx.send(f'Message not from a supported bot. {link}')
return
success = await self._post2pokelog(message)
if not success:
await ctx.send(f'No pokelog found or failed to scan pokemon from message {link}')
else:
await ctx.send('Sent pokelog message')
async def _post2pokelog(self, message):
if not message.guild:
return
channel = utils.find(lambda c: c.name == 'pokelog' and isinstance(c, discord.TextChannel), message.guild.channels)
if not channel:
return
perms = channel.permissions_for(message.guild.get_member(self.bot.user.id))
if not (perms.send_messages and perms.read_messages and perms.embed_links):
return
match = legendary_detector.match(message.content)
if not match:
return
mention, shiny, poke, shiny2 = match.groups()
shiny = shiny or shiny2
include = []
exclude = []
if channel.topic:
container = None
for line in channel.topic.split('\n'):
line = line.strip()
if not line:
continue
if line == '---':
container = exclude
continue
if line == '+++':
container = include
continue
if container is not None:
container.append(line.lower())
if poke.lower() not in legendaries and not shiny and poke.lower() not in include:
return
if poke.lower() in exclude and not shiny:
return
if shiny:
shiny = '-shiny'
else:
shiny = ''
poke_fmt = poke.lower().replace('♂', 'm').replace('♀', 'f')
poke_fmt = re.sub('[-. :]', '', poke_fmt)
# Hardcode unown to always return the link to unown f since
# that's the only unown in pokecord
if 'unown' in poke_fmt:
poke_fmt = 'unown-f'
poke = 'Unown-f'
if 'alolan' in poke_fmt:
poke_fmt = poke_fmt.replace('alolan', '').strip() + '-alola'
icon_fmt = ' '.join(poke.split(' ')[1:]) + '-alola'
else:
icon_fmt = poke
# Temp fix until pokemon showdown adds sprites
if 'meltan' in poke_fmt:
icon = 'https://cdn.bulbagarden.net/upload/3/34/808MS.png'
if shiny:
url = 'https://i.imgur.com/m2YsdDT.png'
else:
url = 'https://i.imgur.com/fdrf77L.png'
elif 'melmetal' in poke_fmt:
icon = 'https://cdn.bulbagarden.net/upload/f/f1/809MS.png'
if shiny:
url = 'https://i.imgur.com/F1N9TQm.png'
else:
url = 'https://i.imgur.com/1M3QklX.png'
elif 'detectivepikachu' in poke_fmt:
icon = ''
if shiny:
url = 'https://i.imgur.com/5YWs0rA.png'
else:
url = 'https://i.imgur.com/9Sfddti.png'
else:
url = 'http://play.pokemonshowdown.com/sprites/xyani{}/{}.gif'.format(shiny, poke_fmt)
icon_fmt = re.sub(' |: ', '-', icon_fmt).lower().replace('♂', '-m').replace('♀', '-f').replace('.', '')
icon = f'https://raw.githubusercontent.com/msikma/pokesprite/master/icons/pokemon/{shiny[1:] or "regular"}/{icon_fmt}.png'
desc = f'{mention} caught a **{"Shiny " if shiny else ""}{poke}**\n' \
f'[Jump to message](https://discord.com/channels/{message.guild.id}/{message.channel.id}/{message.id})'
embed = Embed(description=desc, colour=random_color())
embed.set_image(url=url)
embed.set_thumbnail(url=icon)
await channel.send(embed=embed)
return True
@staticmethod
def _is_spawn(msg):
if msg.embeds:
embed = msg.embeds[0]
return isinstance(embed.title, str) and 'wild' in embed.title.lower()
return False
def _is_pokebot(self, uid) -> bool:
# Ignore others than pokecord
# old pokecord id: 365975655608745985
# new pokecord and poketwo
return self.bot.test_mode or uid in (665301904791699476, 716390085896962058)
@Cog.listener()
async def on_message(self, message):
if not self._is_pokebot(message.author.id):
return
if message.content:
return await self._post2pokelog(message)
#if self._is_spawn(message):
# self.poke_spawns[message.guild.id] = message.embeds[0].image.url
"""
Unused code. Removed for same reason as guess_pokemon
def get_match(self, img):
raise NotImplementedError('Now uses a cnn instead of phash')
binarydiff = self.only_hash != imagehash.phash(img,
hash_size=16,
highfreq_factor=6).hash.reshape(1, -1)
hammingdiff = binarydiff.sum(axis=1)
closest_match = numpy.argmin(hammingdiff)
return self.poke_names[closest_match]
async def match_pokemon(self, url):
async with await self.bot.aiohttp_client.get(url) as r:
data = BytesIO(await r.content.read())
return await self.bot.loop.run_in_executor(self.bot.threadpool, self.get_match, Image.open(data))
"""
def setup(bot):
bot.add_cog(Pokemon(bot))
| 35.5
| 160
| 0.560017
|
4318ae19649650c19a5f78e3c5a71e680728cf13
| 5,220
|
py
|
Python
|
pipenv/patched/notpip/_vendor/requests/__init__.py
|
sthagen/pipenv
|
0924f75fd1004c848ea67d4272315eda4210b352
|
[
"MIT"
] | 23
|
2017-01-20T01:18:31.000Z
|
2017-01-20T17:25:11.000Z
|
pipenv/patched/notpip/_vendor/requests/__init__.py
|
sthagen/pipenv
|
0924f75fd1004c848ea67d4272315eda4210b352
|
[
"MIT"
] | 1
|
2017-01-20T05:13:58.000Z
|
2017-01-20T05:13:58.000Z
|
pipenv/patched/notpip/_vendor/requests/__init__.py
|
sthagen/pipenv
|
0924f75fd1004c848ea67d4272315eda4210b352
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP Library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings.
Basic GET usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> b'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('https://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key1": "value1",
"key2": "value2"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <https://requests.readthedocs.io>.
:copyright: (c) 2017 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
from pipenv.patched.notpip._vendor import urllib3
import warnings
from .exceptions import RequestsDependencyWarning
charset_normalizer_version = None
try:
from pipenv.patched.notpip._vendor.chardet import __version__ as chardet_version
except ImportError:
chardet_version = None
def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
urllib3_version = urllib3_version.split('.')
assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git.
# Sometimes, urllib3 only reports its version as 16.1.
if len(urllib3_version) == 2:
urllib3_version.append('0')
# Check urllib3 for compatibility.
major, minor, patch = urllib3_version # noqa: F811
major, minor, patch = int(major), int(minor), int(patch)
# urllib3 >= 1.21.1, <= 1.26
assert major == 1
assert minor >= 21
assert minor <= 26
# Check charset_normalizer for compatibility.
if chardet_version:
major, minor, patch = chardet_version.split('.')[:3]
major, minor, patch = int(major), int(minor), int(patch)
# chardet_version >= 3.0.2, < 5.0.0
assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0)
elif charset_normalizer_version:
major, minor, patch = charset_normalizer_version.split('.')[:3]
major, minor, patch = int(major), int(minor), int(patch)
# charset_normalizer >= 2.0.0 < 3.0.0
assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0)
else:
raise Exception("You need either charset_normalizer or chardet installed")
def _check_cryptography(cryptography_version):
# cryptography < 1.3.4
try:
cryptography_version = list(map(int, cryptography_version.split('.')))
except ValueError:
return
if cryptography_version < [1, 3, 4]:
warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version)
warnings.warn(warning, RequestsDependencyWarning)
# Check imported dependencies for compatibility.
try:
check_compatibility(urllib3.__version__, chardet_version, charset_normalizer_version)
except (AssertionError, ValueError):
warnings.warn("urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
"version!".format(urllib3.__version__, chardet_version, charset_normalizer_version),
RequestsDependencyWarning)
# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
# 'ssl' library isn't available.
try:
# Note: This logic prevents upgrading cryptography on Windows, if imported
# as part of pip.
from pipenv.patched.notpip._internal.utils.compat import WINDOWS
if not WINDOWS:
raise ImportError("pip internals: don't import cryptography on Windows")
try:
import ssl
except ImportError:
ssl = None
if not getattr(ssl, "HAS_SNI", False):
from pipenv.patched.notpip._vendor.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
# Check cryptography version
from cryptography import __version__ as cryptography_version
_check_cryptography(cryptography_version)
except ImportError:
pass
# urllib3's DependencyWarnings should be silenced.
from pipenv.patched.notpip._vendor.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from .__version__ import __title__, __description__, __url__, __version__
from .__version__ import __build__, __author__, __author_email__, __license__
from .__version__ import __copyright__, __cake__
from . import utils
from . import packages
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout, JSONDecodeError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
| 33.677419
| 102
| 0.695785
|
571db55026c51e0be11dec07ffd437ee9db9c8da
| 23,199
|
py
|
Python
|
metalibm_core/code_generation/gappa_code_generator.py
|
kalray/metalibm
|
e331ee4a1b3df9ebdf581453852ac019d7c1b6da
|
[
"MIT"
] | 27
|
2018-03-12T16:49:36.000Z
|
2021-12-15T06:53:55.000Z
|
metalibm_core/code_generation/gappa_code_generator.py
|
kalray/metalibm
|
e331ee4a1b3df9ebdf581453852ac019d7c1b6da
|
[
"MIT"
] | 57
|
2018-03-12T16:49:56.000Z
|
2021-03-04T15:25:39.000Z
|
metalibm_core/code_generation/gappa_code_generator.py
|
kalray/metalibm
|
e331ee4a1b3df9ebdf581453852ac019d7c1b6da
|
[
"MIT"
] | 4
|
2018-03-12T15:40:22.000Z
|
2018-11-28T14:34:54.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# created: Dec 24th, 2013
# last-modified: Mar 7th, 2018
#
# author(s): Nicolas Brunie (nicolas.brunie@kalray.eu)
###############################################################################
import sys
import os
from .code_element import CodeVariable, CodeExpression
from ..core.ml_operations import (
Variable, Constant, ConditionBlock, Return, TableLoad, Statement,
SpecificOperation, Conversion)
from ..core.ml_table import ML_Table
from ..core.ml_formats import *
from .generator_utility import C_Code, Gappa_Code, RoundOperator
from .code_object import Gappa_Unknown, GappaCodeObject
from ..utility.debug_utils import ML_Debug
from ..utility.gappa_utils import execute_gappa_script_extract
import metalibm_core.utility.gappa_utils as gappa_utils
from ..utility.log_report import Log
class GappaCodeGenerator(object):
language = Gappa_Code
""" C language code generator """
def __init__(self, processor, declare_cst = True, disable_debug = False, libm_compliant = False, exact_mode = False):
# on level for each of exact_mode possible values
self.memoization_map = [{}]
self.processor = processor
self.declare_cst = declare_cst
self.disable_debug = disable_debug
self.libm_compliant = libm_compliant
self.exact_mode = exact_mode
self.exact_hint_map = {False: {}, True: {}}
def get_unknown_precision(self):
""" return a default format when compound operator encounter
an undefined precision """
return ML_Exact
def set_exact_mode(self, value = True):
self.exact_mode = value
def get_exact_mode(self):
return self.exact_mode
def open_memoization_level(self):
self.memoization_map.insert(0, {})
def close_memoization_level(self):
self.memoization_map.pop(0)
def clear_memoization_map(self):
self.exact_hint_map = {False: {}, True: {}}
self.memoization_map = [{}]
def has_memoization(self, optree):
""" test if a optree has already been generated and memoized """
for memoization_level in self.memoization_map:
if optree in memoization_level: return True
return False
def get_memoization(self, optree):
""" retrieve pre-existing memoization entry """
for memoization_level in self.memoization_map:
if optree in memoization_level: return memoization_level[optree]
return None
def add_memoization(self, optree, code_value):
""" register memoization value <code_value> for entry <optree> """
self.memoization_map[0][optree] = code_value
def add_hypothesis(self, code_object, hypoth_optree, hypoth_value):
hypothesis_code = self.generate_expr(code_object, hypoth_optree, initial = True, language = Gappa_Code)
code_object.add_hypothesis(hypothesis_code, hypoth_value)
def add_goal(self, code_object, goal_optree, goal_value = Gappa_Unknown):
goal_code = self.generate_expr(code_object, goal_optree, initial = True, language = Gappa_Code)
code_object.add_goal(goal_code, goal_value)
def add_hint(self, code_object, hint_hypoth, hint_goal, hint_annotation = None, isApprox = False):
hypoth_code = self.generate_expr(code_object, hint_hypoth, initial = False, folded = False, language = Gappa_Code)
goal_code = self.generate_expr(code_object, hint_goal, initial = False, folded = False, language = Gappa_Code)
if hint_annotation is not None:
declare_cst = self.declare_cst
self.declare_cst = False
annotation_code = self.generate_expr(code_object, hint_annotation, initial = False, folded = False, language = Gappa_Code, strip_outer_parenthesis = True)
self.declare_cst = declare_cst
else:
annotation_code = None
code_object.add_hint(hypoth_code, goal_code, annotation_code, isApprox)
# force_variable_storing is not supported
def generate_expr(self, code_object, optree, folded = True, result_var = None, initial = False, __exact = None, language = None, strip_outer_parenthesis = False, force_variable_storing = False):
""" code generation function """
#exact_value = exact or self.get_exact_mode()
# search if <optree> has already been processed
if self.has_memoization(optree):
return self.get_memoization(optree)
result = None
# implementation generation
if isinstance(optree, CodeVariable):
result = optree
elif isinstance(optree, Variable):
#if optree.get_max_abs_error() != None:
# max_abs_error = optree.get_max_abs_error()
# var_name = code_object.get_free_var_name(optree.get_precision(), prefix = "%s_" % optree.get_tag(), declare = True)
# result = CodeVariable(var_name, optree.get_precision())
# error_var = Variable(tag = var_name, precision = optree.get_precision())
# optree.set_max_abs_error(None)
# hypothesis = error_var - optree
# hypothesis.set_precision(ML_Exact)
# self.add_hypothesis(code_object, hypothesis, Interval(-max_abs_error, max_abs_error))
# optree.set_max_abs_error(max_abs_error)
# self.add_memoization(error_var, result)
#else:
# result = CodeVariable(optree.get_tag(), optree.get_precision())
result = CodeVariable(optree.get_tag(), optree.get_precision())
elif isinstance(optree, Constant):
precision = optree.get_precision()
if self.declare_cst:
cst_prefix = "cst" if optree.get_tag() is None else optree.get_tag()
cst_varname = code_object.declare_cst(optree, prefix = cst_prefix)
result = CodeVariable(cst_varname, precision)
else:
result = CodeExpression(precision.get_gappa_cst(optree.get_value()), precision)
elif isinstance(optree, Conversion):
if optree.get_rounding_mode() is not None:
local_implementation = RoundOperator(optree.get_precision(), direction = optree.get_rounding_mode())
else:
local_implementation = RoundOperator(optree.get_precision())
return local_implementation.generate_expr(self, code_object, optree, optree.inputs, folded = folded, result_var = result_var)
elif isinstance(optree, TableLoad):
# declaring table
table = optree.inputs[0]
tag = table.get_tag()
table_name = code_object.declare_table(table, prefix = tag if tag != None else "table")
index_code = [self.generate_expr(code_object, index_op, folded = folded).get() for index_op in optree.inputs[1:]]
result = CodeExpression("%s[%s]" % (table_name, "][".join(index_code)), optree.inputs[0].get_storage_precision())
elif isinstance(optree, ConditionBlock):
condition = optree.inputs[0]
if_branch = optree.inputs[1]
else_branch = optree.inputs[2] if len(optree.inputs) > 2 else None
# generating pre_statement
self.generate_expr(code_object, optree.get_pre_statement(), folded = folded)
cond_code = self.generate_expr(code_object, condition, folded = folded)
if condition.get_likely() in [True, False]:
code_object << "\nif (__builtin_expect(%s, %d)) " % (cond_code.get(), {True: 1, False: 0}[condition.get_likely()])
else:
code_object << "\nif (%s) " % cond_code.get()
self.open_memoization_level()
code_object.open_level()
#if_branch_code = self.processor.generate_expr(self, code_object, if_branch, if_branch.inputs, folded)
if_branch_code = self.generate_expr(code_object, if_branch, folded = folded)
code_object.close_level(cr = "")
self.close_memoization_level()
if else_branch:
code_object << " else "
code_object.open_level()
self.open_memoization_level()
else_branch_code = self.generate_expr(code_object, else_branch, folded = folded)
code_object.close_level()
self.close_memoization_level()
else:
code_object << "\n"
return None
elif isinstance(optree, Return):
return None
elif isinstance(optree, SpecificOperation):
result_code = self.processor.generate_expr(self, code_object, optree, optree.inputs, folded = False, result_var = result_var, language = self.language)
code_object << "%s;\n" % result_code.get()
return None
elif isinstance(optree, Statement):
for op in optree.inputs:
if not self.has_memoization(op):
self.generate_expr(code_object, op, folded = folded, initial = True)
return None
else:
result = self.processor.generate_expr(self, code_object, optree, optree.inputs, folded = folded, result_var = result_var, language = self.language)
if optree.get_exact():
key = optree.get_handle()
exact_flag = (optree.get_precision() == ML_Exact or self.get_exact_mode() == True)
if key in self.exact_hint_map[True] and key in self.exact_hint_map[False]:
# already processed, skip
pass
else:
self.exact_hint_map[exact_flag][key] = optree
if key in self.exact_hint_map[not exact_flag]:
self.add_hint(code_object, self.exact_hint_map[False][key], self.exact_hint_map[True][key])
# registering result into memoization table
self.add_memoization(optree, result)
# debug management
if optree.get_debug() and not self.disable_debug:
code_object << self.generate_debug_msg(optree, result)
if initial and not isinstance(result, CodeVariable):
final_var = result_var if result_var else code_object.get_free_var_name(optree.get_precision(), prefix = "result", declare = True)
code_object << self.generate_assignation(final_var, result.get())
return CodeVariable(final_var, optree.get_precision())
if strip_outer_parenthesis and isinstance(result, CodeExpression):
result.strip_outer_parenthesis()
return result
def generate_code_assignation(self, code_object, result_var, expr_code, final=True, original_node=None):
return self.generate_assignation(result_var, expr_code, final=final)
def generate_assignation(self, result_var, expression_code, final = True):
""" generate code for assignation of value <expression_code> to
variable <result_var> """
final_symbol = ";\n" if final else ""
return "%s = %s%s" % (result_var, expression_code, final_symbol)
def generate_declaration(self, symbol, symbol_object, initial = True, final = True):
if isinstance(symbol_object, Constant):
initial_symbol = ""#(symbol_object.get_precision().get_c_name() + " ") if initial else ""
final_symbol = ";\n" if final else ""
return "%s%s = %s%s" % (initial_symbol, symbol, symbol_object.get_precision().get_gappa_cst(symbol_object.get_value()), final_symbol)
elif isinstance(symbol_object, Variable):
initial_symbol = ""#(symbol_object.get_precision().get_c_name() + " ") if initial else ""
final_symbol = ";\n" if final else ""
return "%s%s%s" % (initial_symbol, symbol, final_symbol)
elif isinstance(symbol_object, ML_Table):
raise NotImplementedError
else:
raise NotImplementedError
def generate_initialization(self, symbol, symbol_object, initial = True, final = True):
return ""
def generate_debug_msg(self, optree, result):
debug_object = optree.get_debug()
precision = optree.get_precision()
display_format = debug_object.get_display_format(precision.get_c_display_format()) if isinstance(debug_object, ML_Debug) else precision.get_c_display_format()
debug_msg = "#ifdef ML_DEBUG\n"
debug_msg += """printf("%s: %s\\n", %s);\n""" % (optree.get_tag(), display_format, result.get())
debug_msg += "#endif\n"
return debug_msg
def generate_gappa_filename(self, basename=None):
""" generate a temporary file name to receive gappa code """
return gappa_utils.generate_gappa_filename(basename)
def get_eval_error(self, pre_optree, variable_copy_map=None,
goal_precision=ML_Binary32, gappa_filename=None):
""" helper to compute the evaluation error of <pre_optree> bounded by tagged-node in variable_map,
assuming variable_map[v] is the liverange of node v """
if gappa_filename is None:
gappa_filename = self.generate_gappa_filename()
if variable_copy_map is None:
variable_copy_map = {}
# registering initial bounds
bound_list = [op for op in variable_copy_map]
# copying pre-operation tree
optree = pre_optree.copy(variable_copy_map)
gappa_code = GappaCodeObject()
gappa_result_approx = self.generate_expr(gappa_code, optree, initial = False)
gappa_result_exact = self.generate_expr(gappa_code, optree, initial = False)
goal = gappa_result_approx.get_variable(gappa_code) - gappa_result_exact.get_variable(gappa_code)
goal.set_attributes(precision = goal_precision, tag = "goal")
self.add_goal(gappa_code, goal)
for v in bound_list:
self.add_hypothesis(gappa_code, variable_copy_map[v], variable_copy_map[v].get_interval())
self.clear_memoization_map()
return execute_gappa_script_extract(gappa_code.get(self), gappa_filename = gappa_filename)["goal"]
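    # Illustrative sketch of driving get_eval_error (the names below are
    # hypothetical, not part of this module): x_copy is the copy of the free
    # Variable x carrying its live-range interval, and the returned value is
    # the interval Gappa derives for the generated "goal" expression.
    #
    #   x_copy = Variable("x", precision=ML_Binary32, interval=Interval(-1, 1))
    #   err_interval = gappa_gen.get_eval_error(approx_graph, {x: x_copy})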
def get_eval_error_v2(self, opt_engine, pre_optree, variable_copy_map=None,
goal_precision=ML_Exact, gappa_filename=None,
relative_error=False):
""" helper to compute the evaluation error of <pre_optree> bounded by tagged-node in variable_map,
assuming variable_map[v] is the liverange of node v """
if gappa_filename is None:
gappa_filename = self.generate_gappa_filename()
if variable_copy_map is None:
variable_copy_map = {}
# registering initial bounds
bound_list = []
bound_unique_list = []
bound_targets = []
for op in variable_copy_map:
bound_list.append(op)
if not variable_copy_map[op] in bound_targets:
bound_unique_list.append(op)
bound_targets.append(variable_copy_map[op])
# copying pre-operation tree
optree = pre_optree.copy(variable_copy_map)
gappa_code = GappaCodeObject()
# quantization error variable map
var_error_copy_map = {}
for v in bound_list:
max_abs_error = v.get_max_abs_error()
if max_abs_error == None:
var_error_copy_map[v] = variable_copy_map[v]
if v in bound_unique_list:
self.add_hypothesis(gappa_code, variable_copy_map[v], variable_copy_map[v].get_interval())
else:
var_error_interval = Interval(-max_abs_error, max_abs_error)
var = variable_copy_map[v]
exact_var = Variable(var.get_tag() + "_", precision = var.get_precision(), interval = var.get_interval())
var_error_copy_map[v] = exact_var
if v in bound_unique_list:
self.add_hypothesis(gappa_code, exact_var, variable_copy_map[v].get_interval())
sub_var = var - exact_var
sub_var.set_precision(ML_Exact)
if v in bound_unique_list:
self.add_hypothesis(gappa_code, sub_var, var_error_interval)
pre_exact_optree = pre_optree.copy(var_error_copy_map)
exact_optree = opt_engine.exactify(pre_exact_optree.copy())
gappa_result_exact = self.generate_expr(gappa_code, exact_optree, initial = True)
#print "gappa_code: ", gappa_code.get(self)
gappa_result_approx = self.generate_expr(gappa_code, optree, initial = False)
#print "gappa_code: ", gappa_code.get(self)
# Gappa Result Approx variable
gra_var = gappa_result_approx.get_variable(gappa_code)
# Gappa Result Exact variable
gre_var = gappa_result_exact.get_variable(gappa_code)
goal_diff = gra_var - gre_var
goal_diff.set_attributes(precision = goal_precision, tag = "goal_diff")
if relative_error:
goal = goal_diff / gre_var
else:
goal = goal_diff
goal.set_attributes(precision = goal_precision, tag = "goal")
self.add_goal(gappa_code, goal)
self.clear_memoization_map()
try:
eval_error = execute_gappa_script_extract(gappa_code.get(self), gappa_filename = gappa_filename)["goal"]
return eval_error
except ValueError:
Log.report(Log.Error, "Unable to compute evaluation error with gappa")
def get_eval_error_v3(self, opt_engine, pre_optree, variable_copy_map=None,
goal_precision = ML_Exact, gappa_filename=None,
dichotomy=None, relative_error=False):
if variable_copy_map is None:
variable_copy_map = {}
if dichotomy is None:
dichotomy = []
# storing initial interval values
init_interval = {}
for op in variable_copy_map:
init_interval[op] = variable_copy_map[op].get_interval()
eval_error_list = []
case_id = 0
# performing dichotomised search
for case in dichotomy:
clean_copy_map = {}
for op in variable_copy_map:
clean_copy_map[op] = variable_copy_map[op]
if op in case:
                    # if op interval is set in case, transmit interval information to copy map
clean_copy_map[op].set_interval(case[op])
else:
# else making sure initial interval is set
clean_copy_map[op].set_interval(init_interval[op])
# building gappa filename (without full path)
# get_eval_error_v2 will make sure a full path is properly pre-pended
if gappa_filename is None:
sub_gappa_filename = "c{}_gappa_tmp.g".format(case_id)
else:
head, tail = os.path.split(gappa_filename)
sub_gappa_filename = os.path.join(head, "c{}_{}".format(case_id, tail))
# computing evaluation error in local conditions
eval_error = self.get_eval_error_v2(opt_engine, pre_optree, clean_copy_map, goal_precision, sub_gappa_filename, relative_error = relative_error)
eval_error_list.append(eval_error)
case_id += 1
return eval_error_list
def get_interval_code(self, pre_goal_list, bound_list, variable_copy_map=None, goal_precision=ML_Exact, update_handle=True, gappa_code=None, register_bound_hypothesis=True):
""" build a gappa proof to determine the liverange for each node in pre_goal_list.
The gappa proof is built assuming nodes in bound_list are roots of the operation graph.
variable_copy_map is used to copy the graph.
This method creates a new GappaCodeObject whose goal is a copy of pre_goal assuming
the mapping described in variable_copy_map and registering hypothesis
which correspond to variable_copy_map bounds """
variable_copy_map = variable_copy_map or {}
# registering initial bounds
# bound_list = [op for op in variable_copy_map]
gappa_code = gappa_code or GappaCodeObject()
        # to avoid an infinite loop if the old API of get_interval_code is used
# and a node (with __getitem__) is passed as pre_goal_list
assert isinstance(pre_goal_list, list)
# copying pre-operation tree
goal_list = [pre_goal.copy(variable_copy_map) for pre_goal in pre_goal_list]
for goal in goal_list:
goal.set_attributes(precision=goal_precision)
self.add_goal(gappa_code, goal)
# updating handle
if update_handle:
for v in variable_copy_map:
new_v = variable_copy_map[v]
v.get_handle().set_node(new_v)
if register_bound_hypothesis:
for v in bound_list:
v_interval = variable_copy_map[v].get_interval()
if v_interval is None:
Log.report(Log.Error, "node {} has not defined interval in gappa get_interval_code", v)
self.add_hypothesis(gappa_code, variable_copy_map[v], v_interval)
return gappa_code
def get_interval_code_no_copy(self, goal, goal_precision = ML_Exact, update_handle = True, bound_list = []):
# copying pre-operation tree
goal.set_attributes(precision = goal_precision, tag = "goal")
gappa_code = GappaCodeObject()
#gappa_result_approx = self.generate_expr(gappa_code, goal, initial = False, exact = False)
self.add_goal(gappa_code, goal)
for v in bound_list:
self.add_hypothesis(gappa_code, v, v.get_interval())
return gappa_code
| 46.305389
| 198
| 0.648821
|
5df03d66411e95709b0b422d747acbefa1a1e34b
| 443
|
py
|
Python
|
bandwidth/voice/models/conference_event_method_enum.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 5
|
2020-11-04T14:29:37.000Z
|
2022-02-23T20:33:07.000Z
|
bandwidth/voice/models/conference_event_method_enum.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 3
|
2021-07-23T18:48:48.000Z
|
2022-03-15T14:59:07.000Z
|
bandwidth/voice/models/conference_event_method_enum.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 8
|
2020-04-14T09:22:53.000Z
|
2022-03-11T10:46:06.000Z
|
# -*- coding: utf-8 -*-
"""
bandwidth
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
class ConferenceEventMethodEnum(object):
"""Implementation of the 'ConferenceEventMethod' enum.
TODO: type enum description here.
Attributes:
POST: TODO: type description here.
GET: TODO: type description here.
"""
POST = 'POST'
GET = 'GET'
| 17.038462
| 59
| 0.602709
|
19cd4b9218162bb2df70de9917f6dbc7d09ba5f7
| 9,748
|
py
|
Python
|
endesive/xades/bes.py
|
zengoma/endesive
|
ad3aa54e67aa80af3d01261168cb64796818c9c9
|
[
"MIT"
] | null | null | null |
endesive/xades/bes.py
|
zengoma/endesive
|
ad3aa54e67aa80af3d01261168cb64796818c9c9
|
[
"MIT"
] | null | null | null |
endesive/xades/bes.py
|
zengoma/endesive
|
ad3aa54e67aa80af3d01261168cb64796818c9c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import base64
import datetime
import hashlib
import io
import uuid
from lxml import etree, builder
DS = builder.ElementMaker(
namespace="http://www.w3.org/2000/09/xmldsig#",
nsmap={
"ds": "http://www.w3.org/2000/09/xmldsig#",
},
)
CanonicalizationMethod = DS.CanonicalizationMethod
DigestMethod = DS.DigestMethod
DigestValue = DS.DigestValue
KeyInfo = DS.KeyInfo
Object = DS.Object
Reference = DS.Reference
Signature = DS.Signature
SignatureMethod = DS.SignatureMethod
SignatureValue = DS.SignatureValue
SignedInfo = DS.SignedInfo
Transform = DS.Transform
Transforms = DS.Transforms
X509Certificate = DS.X509Certificate
X509Data = DS.X509Data
X509IssuerName = DS.X509IssuerName
X509SerialNumber = DS.X509SerialNumber
XADES = builder.ElementMaker(
namespace="http://uri.etsi.org/01903/v1.3.2#",
nsmap={
"xades": "http://uri.etsi.org/01903/v1.3.2#",
"ds": "http://www.w3.org/2000/09/xmldsig#",
},
)
Cert = XADES.Cert
CertDigest = XADES.CertDigest
DataObjectFormat = XADES.DataObjectFormat
Description = XADES.Description
DocumentationReference = XADES.DocumentationReference
DocumentationReferences = XADES.DocumentationReferences
Identifier = XADES.Identifier
IssuerSerial = XADES.IssuerSerial
MimeType = XADES.MimeType
ObjectIdentifier = XADES.ObjectIdentifier
QualifyingProperties = XADES.QualifyingProperties
SignedDataObjectProperties = XADES.SignedDataObjectProperties
SignedProperties = XADES.SignedProperties
SignedSignatureProperties = XADES.SignedSignatureProperties
SigningCertificate = XADES.SigningCertificate
SigningTime = XADES.SigningTime
UnsignedProperties = XADES.UnsignedProperties
def ensure_str(x, encoding="utf-8", none_ok=False):
if none_ok is True and x is None:
return x
if not isinstance(x, str):
x = x.decode(encoding)
return x
class BES:
def __init__(self):
self.guid = str(uuid.uuid1())
self.time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
def sha1(self, data):
h = hashlib.sha1(data).digest()
return ensure_str(base64.b64encode(h))
def _c14n(self, nodes, algorithm, inclusive_ns_prefixes=None):
exclusive, with_comments = False, False
if algorithm.startswith("http://www.w3.org/2001/10/xml-exc-c14n#"):
exclusive = True
if algorithm.endswith("#WithComments"):
with_comments = True
if not isinstance(nodes, list):
nodes = [nodes]
c14n = b""
for node in nodes:
c14n += etree.tostring(node, method="c14n", exclusive=exclusive, with_comments=with_comments,
inclusive_ns_prefixes=inclusive_ns_prefixes) # TODO: optimize if needed
if exclusive is False:
# TODO: there must be a nicer way to do this. See also:
# http://www.w3.org/TR/xml-c14n, "namespace axis"
# http://www.w3.org/TR/xml-c14n2/#sec-Namespace-Processing
c14n = c14n.replace(b' xmlns=""', b'')
return c14n
def build(self, fname, data, smime, cert, certcontent, signproc, base64encode=True, withcomments=False):
swithcomments = ""
if withcomments:
swithcomments = "#WithComments"
if base64encode:
data = ensure_str(base64.b64encode(data))
signedobj = Object(
data,
Encoding="http://www.w3.org/2000/09/xmldsig#base64",
MimeType=smime,
Id="Object1_" + self.guid,
)
elif 0:
signedobj = Object(
data,
MimeType='text/xml',
Id="Object1_" + self.guid,
)
else:
signedobj = Object(
MimeType='text/xml',
Id="Object1_" + self.guid,
)
tree = etree.parse(io.BytesIO(data))
signedobj.append(tree.getroot())
certdigest = self.sha1(certcontent)
b64 = b''.join(base64.encodebytes(certcontent).split())
certcontent = []
for i in range(0, len(b64), 64):
certcontent.append(b64[i:i + 64])
certcontent = b'\n'.join(certcontent)
certserialnumber = '%d' % cert.serial_number
certissuer = []
for k, v in (
('CN', 'common_name'),
('O', 'organization_name'),
('C', 'country_name'),
('serialNumber', 'serial_number'),
):
try:
v = cert.issuer.native[v]
certissuer.append('%s=%s' % (k, v))
except:
pass
certissuer = ','.join(certissuer)
signedprop = SignedProperties(
SignedSignatureProperties(
SigningTime(
self.time
),
SigningCertificate(
Cert(
CertDigest(
DigestMethod(
Algorithm="http://www.w3.org/2000/09/xmldsig#sha1",
),
DigestValue(
certdigest,
),
),
IssuerSerial(
X509IssuerName(
certissuer,
),
X509SerialNumber(
certserialnumber,
),
),
),
),
Id="SignedSignatureProperties_" + self.guid + "_54",
),
SignedDataObjectProperties(
DataObjectFormat(
Description("""\
MIME-Version: 1.0
Content-Type: %s
Content-Transfer-Encoding: binary
Content-Disposition: filename="%s"\
""" % (smime, fname),
),
ObjectIdentifier(
Identifier(
"http://www.certum.pl/OIDAsURI/signedFile/1.2.616.1.113527.3.1.1.3.1",
Qualifier="OIDAsURI",
),
Description(
u"Opis formatu dokumentu oraz jego pełna nazwa",
),
DocumentationReferences(
DocumentationReference(
"http://www.certum.pl/OIDAsURI/signedFile.pdf",
),
),
),
MimeType(
smime,
),
ObjectReference="#Reference1_" + self.guid + "_79",
),
Id="SignedDataObjectProperties_" + self.guid + "_15",
),
Id="SignedProperties_" + self.guid + "_10",
)
canonicalizedxml = self._c14n(signedobj, '')
digestvalue1 = self.sha1(canonicalizedxml)
canonicalizedxml = self._c14n(signedprop, '')
digestvalue2 = self.sha1(canonicalizedxml)
signedinfo = SignedInfo(
CanonicalizationMethod(
Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315",
),
SignatureMethod(
Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1",
),
Reference(
Transforms(
Transform(
Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315" + swithcomments,
)
),
DigestMethod(
Algorithm="http://www.w3.org/2000/09/xmldsig#sha1",
),
DigestValue(
digestvalue1,
),
Id="Reference1_" + self.guid + "_79",
URI="#Object1_" + self.guid,
),
Reference(
DigestMethod(
Algorithm="http://www.w3.org/2000/09/xmldsig#sha1",
),
DigestValue(
digestvalue2,
),
Id="SignedProperties-Reference_" + self.guid + "_76",
Type="http://uri.etsi.org/01903#SignedProperties",
URI="#SignedProperties_" + self.guid + "_10",
),
Id="SignedInfo_" + self.guid + "_1f",
)
canonicalizedxml = self._c14n(signedinfo, '')
signature = signproc(canonicalizedxml, 'sha1')
actualdigestencoded = ensure_str(base64.b64encode(signature))
digestvalue3 = []
for i in range(0, len(actualdigestencoded), 64):
digestvalue3.append(actualdigestencoded[i:i + 64])
digestvalue3 = '\n'.join(digestvalue3)
DOC = Signature(
signedinfo,
SignatureValue(
digestvalue3,
Id="SignatureValue_" + self.guid + "_0c",
),
KeyInfo(
X509Data(
X509Certificate(
certcontent.decode()
),
),
Id="KeyInfo_" + self.guid + "_7a",
),
Object(
QualifyingProperties(
signedprop,
UnsignedProperties(
Id="UnsignedProperties_" + self.guid + "_0b",
),
Id="QualifyingProperties_" + self.guid + "_1d",
Target="#Signature_" + self.guid + "_47",
),
),
signedobj,
Id="Signature_" + self.guid + "_47",
)
return DOC
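# Illustrative usage sketch (all names below are hypothetical; cert is expected
# to behave like an asn1crypto certificate object, cert_der_bytes is its DER
# encoding, and signproc(data, 'sha1') must return the raw signature bytes):
#
#   bes = BES()
#   signature_tree = bes.build("invoice.xml", xml_bytes, "text/xml",
#                              cert, cert_der_bytes, signproc)
#   signed_xml = etree.tostring(signature_tree, pretty_print=True)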
| 34.690391
| 108
| 0.508822
|
0b243117704319ab97aed0ce7d4699d04bea7bd2
| 4,832
|
py
|
Python
|
aether/sdk/multitenancy/views.py
|
eHealthAfrica/aether-django-sdk-library
|
fc371af89bfed155d465049320f32bf43860d001
|
[
"Apache-2.0"
] | 1
|
2020-05-04T21:05:11.000Z
|
2020-05-04T21:05:11.000Z
|
aether/sdk/multitenancy/views.py
|
eHealthAfrica/aether-django-sdk-library
|
fc371af89bfed155d465049320f32bf43860d001
|
[
"Apache-2.0"
] | 3
|
2019-09-30T15:45:43.000Z
|
2020-04-29T08:12:37.000Z
|
aether/sdk/multitenancy/views.py
|
eHealthAfrica/aether-django-sdk-library
|
fc371af89bfed155d465049320f32bf43860d001
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext_lazy as _
from rest_framework.decorators import action, api_view, permission_classes, renderer_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAdminUser
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from aether.sdk.drf.views import CacheViewSetMixin
from aether.sdk.multitenancy.utils import (
filter_by_realm,
filter_users_by_realm,
is_accessible_by_realm,
)
class MtViewSetMixin(CacheViewSetMixin):
'''
Defines ``get_queryset`` method to filter by realm.
Expects ``mt_field`` property.
Adds two new methods:
- ``get_object_or_404(pk)`` raises NOT_FOUND error if the instance
      does not exist or is not accessible by current realm.
- ``get_object_or_403(pk)`` raises FORBIDDEN error if the instance
exists and is not accessible by current realm.
Adds a detail endpoint ``/is-accessible`` only permitted with HEAD method,
returns the following statuses:
- 403 FORBIDDEN if the instance is not accessible by current realm
- 404 NOT_FOUND if the instance does not exist
- 204 NO_CONTENT otherwise
'''
mt_field = None
def get_queryset(self):
'''
Includes filter by realm in each query
'''
qs = super(MtViewSetMixin, self).get_queryset()
return filter_by_realm(self.request, qs, self.mt_field)
def get_object_or_404(self, pk):
'''
Custom method that raises NOT_FOUND error
        if the instance does not exist or is not accessible by current realm,
        otherwise returns the instance
'''
return get_object_or_404(self.get_queryset(), pk=pk)
def get_object_or_403(self, pk):
'''
Custom method that raises FORBIDDEN error
if the instance exists and is not accessible by current realm
otherwise returns the instance or ``None`` if it does not exist
'''
# without filtering by realm
qs = super(MtViewSetMixin, self).get_queryset()
if not qs.filter(pk=pk).exists():
return None
obj = qs.get(pk=pk)
if not is_accessible_by_realm(self.request, obj):
raise PermissionDenied(_('Not accessible by this realm'))
return obj
@action(detail=True, methods=['head'], url_path='is-accessible')
def is_accessible(self, request, pk=None, *args, **kwargs):
'''
Returns the following statuses:
- 404 NOT_FOUND if the instance does not exist
- 403 FORBIDDEN if the instance is not accessible by current realm
- 204 NO_CONTENT otherwise
'''
self.get_object_or_403(pk)
self.get_object_or_404(pk)
return Response(status=204)
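# Illustrative sketch of a viewset built on MtViewSetMixin (the model,
# serializer and mt_field value below are hypothetical, not part of this
# module):
#
#   class ProjectViewSet(MtViewSetMixin, viewsets.ModelViewSet):
#       queryset = Project.objects.all()
#       serializer_class = ProjectSerializer
#       mt_field = 'project'   # lookup path handed to filter_by_realm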
class MtUserViewSetMixin(CacheViewSetMixin):
'''
Defines ``get_queryset`` method to filter by realm authorization group.
'''
def get_queryset(self):
'''
Includes filter by realm authorization group in each query
'''
qs = super(MtUserViewSetMixin, self).get_queryset()
return filter_users_by_realm(self.request, qs)
@api_view(['GET'])
@renderer_classes([JSONRenderer])
@permission_classes([IsAdminUser])
def get_realms(*args, **kwargs):
'''
Get the list of current realms.
If MULTITENANCY is not enabled then
returns the fake realm `settings.NO_MULTITENANCY_REALM`
If MULTITENANCY is enabled then
the default realm is always included in the list
'''
if settings.MULTITENANCY:
from aether.sdk.multitenancy.models import MtInstance
realms = set(
MtInstance.objects.values_list('realm', flat=True).order_by('realm').distinct()
)
# include always the default realm
realms.add(settings.DEFAULT_REALM)
else:
realms = [settings.NO_MULTITENANCY_REALM]
return Response({'realms': list(realms)})
| 32.870748
| 92
| 0.692881
|
e7d20f9747b1361a28a323d37966a67af0c60335
| 1,955
|
py
|
Python
|
setup.py
|
navenduagarwal/pypi-testing
|
a2ef50bbbdca8f390397a125a8184b639393fee4
|
[
"MIT"
] | null | null | null |
setup.py
|
navenduagarwal/pypi-testing
|
a2ef50bbbdca8f390397a125a8184b639393fee4
|
[
"MIT"
] | null | null | null |
setup.py
|
navenduagarwal/pypi-testing
|
a2ef50bbbdca8f390397a125a8184b639393fee4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: setup.py
Description: Setup script to build and distribute sparshik-kyc module.
"""
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "sparshik-kyc"
PACKAGE_PPRINT_NAME = "Sparshik KYC"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
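# For the regex above to match, version.py (or _version.py) inside the package
# folder is expected to define the version as a plain assignment, e.g.
# (illustrative):
#
#   VERSION = '1.0.0'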
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
packages=find_packages(),
description="Python SDK for Sparshik KYC API",
long_description=readme + '\n\n' + changelog,
long_description_content_type="text/markdown",
url="https://github.com/navenduagarwal/pypi-testing",
license='MIT License',
author='Sparshik Technologies',
author_email='support@sparshik.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"Operating System :: OS Independent",
],
include_package_data=True,
install_requires=['requests'],
)
| 31.532258
| 70
| 0.706394
|
eb46bfc12d2e296f085655ea1e43e4b9b993d63e
| 2,878
|
py
|
Python
|
coconut/convenience.py
|
CS121Fresh/compiler
|
e50e2fd8b167bf6f00ab237b91009d785c72f0cd
|
[
"MIT"
] | null | null | null |
coconut/convenience.py
|
CS121Fresh/compiler
|
e50e2fd8b167bf6f00ab237b91009d785c72f0cd
|
[
"MIT"
] | null | null | null |
coconut/convenience.py
|
CS121Fresh/compiler
|
e50e2fd8b167bf6f00ab237b91009d785c72f0cd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------------------------------------------------
# INFO:
#-----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Convenience functions for using Coconut as a module.
"""
#-----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
#-----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
from coconut.exceptions import CoconutException
from coconut.command import Command
from coconut.constants import version_tag, version_long
#-----------------------------------------------------------------------------------------------------------------------
# COMMAND:
#-----------------------------------------------------------------------------------------------------------------------
CLI = Command()
def cmd(args, interact=False):
"""Process command-line arguments."""
if isinstance(args, (str, bytes)):
args = args.split()
return CLI.cmd(args=args, interact=interact)
VERSIONS = {
"num": VERSION,
"name": VERSION_NAME,
"spec": VERSION_STR,
"tag": version_tag,
"-v": version_long,
}
def version(which="num"):
"""Get the Coconut version."""
if which in VERSIONS:
return VERSIONS[which]
else:
raise CoconutException(
"invalid version type " + ascii(which),
extra="valid versions are " + ", ".join(VERSIONS),
)
#-----------------------------------------------------------------------------------------------------------------------
# COMPILER:
#-----------------------------------------------------------------------------------------------------------------------
setup = CLI.setup
PARSERS = {
"sys": lambda comp: comp.parse_sys,
"exec": lambda comp: comp.parse_exec,
"file": lambda comp: comp.parse_file,
"package": lambda comp: comp.parse_package,
"block": lambda comp: comp.parse_block,
"single": lambda comp: comp.parse_single,
"eval": lambda comp: comp.parse_eval,
"debug": lambda comp: comp.parse_debug,
}
def parse(code="", mode="sys"):
"""Compile Coconut code."""
if CLI.comp is None:
setup()
if mode in PARSERS:
return PARSERS[mode](CLI.comp)(code)
else:
raise CoconutException(
"invalid parse mode " + ascii(mode),
extra="valid modes are " + ", ".join(PARSERS),
)
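# Illustrative usage sketch (assumes a working Coconut installation; the piece
# of Coconut code below is only an example):
#
#   setup(target="sys")
#   py_source = parse('"hello, world!" |> print', mode="exec")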
| 31.977778
| 121
| 0.404448
|
01254614526268857d3c9092bf56842ea107c281
| 192
|
py
|
Python
|
tests/Unit/Evolution/Systems/ScalarAdvection/TestFunctions.py
|
erfz/spectre
|
dc772598a8197a4f2c4a729ee30dd4398f4cd591
|
[
"MIT"
] | 1
|
2018-10-01T06:07:16.000Z
|
2018-10-01T06:07:16.000Z
|
tests/Unit/Evolution/Systems/ScalarAdvection/TestFunctions.py
|
erfz/spectre
|
dc772598a8197a4f2c4a729ee30dd4398f4cd591
|
[
"MIT"
] | 4
|
2018-06-04T20:26:40.000Z
|
2018-07-27T14:54:55.000Z
|
tests/Unit/Evolution/Systems/ScalarAdvection/TestFunctions.py
|
erfz/spectre
|
dc772598a8197a4f2c4a729ee30dd4398f4cd591
|
[
"MIT"
] | null | null | null |
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
# Test function for computing flux
def compute_flux(u, velocity_field):
return u * velocity_field
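# Illustrative check of the elementwise product above (values are arbitrary):
#   compute_flux(np.array([1.0, 2.0]), np.array([0.5, 0.5]))  # -> array([0.5, 1.0])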
| 19.2
| 36
| 0.760417
|
f969a9ce5f4f50c629b6953672119c8ab5936bf3
| 11,787
|
py
|
Python
|
aionetworking/actions/file_storage.py
|
primal100/aionetworking
|
a29cbb022cbae1a4ad1c3d44327e9d0b0c930227
|
[
"MIT"
] | null | null | null |
aionetworking/actions/file_storage.py
|
primal100/aionetworking
|
a29cbb022cbae1a4ad1c3d44327e9d0b0c930227
|
[
"MIT"
] | 1
|
2018-12-23T00:50:33.000Z
|
2018-12-23T00:50:33.000Z
|
aionetworking/actions/file_storage.py
|
primal100/aionetworking
|
a29cbb022cbae1a4ad1c3d44327e9d0b0c930227
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
import asyncio
from dataclasses import dataclass, field
import logging
from pathlib import Path
from .base import BaseAction
from aionetworking.logging.loggers import get_logger_receiver
from aionetworking import settings
from aionetworking.compatibility import create_task, set_task_name, Protocol
from aionetworking.logging.utils_logging import p
from aionetworking.futures.value_waiters import StatusWaiter
from aionetworking.utils import makedirs
from aionetworking.types.logging import LoggerType
from typing import Any, ClassVar, AnyStr
from aionetworking.types.formats import MessageObjectType
@dataclass
class ManagedFile:
path: Path
mode: str = 'ab'
buffering: int = -1
timeout: int = 10
retries: int = 3
retry_interval: int = 2
logger: LoggerType = field(default_factory=get_logger_receiver)
_status: StatusWaiter = field(default_factory=StatusWaiter, init=False)
previous: 'ManagedFile' = field(default=None)
_queue: asyncio.Queue = field(default_factory=asyncio.Queue, init=False, repr=False, hash=False, compare=False)
_exception: OSError = field(default=None, init=False)
_open_files: ClassVar = {}
@classmethod
def open(cls, path, *args, **kwargs) -> 'ManagedFile':
try:
f = cls._open_files[path]
if not f.is_closing():
return f
kwargs['previous'] = f
except KeyError:
pass
f = cls(path, *args, **kwargs)
cls._open_files[path] = f
return f
@classmethod
async def close_all(cls, base_path: Path = None) -> None:
if base_path:
files = [f for f in cls._open_files.values() if f.is_in(base_path)]
else:
files = [f for f in cls._open_files.values()]
await asyncio.gather(*[f.close() for f in files])
@classmethod
def num_files(cls):
return len(cls._open_files)
def __post_init__(self):
self.path.parent.mkdir(parents=True, exist_ok=True)
self._task = create_task(self.manage())
set_task_name(self._task, f"ManagedFile:{self.path.name}")
def is_in(self, path) -> bool:
try:
self.path.relative_to(path)
return True
except ValueError:
return False
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb): ...
def is_closing(self):
return self._status.has_started() and self._status.is_stopping_or_stopped()
async def wait_closed(self):
return await self._status.wait_stopped()
async def wait_has_started(self):
return await self._status.wait_has_started()
def _cleanup(self) -> None:
self._status.set_stopping()
if self._open_files[self.path] == self:
del self._open_files[self.path]
self.logger.debug('Cleanup completed for %s', self.path)
self._status.set_stopped()
async def write(self, data: AnyStr) -> None:
fut = asyncio.Future()
if self._exception:
fut.set_exception(self._exception)
else:
self._queue.put_nowait((data, fut))
if self.logger.isEnabledFor(logging.DEBUG):
qsize = self._queue.qsize()
self.logger.debug('Message added to queue with future %s', id(fut))
self.logger.debug('There %s now %s in the write queue %s for %s',
p.plural_verb('is', qsize), p.no('item', qsize), id(self._queue), self.path)
await fut
async def close(self) -> None:
if not self.is_closing():
await self._status.wait_started()
self._status.set_stopping()
self.logger.debug('Closing file %s', self.path)
if not self._task.done():
await self.wait_has_started()
await self.wait_writes_done()
self._task.cancel()
try:
await self._task
except asyncio.CancelledError:
pass
else:
self.logger.debug('File %s already closed', self.path)
self.logger.debug('Closed file %s', self.path)
async def wait_writes_done(self) -> None:
self.logger.debug('Waiting for writes to complete for %s', self.path)
done, pending = await asyncio.wait([self._queue.join(), self._task], return_when=asyncio.FIRST_COMPLETED)
for d in done:
if d.exception(): #3.8 assignment expressions
self.logger.error(d.exception())
await d
self.logger.debug('Writes completed for %s', self.path)
def _task_done(self, num: int) -> None:
for _ in range(0, num):
self._queue.task_done()
self.logger.info('Task done set for %s on file %s', p.no('item', num), self.path)
async def manage_wrapper(self):
if self.previous:
await self.previous.wait_closed()
for i in range(0, self.retries):
try:
await self.manage()
return
except OSError as e:
if i == 3:
self._exception = e
while not self._queue.empty():
try:
item, fut = self._queue.get_nowait()
fut.set_exception(e)
self._task_done(1)
except asyncio.QueueEmpty:
self.logger.info('QueueEmpty error was unexpectedly caught for file %s', self.path)
await asyncio.sleep(self.retry_interval)
async def _write_items_from_queue(self, f):
self.logger.info('Retrieving item from queue for file %s. Timeout: %s', self.path,
p.no('second', self.timeout))
try:
data, fut = self._queue.get_nowait()
except asyncio.QueueEmpty:
data, fut = await asyncio.wait_for(self._queue.get(), timeout=self.timeout)
futs = [fut]
try:
while not self._queue.empty():
try:
item, fut = self._queue.get_nowait()
data += item
futs.append(fut)
except asyncio.QueueEmpty:
self.logger.warning('QueueEmpty error was unexpectedly caught for file %s', self.path)
self.logger.info('Retrieved %s from queue. Writing to file %s.', p.no('item',
len(futs)), self.path)
await f.write(data)
self.logger.info('%s written to file %s', p.no('byte', len(data)), self.path)
for fut in futs:
fut.set_result(True)
self.logger.debug('Result set on future %s', id(fut))
except Exception as e:
for fut in futs:
fut.set_exception(e)
finally:
asyncio.get_event_loop().call_soon(self._task_done, len(futs))
async def manage(self) -> None:
self._status.set_starting()
await makedirs(self.path.parent, exist_ok=True)
self.logger.info('Opening file %s', self.path)
async with settings.FILE_OPENER(self.path, mode=self.mode, buffering=self.buffering) as f:
try:
self.logger.debug('File %s opened', self.path)
self._status.set_started()
while True:
try:
await self._write_items_from_queue(f)
except asyncio.TimeoutError:
qsize = self._queue.qsize()
if qsize:
self.logger.warning(
'Get item for file %s timed out out even though there %s %s in the queue id %s',
self.path, p.plural_verb('is', qsize), p.no('item', qsize), id(self._queue))
await self._write_items_from_queue(f)
else:
self.logger.info('File %s closing due to timeout on new items to write', self.path)
break
except asyncio.CancelledError as e:
self.logger.info('File %s closing due to task being cancelled', self.path)
raise e
finally:
self._cleanup()
qsize = self._queue.qsize()
if qsize:
self.logger.warning(
'There %s %s in queue id %s for path %s even after cleanup',
p.plural_verb('is', qsize), p.no('item', qsize), id(self._queue), self.path)
await self._write_items_from_queue(f)
self.logger.info('File %s closed', self.path)
def default_data_dir():
return settings.DATA_DIR
@dataclass
class BaseFileStorage(BaseAction, Protocol):
base_path: Path = field(default_factory=default_data_dir, metadata={'pickle': True})
path: str = ''
attr: str = 'encoded'
mode: str = 'wb'
separator: AnyStr = ''
def __post_init__(self):
if 'b' in self.mode:
if isinstance(self.separator, str):
self.separator = self.separator.encode()
self._status.is_started()
settings.ACTION_CONFIG = {
'base_path': self.base_path,
'path': self.path,
'attr': self.attr,
'mode': self.mode,
'separator': self.separator
}
async def start(self, logger: LoggerType = None) -> None:
await super().start(logger)
if not self.quiet:
print(f'Storing output files in {self.base_path}')
def _get_full_path(self, msg: MessageObjectType) -> Path:
return self.base_path / self._get_path(msg)
def _get_path(self, msg: MessageObjectType) -> Path:
return Path(self.path.format(msg=msg))
def _get_data(self, msg: MessageObjectType) -> AnyStr:
data = getattr(msg, self.attr)
if self.separator:
data += self.separator
return data
@abstractmethod
async def _write_to_file(self, path: Path, data: AnyStr): ...
async def write_one(self, msg: MessageObjectType) -> Path:
path = self._get_full_path(msg)
msg.logger.debug('Writing to file %s', path)
data = self._get_data(msg)
await self._write_to_file(path, data)
msg.logger.debug('Data written to file %s', path)
return path
async def do_one(self, msg: MessageObjectType) -> Any:
self._status.set_started()
if getattr(msg, 'store', True):
await self.write_one(msg)
return getattr(msg, 'response', None)
@dataclass
class FileStorage(BaseFileStorage):
name = 'File Storage'
async def _write_to_file(self, path: Path, data: AnyStr) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
async with settings.FILE_OPENER(path, self.mode) as f:
await f.write(data)
@dataclass
class BufferedFileStorage(BaseFileStorage):
name = 'Buffered File Storage'
mode: str = 'ab'
_qsize: int = 0
close_file_after_inactivity: int = 10
buffering: int = -1
async def _write_to_file(self, path: Path, data: AnyStr) -> None:
async with ManagedFile.open(path, mode=self.mode, buffering=self.buffering,
timeout=self.close_file_after_inactivity, logger=self.logger) as f:
await f.write(data)
async def close(self) -> None:
self._status.set_stopping()
await ManagedFile.close_all(base_path=self.base_path)
self._status.set_stopped()
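# Illustrative usage sketch (the path pattern and the msg object are
# hypothetical; msg is expected to expose 'encoded' plus whatever attributes
# the path pattern references):
#
#   storage = BufferedFileStorage(base_path=Path('/tmp/recordings'),
#                                 path='{msg.address}_{msg.name}.out')
#   await storage.start()
#   await storage.do_one(msg)   # appends msg.encoded through a shared ManagedFile
#   await storage.close()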
| 38.269481
| 115
| 0.580809
|
38d872f3188aa48d880de710b0c6e5dd553d4574
| 979
|
py
|
Python
|
blueapps/opentelemetry/instrument_app/celery.py
|
qqqqqie/bk-sops
|
f2e734c2cdac76f89d2e4f0fd7de36168e452141
|
[
"Apache-2.0"
] | 881
|
2019-03-25T02:45:42.000Z
|
2022-03-30T09:10:49.000Z
|
blueapps/opentelemetry/instrument_app/celery.py
|
Tencent/bk-sops
|
f2938f6162a39908eac0bde3e5230a3c62ac09f6
|
[
"Apache-2.0"
] | 3,303
|
2019-03-25T04:18:03.000Z
|
2022-03-31T11:52:03.000Z
|
blueapps/opentelemetry/instrument_app/celery.py
|
qqqqqie/bk-sops
|
f2e734c2cdac76f89d2e4f0fd7de36168e452141
|
[
"Apache-2.0"
] | 395
|
2019-03-25T02:53:36.000Z
|
2022-03-31T08:37:28.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from celery.signals import worker_process_init, worker_init
from blueapps.opentelemetry.setup import setup_by_settings
@worker_process_init.connect(weak=False)
def worker_process_init_otel_trace_setup(*args, **kwargs):
setup_by_settings()
| 44.5
| 115
| 0.800817
|