from conans import ConanFile, CMake, tools
class CppAgentConan(ConanFile):
name = "mtconnect_cppagent"
version = "2.0"
generators = "cmake"
url = "https://github.com/mtconnect/cppagent.git"
license = "Apache License 2.0"
settings = "os", "compiler", "arch", "build_type", "arch_build"
options = { "run_tests": [True, False], "build_tests": [True, False], "without_python": [True, False],
"without_ruby": [True, False], "without_ipv6": [True, False], "with_ruby": [True, False],
"with_python": [True, False] }
description = "MTConnect reference C++ agent copyright Association for Manufacturing Technology"
requires = ["boost/1.77.0", "libxml2/2.9.10", "date/2.4.1", "nlohmann_json/3.9.1",
"mqtt_cpp/11.0.0", "openssl/1.1.1k"]
build_policy = "missing"
default_options = {
"run_tests": True,
"build_tests": True,
"without_python": True,
"without_ruby": False,
"without_ipv6": False,
"with_python": False,
"with_ruby": False,
"boost:shared": False,
"boost:without_python": True,
"boost:without_test": True,
"boost:extra_b2_flags": "visibility=hidden",
"libxml2:shared": False,
"libxml2:include_utils": False,
"libxml2:http": False,
"libxml2:ftp": False,
"libxml2:iconv": False,
"libxml2:zlib": False,
"gtest:shared": False,
"date:use_system_tz_db": True
}
def configure(self):
if not self.options.without_python:
self.options["boost"].without_python = False
self.windows_xp = self.settings.os == 'Windows' and self.settings.compiler.toolset and \
self.settings.compiler.toolset in ('v141_xp', 'v140_xp')
if self.settings.os == 'Windows':
if self.settings.build_type and self.settings.build_type == 'Debug':
self.settings.compiler.runtime = 'MTd'
else:
self.settings.compiler.runtime = 'MT'
self.settings.compiler.version = '16'
if "libcxx" in self.settings.compiler.fields and self.settings.compiler.libcxx == "libstdc++":
raise Exception("This package is only compatible with libstdc++11, add -s compiler.libcxx=libstdc++11")
self.settings.compiler.cppstd = 17
if self.windows_xp:
self.options.build_tests = False
if not self.options.build_tests:
self.options.run_tests = False
if not self.options.without_ruby:
self.options.with_ruby = True
if not self.options.without_python:
self.options.with_python = True
# if self.windows_xp:
# self.options["boost"].extra_b2_flags = self.options["boost"].extra_b2_flags + "define=BOOST_USE_WINAPI_VERSION=0x0501 "
# elif self.settings.os == 'Windows':
# self.options["boost"].extra_b2_flags = self.options["boost"].extra_b2_flags + "define=BOOST_USE_WINAPI_VERSION=0x0600 "
def requirements(self):
if not self.windows_xp:
self.requires("gtest/1.10.0")
if self.options.with_ruby:
self.requires("mruby/3.1.0")
def build(self):
cmake = CMake(self)
cmake.verbose = True
if not self.options.build_tests:
cmake.definitions['AGENT_ENABLE_UNITTESTS'] = 'OFF'
if self.options.without_ipv6:
cmake.definitions['AGENT_WITHOUT_IPV6'] = 'ON'
if self.options.with_python:
cmake.definitions['WITH_PYTHON'] = 'ON'
else:
cmake.definitions['WITH_PYTHON'] = 'OFF'
if self.options.with_ruby:
cmake.definitions['WITH_RUBY'] = 'ON'
else:
cmake.definitions['WITH_RUBY'] = 'OFF'
if self.windows_xp:
cmake.definitions['WINVER'] = '0x0501'
cmake.configure()
cmake.build()
if self.options.run_tests:
cmake.test()
def imports(self):
self.copy("*.dll", "bin", "bin")
self.copy("*.so*", "bin", "lib")
self.copy("*.dylib", "bin", "lib")
|
from protox import Message, Int32
from protox.message import define_fields
def test_define_fields():
class User(Message):
id: int
define_fields(
User,
id=Int32(number=1),
)
user_id = 123
user = User(id=user_id)
assert isinstance(user.id, int)
assert user.id == user_id
|
import sys
char_ = None
def readchar_():
global char_
    if char_ is None:
char_ = sys.stdin.read(1)
return char_
def skipchar():
global char_
char_ = None
return
def stdinsep():
while True:
c = readchar_()
        if c in ('\n', '\t', '\r', ' '):
skipchar()
else:
return
def readint():
c = readchar_()
if c == '-':
sign = -1
skipchar()
else:
sign = 1
out = 0
while True:
c = readchar_()
        if '0' <= c <= '9':
out = out * 10 + int(c)
skipchar()
else:
return out * sign
def mktoto(v1):
t = {"foo":v1, "bar":0, "blah":0}
return t
def result(t, n):
    out0 = 0
    for j in range(n):
t[j]["blah"] += 1
out0 = out0 + t[j]["foo"] + t[j]["blah"] * t[j]["bar"] + t[j]["bar"] * t[j]["foo"]
return out0
t = [None] * 4
for i in range(0, 4):
t[i] = mktoto(i)
t[0]["bar"] = readint()
stdinsep()
t[1]["blah"] = readint()
titi = result(t, 4)
print("%d%d" % (titi, t[2]["blah"]), end='')
|
from django.db import models
# -----------------------------------------------------------------------------
# Contacts Address
# -----------------------------------------------------------------------------
class Contacts(models.Model):
first_name = models.CharField(max_length=40)
last_name = models.CharField(max_length=40)
nick_name = models.CharField(max_length=40, blank=True)
title = models.CharField(max_length=10, blank=True)
company = models.CharField(max_length=40, blank=True)
#job_title = models.CharField(max_length=40,default='x')
email = models.EmailField(blank=True)
#notes = models.TextField(null=True, blank=True, default='none')
# created = models.DateTimeField(auto_now_add=True)
# updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.first_name
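# A minimal ORM usage sketch (hypothetical values; assumes the app is
# installed and migrations have been applied):
#
#     c = Contacts.objects.create(first_name="Ada", last_name="Lovelace",
#                                 email="ada@example.com")
#     Contacts.objects.filter(last_name__icontains="love").first()  # -> c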
|
"""Utility functions for Python."""
from __future__ import annotations
import ctypes
import io
import os
import sys
import tempfile
from contextlib import contextmanager
from typing import Any, Optional
from typing import overload, Sequence
import re
import numpy as np
import pyproj
#import pyproj.transformer
#import pyproj.crs
libc = ctypes.CDLL(None)
c_stdout = ctypes.c_void_p.in_dll(libc, 'stdout')
@contextmanager
def no_stdout(stream=None, disable=False):
"""
Redirect the stdout to a stream file.
Source: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
param: stream: a BytesIO object to write to.
param: disable: whether to temporarily disable the feature.
"""
if disable:
yield
return
if stream is None:
stream = io.BytesIO()
# The original fd stdout points to. Usually 1 on POSIX systems.
original_stdout_fd = sys.stdout.fileno()
def _redirect_stdout(to_fd):
"""Redirect stdout to the given file descriptor."""
# Flush the C-level buffer stdout
libc.fflush(c_stdout)
# Flush and close sys.stdout - also closes the file descriptor (fd)
sys.stdout.close()
# Make original_stdout_fd point to the same file as to_fd
os.dup2(to_fd, original_stdout_fd)
# Create a new sys.stdout that points to the redirected fd
sys.stdout = io.TextIOWrapper(os.fdopen(original_stdout_fd, 'wb'))
# Save a copy of the original stdout fd in saved_stdout_fd
saved_stdout_fd = os.dup(original_stdout_fd)
try:
# Create a temporary file and redirect stdout to it
tfile = tempfile.TemporaryFile(mode='w+b')
_redirect_stdout(tfile.fileno())
# Yield to caller, then redirect stdout back to the saved fd
yield
_redirect_stdout(saved_stdout_fd)
# Copy contents of temporary file to the given stream
tfile.flush()
tfile.seek(0, io.SEEK_SET)
stream.write(tfile.read())
finally:
tfile.close()
os.close(saved_stdout_fd)
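# A minimal usage sketch of `no_stdout` (illustrative; it needs a real file
# descriptor behind sys.stdout, so it will not work in e.g. notebooks):
#
#     buf = io.BytesIO()
#     with no_stdout(buf):
#         print("hidden from the console")
#     buf.getvalue()  # b'hidden from the console\n'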
_LV03_TO_LV95 = pyproj.transformer.Transformer.from_crs(pyproj.crs.CRS.from_epsg(21781), pyproj.crs.CRS.from_epsg(2056))
def list_files(directory: str, pattern: str = ".*") -> list[str]:
"""
List all files in a directory and return their absolute paths.
:param directory: The directory to list files within.
:param pattern: A regex pattern to match (for example to filter certain extensions).
"""
files: list[str] = []
for filename in os.listdir(directory):
if re.match(pattern, filename) is None:
continue
filepath = os.path.abspath(os.path.join(directory, filename))
if not os.path.isfile(filepath):
continue
files.append(filepath)
return files
def station_from_filepath(filepath: str) -> str:
"""Parse the station_XXXX or station_XXXX_Y part from a filepath."""
basename = os.path.basename(filepath)
max_index = 14 if "_A" in basename or "_B" in basename else 12
station = basename[:max_index]
assert station.startswith("station_")
return station
def sgi_1973_to_2016(sgi_id: str) -> str:
"""
Convert the slightly different SGI1973 to the SGI2016 id format.
:examples:
>>> sgi_1973_to_2016("B55")
'B55'
>>> sgi_1973_to_2016("B55-19")
'B55-19'
>>> sgi_1973_to_2016("E73-2")
'E73-02'
"""
if "-" not in sgi_id:
return sgi_id
start, end = sgi_id.split("-")
return start + "-" + end.zfill(2)
def sgi_2016_to_1973(sgi_id: str) -> str:
"""
Convert the slightly different SGI2016 to the SGI1973 id format.
:examples:
>>> sgi_2016_to_1973("B55")
'B55'
>>> sgi_2016_to_1973("B55-19")
'B55-19'
>>> sgi_2016_to_1973("E73-02")
'E73-2'
"""
if "-" not in sgi_id:
return sgi_id
start, end = sgi_id.split("-")
return start + "-" + str(int(end))
@overload
def lv03_to_lv95(easting: np.ndarray, northing: np.ndarray) -> np.ndarray: ...
@overload
def lv03_to_lv95(easting: float, northing: float) -> tuple[float, float]: ...
def lv03_to_lv95(easting: np.ndarray | float, northing: np.ndarray | float) -> np.ndarray | tuple[float, float]:
trans = _LV03_TO_LV95.transform(easting, northing)
    # np.ndarray is not a collections.abc.Sequence, so test for it explicitly.
    return np.array(trans).T if isinstance(easting, np.ndarray) else trans
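# A minimal usage sketch (Bern's old LV03 origin; the LV95 values are only
# approximately the LV03 ones shifted by 2'000'000 / 1'000'000):
#
#     lv03_to_lv95(600_000.0, 200_000.0)                        # ~ (2.6e6, 1.2e6)
#     lv03_to_lv95(np.array([600_000.]), np.array([200_000.]))  # shape (1, 2)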
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
from collections import namedtuple
import kevlar
import re
AlignmentBlock = namedtuple('AlignmentBlock', 'length type target query')
class AlignmentTokenizer(object):
def __init__(self, queryseq, targetseq, cigar):
self._query = queryseq
self._target = targetseq
self._origcigar = cigar
self._cigar = cigar
self.blocks = list(self._tokenize())
self._endcheck()
def _tokenize(self):
target = self._target
query = self._query
blocks = re.finditer(r'(\d+)([DIM])', self._origcigar)
for block in blocks:
length = int(block.group(1))
blocktype = block.group(2)
tseq, qseq = None, None
if blocktype in ('M', 'D'):
tseq = target[:length]
target = target[length:]
if blocktype in ('M', 'I'):
qseq = query[:length]
query = query[length:]
yield AlignmentBlock(length, blocktype, tseq, qseq)
assert target == ''
assert query == ''
def _endcheck(self):
if len(self.blocks) < 3:
return
if self.blocks[-1].type != 'M' or self.blocks[-3].type != 'M':
return
if self.blocks[-2].type == 'D':
prevseq = self.blocks[-2].target
lastseq = self.blocks[-1].target
endseq = self.blocks[-1].query
else:
prevseq = self.blocks[-2].query
lastseq = self.blocks[-1].query
endseq = self.blocks[-1].target
longseq = prevseq + lastseq
if longseq.startswith(endseq):
self.blocks[-3] = AlignmentBlock(
self.blocks[-3].length + self.blocks[-1].length, 'M',
self.blocks[-3].target + self.blocks[-1].target,
self.blocks[-3].query + self.blocks[-1].query,
)
del self.blocks[-1]
newcigar = ''
for block in self.blocks:
newcigar += '{:d}{:s}'.format(block.length, block.type)
self._cigar = newcigar
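# A minimal demo with a toy alignment (sequences and CIGAR invented for
# illustration): 3 matches, a 1 bp deletion in the query, then 2 more matches.
if __name__ == '__main__':
    tok = AlignmentTokenizer('ACGTG', 'ACGATG', '3M1D2M')
    for b in tok.blocks:
        print(b.length, b.type, b.target, b.query)
    # 3 M ACG ACG
    # 1 D A None
    # 2 M TG TG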
|
# Generated by Django 2.1.7 on 2019-04-15 05:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting_tech', '0034_auto_20190415_0848'),
]
operations = [
migrations.AddField(
model_name='acquisition',
name='room',
field=models.CharField(default='435', max_length=50, verbose_name='Кабинет'),
),
migrations.AddField(
model_name='acquisition',
name='timestamp',
field=models.DateTimeField(auto_now=True, verbose_name='Время создания заявки'),
),
]
|
# utils functions for file io
import re
def split_region(region):
    region_split = re.split(r'[\W_+]', region)
    contig, start, end = region_split[:3]  # ignore any extra fields; overlap ignores strandness (TODO: consider strandness?)
start = int(start)
end = int(end)
return contig, start, end
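# A minimal sketch of the expected input (hypothetical region strings):
#
#     split_region("chr1:1000-2000")   # -> ("chr1", 1000, 2000)
#     split_region("chr1_1000_2000")   # same result; any non-word char splits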
def id2value(values):
return {(id+1):value for id, value in enumerate(values)}
def value2id(values):
return {value:(id+1) for id, value in enumerate(values)}
def load_values(file):
print("== loading: {} ==".format(file))
with open(file,"r") as f:
return [value.strip() for value in f.readlines()]
def load_count_mat(file):
"""
read sparse count matrix (row x col) into dict, with row ids as keys
"""
print("== loading sparse count matrix {} ==".format(file))
with open(file, "r") as f:
i_skip = 0
for i, line in enumerate(f.readlines()):
if line.startswith("%"):
i_skip += 1
elif i > i_skip:
                row_id, col_id, count = re.split(r'\W+', line.strip())
counts[int(row_id)][int(col_id)] = int(count)
else:
                n_rows, n_cols, _ = re.split(r'\W+', line.strip())
counts = {(id+1):{} for id in range(int(n_rows))} # 1-indexed
return counts
def reindex(actual, base=1):
"""
    Re-index a numeric list that has had some values removed (assuming the values are unique and sorted ascending):
Ex:
input: original:[1,2,3,4,5], actual:[2,4];
return: {2:1,4:2}
"""
reindex = {v:(i+base) for i,v in enumerate(actual)}
return reindex
|
"""
This file is part of mss.
:copyright: Copyright 2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import matplotlib as mpl
import mslib.mswms.mpl_vsec_styles
class VS_Template(mslib.mswms.mpl_vsec_styles.AbstractVerticalSectionStyle):
name = "VSTemplate" # Pick a proper name starting with "VS"
title = "Air Temperature"
abstract = "Air Temperature (degC)"
required_datafields = [
# level type, CF standard name, unit
("pl", "air_pressure", "Pa"), # air_pressure must be given for VS plots
("pl", "air_temperature", "degC"),
]
def _plot_style(self):
fill_range = np.arange(-93, 28, 2)
fill_entity = "air_temperature"
contour_entity = "air_temperature"
# main plot
cmap = mpl.cm.plasma
cf = self.ax.contourf(
self.horizontal_coordinate, self.data["air_pressure"], self.data[fill_entity],
fill_range, cmap=cmap, extend="both")
self.add_colorbar(cf, fill_entity)
# contour
temps_c = self.ax.contour(
self.horizontal_coordinate, self.data["air_pressure"], self.data[contour_entity], colors="w")
self.ax.clabel(temps_c, fmt="%i")
# finalise the plot
self._latlon_logp_setup()
|
import re
from torch import nn, optim
from torchvision import models
#####################################################################################################
class ConvNet(nn.Module):
def __init__(self, model_name=None, pretrained=False):
super(ConvNet, self).__init__()
if re.match(r"resnet", model_name, re.IGNORECASE):
self.model = models.resnet50(pretrained=pretrained, progress=True)
if pretrained:
self.freeze()
            in_features = self.model.fc.in_features
            # Wrap in nn.Sequential: modules added to a ResNet via add_module never run in forward().
            self.model.fc = nn.Sequential(nn.Linear(in_features, out_features=4), nn.LogSoftmax(dim=1))
elif re.match(r"vgg", model_name, re.IGNORECASE):
self.model = models.vgg16_bn(pretrained=pretrained, progress=True)
if pretrained:
self.freeze()
in_features = self.model.classifier[-1].in_features
self.model.classifier[-1] = nn.Linear(in_features=in_features, out_features=4)
self.model.classifier.add_module("Final Activation", nn.LogSoftmax(dim=1))
elif re.match(r"mobilenet", model_name, re.IGNORECASE):
self.model = models.mobilenet_v3_small(pretrained=pretrained, progress=True)
if pretrained:
self.freeze()
in_features = self.model.classifier[-1].in_features
self.model.classifier[-1] = nn.Linear(in_features=in_features, out_features=4)
self.model.classifier.add_module("Final Activation", nn.LogSoftmax(dim=1))
else:
raise ValueError("Incorrect value passed to model_name. Supported are \n\n1. resnet\n2. vgg\n3. mobilenet\n\n")
def freeze(self):
for params in self.parameters():
params.requires_grad = False
def get_optimizer(self, lr=1e-3, wd=0):
params = [p for p in self.parameters() if p.requires_grad]
return optim.Adam(params, lr=lr, weight_decay=wd)
def get_plateau_scheduler(self, optimizer=None, patience=5, eps=1e-8):
return optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, patience=patience, eps=eps, verbose=True)
def forward(self, x):
return self.model(x)
#####################################################################################################
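# A minimal demo (model_name and input shape are illustrative).
if __name__ == "__main__":
    import torch
    net = ConvNet(model_name="resnet", pretrained=False)
    opt = net.get_optimizer(lr=1e-3)
    sched = net.get_plateau_scheduler(optimizer=opt)
    out = net(torch.randn(2, 3, 224, 224))   # log-probabilities over 4 classes
    print(out.shape)                          # torch.Size([2, 4])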
|
from o3seespy.command.nd_material.base_material import NDMaterialBase
class ContactMaterial2D(NDMaterialBase):
"""
The ContactMaterial2D NDMaterial Class
This command is used to construct a ContactMaterial2D nDMaterial object.
"""
op_type = 'ContactMaterial2D'
def __init__(self, osi, mu, g_mod, c, t):
"""
Initial method for ContactMaterial2D
Parameters
----------
osi: o3seespy.OpenSeesInstance
mu: float
Interface frictional coefficient
g_mod: float
Interface stiffness parameter
c: float
Interface cohesive intercept
t: float
Interface tensile strength
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> o3.nd_material.ContactMaterial2D(osi, mu=1.0, g_mod=1.0, c=1.0, t=1.0)
"""
self.osi = osi
self.mu = float(mu)
self.g_mod = float(g_mod)
self.c = float(c)
self.t = float(t)
if osi is not None:
osi.n_mat += 1
self._tag = osi.n_mat
self._parameters = [self.op_type, self._tag, self.mu, self.g_mod, self.c, self.t]
if osi is None:
self.built = 0
if osi is not None:
self.to_process(osi)
class ContactMaterial3D(NDMaterialBase):
"""
The ContactMaterial3D NDMaterial Class
This command is used to construct a ContactMaterial3D nDMaterial object.
"""
op_type = 'ContactMaterial3D'
def __init__(self, osi, mu, g_mod, c, t):
"""
Initial method for ContactMaterial3D
Parameters
----------
osi: o3seespy.OpenSeesInstance
mu: float
Interface frictional coefficient
g_mod: float
Interface stiffness parameter
c: float
Interface cohesive intercept
t: float
Interface tensile strength
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=3)
>>> o3.nd_material.ContactMaterial3D(osi, mu=1.0, g_mod=1.0, c=1.0, t=1.0)
"""
self.osi = osi
self.mu = float(mu)
self.g_mod = float(g_mod)
self.c = float(c)
self.t = float(t)
if osi is not None:
osi.n_mat += 1
self._tag = osi.n_mat
self._parameters = [self.op_type, self._tag, self.mu, self.g_mod, self.c, self.t]
if osi is None:
self.built = 0
if osi is not None:
self.to_process(osi)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
class Instruction(object):
def __init__(self, f, l, a):
self.f = f
self.l = l
self.a = a
class Interpret(object):
def __init__(self, taclist):
self.code = taclist
self.paramInit()
def paramInit(self):
self.buf = None
self.c = 0 # current code index
self.p = 0 # pointer of code
self.b = 1 # base address of code
self.t = 0 # pointer of stack top
# runtime stack
        self.s = [0 for x in range(5000)]
        self.reg = [0 for x in range(16)]
def judge(self):
if self.p == 0:
return False
return True
def base(self, l, b):
b1 = b
while l > 0:
b1 = self.s[b1]
l -= 1
return b1
def send(self, data):
self.s[self.t] = data
def recv(self):
return self.buf
def showStack(self):
        return [self.s[x] for x in range(self.t+1)]
def sg_step(self):
p, b, t = self.p, self.b, self.t
s, reg = self.s, self.reg
self.c = p
        '''
        tag reports I/O behaviour to the caller:
        0 means a common operation
        1 means an input request
        2 means an output request
        3 means an unconditional jump was taken
        '''
tag = 0
i = self.code[p]
p += 1
if i.f == "lit":
t += 1
s[t] = i.a
if i.f == "opr":
if i.a == 0: #return
t = b - 1
p = s[t + 3]
b = s[t + 2]
if i.a == 1: # inverse
s[t] = -s[t]
if i.a == 2: # plus
t = t - 1
s[t] = s[t] + s[t + 1]
if i.a == 3: # minus
t = t - 1
s[t] = s[t] - s[t + 1]
if i.a == 4: # times
t = t - 1
s[t] = s[t] * s[t + 1]
if i.a == 5:
t = t - 1
if i.l == 0: # slash
                    s[t] = s[t] // s[t + 1]  # floor division keeps the VM integer-valued
if i.l == 1: # mod
s[t] = s[t] % s[t + 1]
if i.a == 6: # odd
s[t] = s[t] & 1
if i.a == 7:
t = t - 1
if i.l == 0: # equal
s[t] = (s[t] == s[t + 1])
if i.l == 1: # not equal
s[t] = (s[t] != s[t + 1])
if i.a == 8:
t = t - 1
if i.l == 0: # less
s[t] = (s[t] < s[t + 1])
if i.l == 1: # less or equal
s[t] = (s[t] <= s[t + 1])
if i.a == 9:
t = t - 1
if i.l == 0: # larger
s[t] = (s[t] > s[t + 1])
if i.l == 1: # larger or equal
s[t] = (s[t] >= s[t + 1])
if i.a == 10: # bitwise and
t = t - 1
s[t] = s[t] & s[t + 1]
if i.a == 11: # bitwise or
t = t - 1
s[t] = s[t] | s[t + 1]
if i.a == 12: # bitwise not
s[t] = ~s[t]
if i.a == 13: # xor
t = t - 1
s[t] = s[t] ^ s[t + 1]
if i.a == 14: # logic and
t = t - 1
s[t] = (s[t] and s[t + 1])
if i.a == 15: # logic or
t = t - 1
s[t] = (s[t] or s[t + 1])
if i.a == 16: # shift operation
t = t - 1
if i.l == 0:
s[t] = s[t] << s[t + 1]
if i.l == 1:
s[t] = s[t] >> s[t + 1]
if i.f == "lod": # push value from memory into stack
t = t + 1
s[t] = s[self.base(i.l, b) + i.a]
if i.f == "sto": # save the stack top value
s[self.base(i.l, b) + i.a] = s[t]
t = t - 1
if i.f == "cal": # call function
s[t + 1] = self.base(i.l, b)
s[t + 2] = b
s[t + 3] = p
b = t + 1
p = i.a
if i.f == "int": # initialize a size data space
t = t + i.a
if i.f == "jmp": # unconditional jump
p = i.a
tag = 3
        if i.f == "jne": # conditional jump taken when the stack top == 0
if s[t] == 0:
p = i.a
t = t - 1
        if i.f == "jeq": # conditional jump taken when the stack top != 0
if s[t] != 0:
p = i.a
t = t - 1
if i.f == "in": # input
t = t + 1
if i.a == 0:
tag = 1
else:
s[t] = reg[i.a]
if i.f == "out": # output
if i.a == 0:
self.buf = s[t]
tag = 2
else:
reg[i.a] = s[t]
t = t - 1
self.p, self.b, self.t = p, b, t
self.s, self.reg = s, reg
return tag
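# A minimal driving demo (the program and host loop are illustrative; jumping
# to address 0 is how this VM signals a halt, since judge() then goes False).
if __name__ == '__main__':
    prog = [
        Instruction("lit", 0, 2),   # push 2
        Instruction("lit", 0, 3),   # push 3
        Instruction("opr", 0, 2),   # add -> 5
        Instruction("out", 0, 0),   # move stack top into the output buffer
        Instruction("jmp", 0, 0),   # jump to 0: halts the loop below
    ]
    vm = Interpret(prog)
    while True:
        tag = vm.sg_step()
        if tag == 2:                # output request
            print(vm.recv())        # -> 5
        if not vm.judge():
            break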
|
"""
Given an array of numbers which is sorted in ascending order and is rotated ‘k’
times around a pivot, find ‘k’.
You can assume that the array does not have any duplicates.
Example 1:
Input: [10, 15, 1, 3, 8]
Output: 2
Explanation: The array has been rotated 2 times.
"""
# Time: O(log n) Space: O(1)
def count_rotations(arr):
start, end = 0, len(arr) - 1
while start < end:
mid = start + (end - start) // 2
# if mid is greater than the next element
if mid < end and arr[mid] > arr[mid + 1]:
return mid + 1
# if mid is smaller than the previous element
if mid > start and arr[mid - 1] > arr[mid]:
return mid
if arr[start] < arr[mid]: # left side is sorted, so the pivot is on right side
start = mid + 1
else: # right side is sorted, so the pivot is on the left side
end = mid - 1
return 0 # the array has not been rotated
def main():
print(count_rotations([10, 15, 1, 3, 8]))
print(count_rotations([4, 5, 7, 9, 10, -1, 2]))
print(count_rotations([1, 3, 8, 10]))
main()
|
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.NaampadObject import NaampadObject
from OTLMOW.OTLModel.Datatypes.KlPadNetwerkprotectie import KlPadNetwerkprotectie
from OTLMOW.GeometrieArtefact.GeenGeometrie import GeenGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Pad(NaampadObject, GeenGeometrie):
"""Een aaneengesloten reeks van links die samen een verbinding realiseren over het netwerk, gebruik makende van eenzelfde technologie (vb SDH, OTN…)."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/installatie#Pad'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
NaampadObject.__init__(self)
GeenGeometrie.__init__(self)
self._netwerkprotectie = OTLAttribuut(field=KlPadNetwerkprotectie,
naam='netwerkprotectie',
label='netwerkprotectie',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#Pad.netwerkprotectie',
kardinaliteit_max='*',
definition='Referentie van het pad dat redundantie levert aan dit pad.',
owner=self)
@property
def netwerkprotectie(self):
"""Referentie van het pad dat redundantie levert aan dit pad."""
return self._netwerkprotectie.get_waarde()
@netwerkprotectie.setter
def netwerkprotectie(self, value):
self._netwerkprotectie.set_waarde(value, owner=self)
|
from thop import profile
import torch
from .network_factory.resnet_feature import BackBone_ResNet
from .network_factory.mobilenet_v2_feature import BackBone_MobileNet
from .network_factory.part_group_network import Part_Group_Network
from .network_factory.pose_hrnet import BackBone_HRNet
from myutils import get_model_summary
import logging
logger = logging.getLogger(__name__)
def bulid_up_network(config,criterion):
# if config.model.use_backbone:
# logger.info("backbone of architecture is {}".format(config.model.backbone_net_name))
    if config.model.backbone_net_name == "resnet":
        backbone = BackBone_ResNet(config, is_train=True)
    elif config.model.backbone_net_name == "mobilenet_v2":
        backbone = BackBone_MobileNet(config, is_train=True)
    elif config.model.backbone_net_name == "hrnet":
        backbone = BackBone_HRNet(config, is_train=True)
    else:
        raise ValueError("unsupported backbone_net_name: {}".format(config.model.backbone_net_name))
Arch = Part_Group_Network(config.model.keypoints_num, criterion, backbone, **config.model.part_group_config)
if config.model.use_pretrained:
Arch.load_pretrained(config.model.pretrained)
logger.info("\n\nbackbone: params and flops")
logger.info(get_model_summary(backbone,torch.randn(1, 3, config.model.input_size.h,config.model.input_size.w)))
logger.info("\n\nwhole architecture: params and flops")
logger.info(get_model_summary(Arch,torch.randn(1, 3, config.model.input_size.h,config.model.input_size.w)))
logger.info("=========== thop statistics ==========")
dump = torch.randn(1, 3, config.model.input_size.h,config.model.input_size.w)
flops, params = profile( backbone, inputs=(dump,), )
logger.info(">>> total params of BackBone: {:.2f}M\n>>> total FLOPS of Backbone: {:.3f} G\n".format(
(params / 1000000.0),(flops / 1000000000.0)))
flops, params = profile(Arch, inputs=(dump,), )
logger.info(">>> total params of Whole Model: {:.2f}M\n>>> total FLOPS of Model: {:.3f} G\n".format(
(params / 1000000.0),(flops / 1000000000.0)))
return Arch
|
import six
import redis
from redis import Redis, RedisError
from redis.client import bool_ok
from redis.client import int_or_none
from redis._compat import (long, nativestr)
from redis.exceptions import DataError
class TSInfo(object):
chunk_count = None
labels = []
last_time_stamp = None
max_samples_per_chunk = None
retention_secs = None
rules = []
def __init__(self, args):
self.chunk_count = args['chunkCount']
self.labels = list_to_dict(args['labels'])
self.last_time_stamp = args['lastTimestamp']
self.max_samples_per_chunk = args['maxSamplesPerChunk']
self.retention_secs = args['retentionSecs']
self.rules = args['rules']
def list_to_dict(aList):
return {nativestr(aList[i][0]):nativestr(aList[i][1])
for i in range(len(aList))}
def parse_range(response):
return [tuple((l[0], l[1].decode())) for l in response]
def parse_m_range(response):
res = []
for item in response:
res.append({ nativestr(item[0]) : [list_to_dict(item[1]),
parse_range(item[2])]})
return res
def parse_info(response):
res = dict(zip(map(nativestr, response[::2]), response[1::2]))
info = TSInfo(res)
return info
class Client(Redis): #changed from StrictRedis
"""
This class subclasses redis-py's `Redis` and implements
RedisTimeSeries's commmands (prefixed with "ts").
The client allows to interact with RedisTimeSeries and use all of
it's functionality.
"""
MODULE_INFO = {
'name': 'RedisTimeSeries',
'ver': '0.1.0'
}
CREATE_CMD = 'TS.CREATE'
ADD_CMD = 'TS.ADD'
INCRBY_CMD = 'TS.INCRBY'
DECRBY_CMD = 'TS.DECRBY'
CREATERULE_CMD = 'TS.CREATERULE'
DELETERULE_CMD = 'TS.DELETERULE'
RANGE_CMD = 'TS.RANGE'
MRANGE_CMD = 'TS.MRANGE'
GET_CMD = 'TS.GET'
INFO_CMD = 'TS.INFO'
def __init__(self, *args, **kwargs):
"""
Creates a new RedisTimeSeries client.
"""
Redis.__init__(self, *args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
self.CREATE_CMD : bool_ok,
self.ADD_CMD : int_or_none,
self.INCRBY_CMD : bool_ok,
self.DECRBY_CMD : bool_ok,
self.CREATERULE_CMD : bool_ok,
self.DELETERULE_CMD : bool_ok,
self.RANGE_CMD : parse_range,
self.MRANGE_CMD : parse_m_range,
self.GET_CMD : lambda x: (int(x[0]), float(x[1])),
self.INFO_CMD : parse_info,
}
for k, v in six.iteritems(MODULE_CALLBACKS):
self.set_response_callback(k, v)
@staticmethod
def appendRetention(params, retention):
if retention is not None:
params.extend(['RETENTION', retention])
@staticmethod
def appendTimeBucket(params, time_bucket):
if time_bucket is not None:
params.extend(['RESET', time_bucket])
@staticmethod
def appendLabels(params, labels):
if labels:
params.append('LABELS')
for k, v in labels.items():
params.extend([k,v])
@staticmethod
def appendAggregation(params, aggregation_type,
bucket_size_seconds):
params.append('AGGREGATION')
params.extend([aggregation_type, bucket_size_seconds])
def create(self, key, retention_secs=None, labels={}):
"""
Creates a new time-series ``key`` with ``rententionSecs`` in
seconds and ``labels``.
"""
params = [key]
self.appendRetention(params, retention_secs)
self.appendLabels(params, labels)
return self.execute_command(self.CREATE_CMD, *params)
def add(self, key, timestamp, value,
retention_secs=None, labels={}):
"""
Appends (or creates and appends) a new ``value`` to series
``key`` with ``timestamp``. If ``key`` is created,
``retention_secs`` and ``labels`` are applied.
"""
params = [key, timestamp, value]
self.appendRetention(params, retention_secs)
self.appendLabels(params, labels)
return self.execute_command(self.ADD_CMD, *params)
def incrby(self, key, value, time_bucket=None,
retention_secs=None, labels={}):
"""
Increases latest value in ``key`` by ``value``.
``time_bucket`` resets counter. In seconds.
If ``key`` is created, ``retention_secs`` and ``labels`` are
applied.
"""
params = [key, value]
self.appendTimeBucket(params, time_bucket)
self.appendRetention(params, retention_secs)
self.appendLabels(params, labels)
return self.execute_command(self.INCRBY_CMD, *params)
def decrby(self, key, value, time_bucket=None,
retention_secs=None, labels={}):
"""
Decreases latest value in ``key`` by ``value``.
``time_bucket`` resets counter. In seconds.
If ``key`` is created, ``retention_secs`` and ``labels`` are
applied.
"""
params = [key, value]
self.appendTimeBucket(params, time_bucket)
self.appendRetention(params, retention_secs)
self.appendLabels(params, labels)
return self.execute_command(self.DECRBY_CMD, *params)
def createrule(self, source_key, dest_key,
aggregation_type, bucket_size_seconds):
"""
Creates a compaction rule from values added to ``source_key``
into ``dest_key``. Aggregating for ``bucket_size_seconds`` where an
``aggregation_type`` can be ['avg', 'sum', 'min', 'max',
'range', 'count', 'first', 'last']
"""
params=[source_key, dest_key]
self.appendAggregation(params, aggregation_type, bucket_size_seconds)
return self.execute_command(self.CREATERULE_CMD, *params)
def deleterule(self, source_key, dest_key):
"""Deletes a compaction rule"""
return self.execute_command(self.DELETERULE_CMD, source_key, dest_key)
def range(self, key, from_time, to_time,
aggregation_type=None, bucket_size_seconds=0):
"""
Query a range from ``key``, from ``from_time`` to ``to_time``.
Can Aggregate for ``bucket_size_seconds`` where an ``aggregation_type``
can be ['avg', 'sum', 'min', 'max', 'range', 'count', 'first',
'last']
"""
params = [key, from_time, to_time]
        if aggregation_type is not None:
self.appendAggregation(params, aggregation_type, bucket_size_seconds)
return self.execute_command(self.RANGE_CMD, *params)
def mrange(self, from_time, to_time, filters,
aggregation_type=None, bucket_size_seconds=0):
"""
Query a range based on filters,retention_secs from ``from_time`` to ``to_time``.
``filters`` are a list strings such as ['Test=This'].
Can Aggregate for ``bucket_size_seconds`` where an ``aggregation_type``
can be ['avg', 'sum', 'min', 'max', 'range', 'count', 'first',
'last']
"""
params = [from_time, to_time]
        if aggregation_type is not None:
self.appendAggregation(params, aggregation_type, bucket_size_seconds)
params.extend(['FILTER'])
params += filters
return self.execute_command(self.MRANGE_CMD, *params)
def get(self, key):
"""Gets the last sample of ``key``"""
return self.execute_command(self.GET_CMD, key)
def info(self, key):
"""Gets information of ``key``"""
return self.execute_command(self.INFO_CMD, key)
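# A minimal usage demo (host, key names and values are illustrative; exact
# command arguments depend on the RedisTimeSeries module version you run).
if __name__ == '__main__':
    rts = Client(host='localhost', port=6379)
    rts.create('temperature', retention_secs=3600, labels={'sensor': 'roof'})
    rts.add('temperature', '*', 21.5)   # '*' lets the server pick the timestamp
    print(rts.get('temperature'))       # -> (timestamp, 21.5)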
|
from RLTest import Env
from includes import *
from common import waitForIndex
def testDictAdd():
env = Env()
env.expect('ft.dictadd', 'dict', 'term1', 'term2', 'term3').equal(3)
env.expect('ft.dictadd', 'dict', 'term1', 'term2', 'term4').equal(1)
def testDictAddWrongArity():
env = Env()
env.expect('ft.dictadd', 'dict').raiseError()
def testDictDelete():
env = Env()
env.expect('ft.dictadd', 'dict', 'term1', 'term2', 'term3').equal(3)
env.expect('ft.dictdel', 'dict', 'term1', 'term2', 'term4').equal(2)
env.expect('ft.dictdel', 'dict', 'term3').equal(1)
env.expect('keys', '*').equal([])
def testDictDeleteOnFlush():
env = Env()
env.expect('ft.dictadd', 'dict', 'term1', 'term2', 'term3').equal(3)
env.expect('FLUSHALL').equal(True)
env.expect('ft.dictdump', 'dict').error().contains('could not open dict')
env.expect('ft.dictadd', 'dict', 'term4', 'term5', 'term6').equal(3)
env.expect('ft.dictdump', 'dict').equal(['term4', 'term5', 'term6'])
def testDictDeleteWrongArity():
env = Env()
env.expect('ft.dictdel', 'dict').raiseError()
def testDictDeleteOnNoneExistingKey():
env = Env()
env.expect('ft.dictdel', 'dict', 'term1').equal(0)
def testDictDump():
env = Env()
env.expect('ft.dictadd', 'dict', 'term1', 'term2', 'term3').equal(3)
env.expect('ft.dictdump', 'dict').equal(['term1', 'term2', 'term3'])
def testDictDumpWrongArity():
env = Env()
env.expect('ft.dictdump').raiseError()
def testDictDumpOnNoneExistingKey():
env = Env()
env.expect('ft.dictdump', 'dict').raiseError()
def testBasicSpellCheck():
env = Env()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
env.expect('ft.spellcheck', 'idx', 'name').equal([['TERM', 'name',
[['0.66666666666666663', 'name2'], ['0.33333333333333331', 'name1']]]])
if not env.isCluster():
env.expect('ft.spellcheck', 'idx', '@body:name').equal([['TERM', 'name', [['0.66666666666666663', 'name2']]]])
def testBasicSpellCheckWithNoResult():
env = Env()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
env.expect('ft.spellcheck', 'idx', 'somenotexiststext').equal([['TERM', 'somenotexiststext', []]])
def testSpellCheckOnExistingTerm():
env = Env()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
env.expect('ft.spellcheck', 'idx', 'name').equal([])
def testSpellCheckWithIncludeDict():
env = Env()
env.cmd('ft.dictadd', 'dict', 'name3', 'name4', 'name5')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'INCLUDE', 'dict').equal([['TERM', 'name',
[['0.66666666666666663', 'name2'],
['0.33333333333333331', 'name1'],
['0', 'name3'], ['0', 'name4'],
['0', 'name5']]]])
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'include', 'dict').equal([['TERM', 'name',
[['0.66666666666666663', 'name2'],
['0.33333333333333331', 'name1'],
['0', 'name3'], ['0', 'name4'],
['0', 'name5']]]])
def testSpellCheckWithDuplications():
env = Env()
env.cmd('ft.dictadd', 'dict', 'name1', 'name4', 'name5')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'INCLUDE', 'dict').equal([['TERM', 'name',
[['0.66666666666666663', 'name2'],
['0.33333333333333331', 'name1'],
['0', 'name4'], ['0', 'name5']]]])
def testSpellCheckExcludeDict():
env = Env()
env.cmd('ft.dictadd', 'dict', 'name')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'EXCLUDE', 'dict').equal([])
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'exclude', 'dict').equal([])
def testSpellCheckNoneExistingIndex():
env = Env()
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'EXCLUDE', 'dict').raiseError()
def testSpellCheckWrongArity():
env = Env()
env.cmd('ft.dictadd', 'dict', 'name')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
    env.expect('ft.spellcheck', 'idx').raiseError()
def testSpellCheckBadFormat():
env = Env()
env.cmd('ft.dictadd', 'dict', 'name')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS').raiseError()
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'INCLUDE').raiseError()
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'EXCLUDE').raiseError()
env.expect('ft.spellcheck', 'idx', 'name', 'DISTANCE').raiseError()
env.expect('ft.spellcheck', 'idx', 'name', 'DISTANCE', 0).raiseError()
env.expect('ft.spellcheck', 'idx', 'name', 'DISTANCE', -1).raiseError()
env.expect('ft.spellcheck', 'idx', 'name', 'DISTANCE', 101).raiseError()
def testSpellCheckNoneExistingDicts():
env = Env()
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'name1', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'name2', 'body', 'body2')
env.cmd('ft.add', 'idx', 'doc3', 1.0, 'FIELDS', 'name', 'name2', 'body', 'name2')
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'INCLUDE', 'dict').raiseError()
env.expect('ft.spellcheck', 'idx', 'name', 'TERMS', 'EXCLUDE', 'dict').raiseError()
def testSpellCheckResultsOrder():
env = Env()
env.cmd('ft.dictadd', 'dict', 'name')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'name', 'TEXT', 'body', 'TEXT')
waitForIndex(env, 'idx')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'FIELDS', 'name', 'Elior', 'body', 'body1')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'FIELDS', 'name', 'Hila', 'body', 'body2')
env.expect('ft.spellcheck', 'idx', 'Elioh Hilh').equal([['TERM', 'elioh', [['0.5', 'elior']]], ['TERM', 'hilh', [['0.5', 'hila']]]])
def testSpellCheckDictReleadRDB():
env = Env()
env.expect('FT.DICTADD test 1 2 3').equal(3)
for _ in env.retry_with_rdb_reload():
env.expect('FT.DICTDUMP test').equal(['1', '2', '3'])
def testSpellCheckIssue437():
env = Env()
env.cmd('ft.create', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'text')
env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
env.expect('FT.SPELLCHECK', 'incidents',
'Tooni toque kerfuffle', 'TERMS',
'EXCLUDE', 'slang', 'TERMS',
'INCLUDE', 'slang').equal([['TERM', 'tooni', [['0', 'toonie']]]])
def test_spell_check_dialect_errors(env):
env.cmd('ft.create', 'idx', 'SCHEMA', 't', 'text')
env.expect('FT.SPELLCHECK', 'idx', 'Tooni toque kerfuffle', 'DIALECT').error().contains("Need an argument for DIALECT")
env.expect('FT.SPELLCHECK', 'idx', 'Tooni toque kerfuffle', 'DIALECT', 0).error().contains("DIALECT requires a non negative integer >=1 and <= 2")
env.expect('FT.SPELLCHECK', 'idx', 'Tooni toque kerfuffle', 'DIALECT', 3).error().contains("DIALECT requires a non negative integer >=1 and <= 2")
|
import numpy as np
import rawpy
import imageio
import os
import cv2
import ncempy.io as nio
import matplotlib.pyplot as plt
# -- Osmo action use DNG as the raw format.
class DNGReader:
@staticmethod
def imread(folder_path, str_format, start_idx, end_idx, step, log_compress=False, im_format='DNG'):
img_stack = []
for img_id in range(start_idx, end_idx+1, step):
file_name = os.path.join(folder_path, str_format.format(img_id))
if im_format == 'DNG':
with rawpy.imread(file_name) as raw:
img = raw.postprocess(gamma=(1, 1), no_auto_bright=True, no_auto_scale=True, output_bps=16)
img_stack.append(img)
elif im_format == 'JPG' or im_format == "PNG":
img = imageio.imread(file_name)
img_stack.append(img)
        img_stack = np.array(img_stack).astype(np.float64)  # np.float was removed in NumPy 1.24
mean_img = np.mean(img_stack, axis=0)
if log_compress:
img_stack = np.log(img_stack)
mean_img = np.log(mean_img)
return img_stack, mean_img
class SERReader:
@staticmethod
def imread(file_path, im_format='SER'):
if im_format == 'AVI':
cap = cv2.VideoCapture(file_path)
frame_stack = []
frame_len = 0
while True:
ret, frame = cap.read()
if not ret:
break
frame_stack.append(frame)
frame_len += 1
cap.release()
return frame_stack, frame_len
elif im_format == 'SER':
with nio.ser.fileSER(file_path) as ser1:
                data, metadata = ser1.getDataset(0)
            return data, metadata
class FolderReader:
@staticmethod
def imread(path):
img_list = os.listdir(path)
img_list.sort(key=lambda x: int(x.split('_')[0]))
frame_stack = []
for name in img_list:
img = cv2.imread(os.path.join(path, name))
frame_stack.append(img)
return frame_stack
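# A minimal usage demo (folder layout and filename pattern are illustrative).
if __name__ == '__main__':
    stack, mean_img = DNGReader.imread('shots', 'DJI_{:04d}.DNG',
                                       start_idx=1, end_idx=5, step=1)
    print(stack.shape, mean_img.shape)   # (5, H, W, 3) and (H, W, 3)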
|
def mySqrt(x):
    start = 0
    end = x
    while start <= end:
        mid = start + (end - start) // 2
        if mid * mid == x:
            return mid
        elif mid * mid < x:
            start = mid + 1
        else:
            end = mid - 1
    # no exact root: end now holds floor(sqrt(x))
    return end
for i in range(17):
print(i, mySqrt(i))
# rotated_array
def find_pivot(rotated_array):
    start = 0
    end = len(rotated_array) - 1
    while start <= end:
        mid = start + (end - start) // 2
        # print(start, mid, end)
        if mid < end and rotated_array[mid] > rotated_array[mid + 1]:
            return mid
        elif mid > start and rotated_array[mid] < rotated_array[mid - 1]:
            return mid
        elif rotated_array[start] > rotated_array[mid]:
            end = mid - 1
        else:
            start = mid + 1
    return -1  # the array is not rotated
def binary_search(array, start, end, target):
    while start <= end:
        mid = start + (end - start) // 2
        if array[mid] == target:
            return mid
        elif array[mid] > target:
            end = mid - 1
        else:
            start = mid + 1
    return -1  # target not in array[start:end + 1]
def search_rotated_array(array, target):
pivot = find_pivot(array)
print(f"printing the pivot: {pivot}")
if pivot == -1:
return binary_search(array, 0, len(array) - 1, target)
elif array[pivot] == target:
return pivot
elif target >= array[0]:
return binary_search(array, 0, pivot - 1, target)
elif target < array[0]:
return binary_search(array, pivot + 1, len(array) - 1, target)
print("searching in a rotated array")
nums = [9, 1, 2, 3, 4, 5, 7, 8]
target = 7
print(find_pivot(nums))
print(search_rotated_array(nums, target))
# https://leetcode.com/explore/learn/card/binary-search/126/template-ii/947/
# find first bad version
# You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of
# your product fails the quality check. Since each version is developed based on the previous version,
# all the versions after a bad version are also bad.
#
# Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the
# following ones to be bad.
#
# You are given an API bool isBadVersion(version) which returns whether version is bad. Implement a function to
# find the first bad version. You should minimize the number of calls to the API.
def is_bad_version(total_versions, first_bad):
    # Build the version list: True marks a still-good version, False marks
    # the first bad version and every one after it.
    return [i < first_bad for i in range(1, total_versions + 1)]
no_of_versions = 10
first_bad_version = 10
versions = is_bad_version(no_of_versions, first_bad_version)
print(versions)
def first_bad_version(list_of_versions):
start = 0
end = len(list_of_versions)-1
while start <= end:
mid = start + (end - start) // 2
print(start, mid, end)
print(f"list_of_versions[{mid}] is {list_of_versions[mid]}")
if start == mid:
return end + 1
if list_of_versions[mid]:
start = mid
else:
end = mid - 1
print(start, mid, end)
print("--------------------------------------")
return end + 1
print("printing the position of first bad version")
print(first_bad_version(versions))
|
import os
import numpy as np
from geospace.gdal_calc import Calc
from geospace._const import CREATION
from geospace.raster import mosaic
from geospace.utils import ds_name, context_file
from multiprocessing import Pool, cpu_count
from collections.abc import Iterable
def band_map(i, ras_multi, band_idx_multi, calc_arg, out_file):
tem_file = os.path.join(os.path.dirname(out_file),
'_temp_' + os.path.splitext(
os.path.basename(out_file))[0] +
'_' + str(i) + '.tif')
if os.path.exists(tem_file):
return tem_file
ras_args = {chr(i + 65): ras
for i, ras in enumerate(ras_multi)}
band_args = {chr(i + 65) + '_band': int(band_idx)
for i, band_idx in enumerate(band_idx_multi)}
input_args = {**ras_args, **band_args}
Calc(calc_arg, tem_file, creation_options=CREATION,
quiet=True, **input_args)
return tem_file
def check_iter(ds_multi, calc_args, band_idxs):
iter_ds_multi = isinstance(
ds_multi, Iterable) and not isinstance(ds_multi, str)
iter_calc_args = isinstance(
calc_args, Iterable) and not isinstance(calc_args, str)
if band_idxs is not None:
if iter_ds_multi:
iter_band_idxs = isinstance(band_idxs[0], Iterable)
else:
iter_band_idxs = isinstance(band_idxs, Iterable)
else:
iter_band_idxs = False
return iter_ds_multi, iter_calc_args, iter_band_idxs
def broadcast_args(ds_multi, calc_args, band_idxs):
iter_ds_multi, iter_calc_args, iter_band_idxs = check_iter(
ds_multi, calc_args, band_idxs)
if iter_ds_multi:
ds = ds_multi[0]
else:
ds = ds_multi
ds, ras = ds_name(ds)
if band_idxs is not None:
if iter_band_idxs and iter_calc_args:
if len(band_idxs) != len(calc_args):
raise Exception(
'length of band list not equal to that of calc args')
elif iter_band_idxs:
calc_args = [calc_args] * len(band_idxs)
elif iter_calc_args:
band_idxs = [band_idxs] * len(calc_args)
else:
calc_args = [calc_args]
band_idxs = [band_idxs]
else:
n_band = ds.RasterCount
if iter_calc_args:
if len(calc_args) != n_band:
raise Exception(
'calc args length not equal to band counts')
else:
calc_args = [calc_args] * n_band
if iter_ds_multi:
band_idxs = np.repeat(
np.arange(1, n_band + 1, dtype=int), len(ds_multi)).reshape(-1, len(ds_multi))
else:
band_idxs = np.arange(1, n_band + 1, dtype=int)
if not iter_ds_multi:
ds_multi = [ras]
band_idxs = np.array(band_idxs).reshape(len(band_idxs), 1)
return ds_multi, calc_args, band_idxs
def map_calc(ds_multi, calc_args, out_path, band_idxs=None, multiprocess=True):
iter_ds_multi = isinstance(
ds_multi, Iterable) and not isinstance(ds_multi, str)
if iter_ds_multi:
ds = ds_multi[0]
else:
ds = ds_multi
ds, ras = ds_name(ds)
out_file = context_file(ras, out_path)
if os.path.exists(out_file):
return out_file
ds_multi, calc_args, band_idxs = broadcast_args(
ds_multi, calc_args, band_idxs)
n = len(calc_args)
args = zip(np.arange(1, n + 1, dtype=int),
[ds_multi] * n, band_idxs,
calc_args, [out_file] * n)
if multiprocess:
with Pool(min(cpu_count() - 1, n)) as p:
tem_files = p.starmap(band_map, args)
else:
tem_files = []
for arg in args:
tem_files.append(band_map(*arg))
if len(tem_files) == 1:
os.rename(tem_files[0], out_file)
else:
mosaic(tem_files, out_file, separate=True)
[os.remove(f) for f in tem_files]
return out_file
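# A hedged usage sketch (paths are illustrative): with band_idxs=None the
# single calc expression is broadcast over every band of the input raster,
# each band is computed in a worker, and the results are mosaicked.
if __name__ == '__main__':
    out = map_calc('input.tif', 'A * 0.1', 'scaled/', multiprocess=False)
    print(out)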
|
from django import forms
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from pretalx.submission.models import Submission, SubmissionType
class InfoForm(forms.ModelForm):
def __init__(self, event, **kwargs):
self.event = event
readonly = kwargs.pop('readonly', False)
super().__init__(**kwargs)
instance = kwargs.get('instance')
self.initial['submission_type'] = getattr(instance, 'submission_type', self.event.cfp.default_type)
_now = now()
if not self.event.cfp.deadline or self.event.cfp.deadline >= _now: # No global deadline or still open
types = self.event.submission_types.exclude(deadline__lt=_now)
else:
types = self.event.submission_types.filter(deadline__gte=_now)
pks = set(types.values_list('pk', flat=True))
if instance and instance.pk:
pks |= {instance.submission_type.pk, }
self.fields['submission_type'].queryset = self.event.submission_types.filter(pk__in=pks)
locale_names = dict(settings.LANGUAGES)
self.fields['content_locale'].choices = [(a, locale_names[a]) for a in self.event.locales]
if readonly:
for f in self.fields.values():
f.disabled = True
class Meta:
model = Submission
fields = [
'title', 'submission_type', 'content_locale', 'abstract',
'description', 'notes', 'do_not_record', 'image',
]
class SubmissionFilterForm(forms.ModelForm):
def __init__(self, event, *args, **kwargs):
self.event = event
super().__init__(*args, **kwargs)
self.fields['submission_type'].queryset = self.fields['submission_type'].queryset.filter(event=event)
self.fields['submission_type'].required = False
self.fields['state'].required = False
self.fields['state'].choices = [('', _('All states'))] + self.fields['state'].choices
self.fields['state'].initial = ''
class Meta:
model = Submission
fields = [
'submission_type', 'state',
]
|
'''
UCCSD with spatial integrals
'''
import time
import tempfile
import numpy
import numpy as np
import h5py
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.cc import rccsd
from pyscf.lib import linalg_helper
from pyscf.cc import uintermediates as imd  # Python 2 implicit relative import made explicit
from pyscf.cc.addons import spatial2spin, spin2spatial
#einsum = np.einsum
einsum = lib.einsum
# This is unrestricted (U)CCSD, i.e. spin-orbital form.
def kernel(cc, eris, t1=None, t2=None, max_cycle=50, tol=1e-8, tolnormt=1e-6,
verbose=logger.INFO):
"""Exactly the same as pyscf.cc.ccsd.kernel, which calls a
*local* energy() function."""
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(cc.stdout, verbose)
r1, r2 = cc.init_amps(eris)[1:]
if t1 is None:
t1 = r1
if t2 is None:
t2 = r2
r1 = r2 = None
    cput1 = cput0 = (time.process_time(), time.time())  # time.clock() was removed in Python 3.8
eold = 0
eccsd = 0
if cc.diis:
adiis = lib.diis.DIIS(cc, cc.diis_file)
adiis.space = cc.diis_space
conv = False
for istep in range(max_cycle):
t1new, t2new = cc.update_amps(t1, t2, eris)
vec = cc.amplitudes_to_vector(t1new, t2new)
normt = np.linalg.norm(vec - cc.amplitudes_to_vector(t1, t2))
t1, t2 = t1new, t2new
t1new = t2new = None
if cc.diis:
if (istep > cc.diis_start_cycle and
abs(eccsd-eold) < cc.diis_start_energy_diff):
vec = adiis.update(vec)
t1, t2 = cc.vector_to_amplitudes(vec)
log.debug1('DIIS for step %d', istep)
vec = None
eold, eccsd = eccsd, energy(cc, t1, t2, eris)
log.info('istep = %d E(CCSD) = %.15g dE = %.9g norm(t1,t2) = %.6g',
istep, eccsd, eccsd - eold, normt)
cput1 = log.timer('CCSD iter', *cput1)
if abs(eccsd-eold) < tol and normt < tolnormt:
conv = True
break
log.timer('CCSD', *cput0)
return conv, eccsd, t1, t2
def update_amps(cc, t1, t2, eris):
    time0 = time.process_time(), time.time()
log = logger.Logger(cc.stdout, cc.verbose)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
u1a = np.zeros_like(t1a)
u1b = np.zeros_like(t1b)
u2aa = np.zeros_like(t2aa)
u2ab = np.zeros_like(t2ab)
u2bb = np.zeros_like(t2bb)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
Fooa = fooa - np.diag(np.diag(fooa))
Foob = foob - np.diag(np.diag(foob))
Fvva = fvva - np.diag(np.diag(fvva))
Fvvb = fvvb - np.diag(np.diag(fvvb))
Fooa += .5 * lib.einsum('me,ie->mi', fova, t1a)
Foob += .5 * lib.einsum('me,ie->mi', fovb, t1b)
Fvva -= .5 * lib.einsum('me,ma->ae', fova, t1a)
Fvvb -= .5 * lib.einsum('me,ma->ae', fovb, t1b)
wovvo = np.zeros((nocca,nvira,nvira,nocca))
wOVVO = np.zeros((noccb,nvirb,nvirb,noccb))
woVvO = np.zeros((nocca,nvirb,nvira,noccb))
woVVo = np.zeros((nocca,nvirb,nvirb,nocca))
wOvVo = np.zeros((noccb,nvira,nvirb,nocca))
wOvvO = np.zeros((noccb,nvira,nvira,noccb))
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
ovvv = ovvv - ovvv.transpose(0,3,2,1)
Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
wovvo[p0:p1] += einsum('jf,mebf->mbej', t1a, ovvv)
u1a += 0.5*lib.einsum('mief,meaf->ia', t2aa[p0:p1], ovvv)
u2aa[:,p0:p1] += lib.einsum('ie,mbea->imab', t1a, ovvv.conj())
tmp1aa = lib.einsum('ijef,mebf->ijmb', tauaa, ovvv)
u2aa -= lib.einsum('ijmb,ma->ijab', tmp1aa, t1a[p0:p1]*.5)
ovvv = tmp1aa = None
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
OVVV = OVVV - OVVV.transpose(0,3,2,1)
Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
wOVVO[p0:p1] = einsum('jf,mebf->mbej', t1b, OVVV)
u1b += 0.5*lib.einsum('MIEF,MEAF->IA', t2bb[p0:p1], OVVV)
u2bb[:,p0:p1] += lib.einsum('ie,mbea->imab', t1b, OVVV.conj())
tmp1bb = lib.einsum('ijef,mebf->ijmb', taubb, OVVV)
u2bb -= lib.einsum('ijmb,ma->ijab', tmp1bb, t1b[p0:p1]*.5)
OVVV = tmp1bb = None
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
woVvO[p0:p1] = einsum('JF,meBF->mBeJ', t1b, ovVV)
woVVo[p0:p1] = einsum('jf,mfBE->mBEj',-t1a, ovVV)
u1b += lib.einsum('mIeF,meAF->IA', t2ab[p0:p1], ovVV)
u2ab[p0:p1] += lib.einsum('IE,maEB->mIaB', t1b, ovVV.conj())
tmp1ab = lib.einsum('iJeF,meBF->iJmB', tauab, ovVV)
u2ab -= lib.einsum('iJmB,ma->iJaB', tmp1ab, t1a[p0:p1])
ovVV = tmp1ab = None
blksize = max(int(max_memory*1e6/8/(nvirb*nocca**2*3)), 2)
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
wOvVo[p0:p1] = einsum('jf,MEbf->MbEj', t1a, OVvv)
wOvvO[p0:p1] = einsum('JF,MFbe->MbeJ',-t1b, OVvv)
u1a += lib.einsum('iMfE,MEaf->ia', t2ab[:,p0:p1], OVvv)
u2ab[:,p0:p1] += lib.einsum('ie,MBea->iMaB', t1a, OVvv.conj())
tmp1abba = lib.einsum('iJeF,MFbe->iJbM', tauab, OVvv)
u2ab -= lib.einsum('iJbM,MA->iJbA', tmp1abba, t1b[p0:p1])
OVvv = tmp1abba = None
eris_ovov = np.asarray(eris.ovov)
eris_ooov = np.asarray(eris.ooov)
Woooo = lib.einsum('je,mine->mnij', t1a, eris_ooov)
Woooo = Woooo - Woooo.transpose(0,1,3,2)
Woooo += np.asarray(eris.oooo).transpose(0,2,1,3)
Woooo += lib.einsum('ijef,menf->mnij', tauaa, eris_ovov) * .5
u2aa += lib.einsum('mnab,mnij->ijab', tauaa, Woooo*.5)
Woooo = tauaa = None
ooov = eris_ooov - eris_ooov.transpose(2,1,0,3)
Fooa += np.einsum('ne,mine->mi', t1a, ooov)
u1a += 0.5*lib.einsum('mnae,nime->ia', t2aa, ooov)
wovvo += einsum('nb,mjne->mbej', t1a, ooov)
ooov = eris_ooov = None
tilaa = make_tau_aa(t2[0], t1a, t1a, fac=0.5)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
Fvva -= .5 * einsum('mnaf,menf->ae', tilaa, ovov)
Fooa += .5 * einsum('inef,menf->mi', tilaa, ovov)
Fova = np.einsum('nf,menf->me',t1a, ovov)
u2aa += ovov.conj().transpose(0,2,1,3) * .5
wovvo -= 0.5*einsum('jnfb,menf->mbej', t2aa, ovov)
woVvO += 0.5*einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmpaa = einsum('jf,menf->mnej', t1a, ovov)
wovvo -= einsum('nb,mnej->mbej', t1a, tmpaa)
eris_ovov = ovov = tmpaa = tilaa = None
eris_OVOV = np.asarray(eris.OVOV)
eris_OOOV = np.asarray(eris.OOOV)
WOOOO = lib.einsum('je,mine->mnij', t1b, eris_OOOV)
WOOOO = WOOOO - WOOOO.transpose(0,1,3,2)
WOOOO += np.asarray(eris.OOOO).transpose(0,2,1,3)
WOOOO += lib.einsum('ijef,menf->mnij', taubb, eris_OVOV) * .5
u2bb += lib.einsum('mnab,mnij->ijab', taubb, WOOOO*.5)
WOOOO = taubb = None
OOOV = eris_OOOV - eris_OOOV.transpose(2,1,0,3)
Foob += np.einsum('ne,mine->mi', t1b, OOOV)
u1b += 0.5*lib.einsum('mnae,nime->ia', t2bb, OOOV)
wOVVO += einsum('nb,mjne->mbej', t1b, OOOV)
OOOV = eris_OOOV = None
tilbb = make_tau_aa(t2[2], t1b, t1b, fac=0.5)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Fvvb -= .5 * einsum('MNAF,MENF->AE', tilbb, OVOV)
Foob += .5 * einsum('inef,menf->mi', tilbb, OVOV)
Fovb = np.einsum('nf,menf->me',t1b, OVOV)
u2bb += OVOV.conj().transpose(0,2,1,3) * .5
wOVVO -= 0.5*einsum('jnfb,menf->mbej', t2bb, OVOV)
wOvVo += 0.5*einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmpbb = einsum('jf,menf->mnej', t1b, OVOV)
wOVVO -= einsum('nb,mnej->mbej', t1b, tmpbb)
eris_OVOV = OVOV = tmpbb = tilbb = None
eris_ooOV = np.asarray(eris.ooOV)
eris_OOov = np.asarray(eris.OOov)
Fooa += np.einsum('NE,miNE->mi', t1b, eris_ooOV)
u1a -= lib.einsum('nMaE,niME->ia', t2ab, eris_ooOV)
wOvVo -= einsum('nb,njME->MbEj', t1a, eris_ooOV)
woVVo += einsum('NB,mjNE->mBEj', t1b, eris_ooOV)
Foob += np.einsum('ne,MIne->MI', t1a, eris_OOov)
u1b -= lib.einsum('mNeA,NIme->IA', t2ab, eris_OOov)
woVvO -= einsum('NB,NJme->mBeJ', t1b, eris_OOov)
wOvvO += einsum('nb,MJne->MbeJ', t1a, eris_OOov)
WoOoO = lib.einsum('JE,miNE->mNiJ', t1b, eris_ooOV)
WoOoO+= lib.einsum('je,MIne->nMjI', t1a, eris_OOov)
WoOoO += np.asarray(eris.ooOO).transpose(0,2,1,3)
eris_ooOV = eris_OOov = None
eris_ovOV = np.asarray(eris.ovOV)
WoOoO += lib.einsum('iJeF,meNF->mNiJ', tauab, eris_ovOV)
u2ab += lib.einsum('mNaB,mNiJ->iJaB', tauab, WoOoO)
WoOoO = None
tilab = make_tau_ab(t2[1], t1 , t1 , fac=0.5)
Fvva -= einsum('mNaF,meNF->ae', tilab, eris_ovOV)
Fvvb -= einsum('nMfA,nfME->AE', tilab, eris_ovOV)
Fooa += einsum('iNeF,meNF->mi', tilab, eris_ovOV)
Foob += einsum('nIfE,nfME->MI', tilab, eris_ovOV)
Fova+= np.einsum('NF,meNF->me',t1b, eris_ovOV)
Fovb+= np.einsum('nf,nfME->ME',t1a, eris_ovOV)
u2ab += eris_ovOV.conj().transpose(0,2,1,3)
wovvo += 0.5*einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
wOVVO += 0.5*einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
wOvVo -= 0.5*einsum('jnfb,nfME->MbEj', t2aa, eris_ovOV)
woVvO -= 0.5*einsum('JNFB,meNF->mBeJ', t2bb, eris_ovOV)
woVVo += 0.5*einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
wOvvO += 0.5*einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpabab = einsum('JF,meNF->mNeJ', t1b, eris_ovOV)
tmpbaba = einsum('jf,nfME->MnEj', t1a, eris_ovOV)
woVvO -= einsum('NB,mNeJ->mBeJ', t1b, tmpabab)
wOvVo -= einsum('nb,MnEj->MbEj', t1a, tmpbaba)
woVVo += einsum('NB,NmEj->mBEj', t1b, tmpbaba)
wOvvO += einsum('nb,nMeJ->MbeJ', t1a, tmpabab)
tmpabab = tmpbaba = tilab = None
u1a += fova.conj()
u1a += np.einsum('ie,ae->ia',t1a,Fvva)
u1a -= np.einsum('ma,mi->ia',t1a,Fooa)
u1a -= np.einsum('imea,me->ia', t2aa, Fova)
u1a += np.einsum('iMaE,ME->ia', t2ab, Fovb)
u1b += fovb.conj()
u1b += np.einsum('ie,ae->ia',t1b,Fvvb)
u1b -= np.einsum('ma,mi->ia',t1b,Foob)
u1b -= np.einsum('imea,me->ia', t2bb, Fovb)
u1b += np.einsum('mIeA,me->IA', t2ab, Fova)
eris_oovv = np.asarray(eris.oovv)
eris_ovvo = np.asarray(eris.ovvo)
wovvo -= eris_oovv.transpose(0,2,3,1)
wovvo += eris_ovvo.transpose(0,2,1,3)
oovv = eris_oovv - eris_ovvo.transpose(0,3,2,1)
u1a-= np.einsum('nf,niaf->ia', t1a, oovv)
tmp1aa = lib.einsum('ie,mjbe->mbij', t1a, oovv)
u2aa += 2*lib.einsum('ma,mbij->ijab', t1a, tmp1aa)
eris_ovvo = eris_oovv = oovv = tmp1aa = None
eris_OOVV = np.asarray(eris.OOVV)
eris_OVVO = np.asarray(eris.OVVO)
wOVVO -= eris_OOVV.transpose(0,2,3,1)
wOVVO += eris_OVVO.transpose(0,2,1,3)
OOVV = eris_OOVV - eris_OVVO.transpose(0,3,2,1)
u1b-= np.einsum('nf,niaf->ia', t1b, OOVV)
tmp1bb = lib.einsum('ie,mjbe->mbij', t1b, OOVV)
u2bb += 2*lib.einsum('ma,mbij->ijab', t1b, tmp1bb)
eris_OVVO = eris_OOVV = OOVV = None
eris_ooVV = np.asarray(eris.ooVV)
eris_ovVO = np.asarray(eris.ovVO)
woVVo -= eris_ooVV.transpose(0,2,3,1)
woVvO += eris_ovVO.transpose(0,2,1,3)
u1b+= np.einsum('nf,nfAI->IA', t1a, eris_ovVO)
tmp1ab = lib.einsum('ie,meBJ->mBiJ', t1a, eris_ovVO)
tmp1ab+= lib.einsum('IE,mjBE->mBjI', t1b, eris_ooVV)
u2ab -= lib.einsum('ma,mBiJ->iJaB', t1a, tmp1ab)
eris_ooVV = eris_ovVO = tmp1ab = None
eris_OOvv = np.asarray(eris.OOvv)
eris_OVvo = np.asarray(eris.OVvo)
wOvvO -= eris_OOvv.transpose(0,2,3,1)
wOvVo += eris_OVvo.transpose(0,2,1,3)
u1a+= np.einsum('NF,NFai->ia', t1b, eris_OVvo)
tmp1ba = lib.einsum('IE,MEbj->MbIj', t1b, eris_OVvo)
tmp1ba+= lib.einsum('ie,MJbe->MbJi', t1a, eris_OOvv)
u2ab -= lib.einsum('MA,MbIj->jIbA', t1b, tmp1ba)
eris_OOvv = eris_OVvo = tmp1ba = None
u2aa += 2*lib.einsum('imae,mbej->ijab', t2aa, wovvo)
u2aa += 2*lib.einsum('iMaE,MbEj->ijab', t2ab, wOvVo)
u2bb += 2*lib.einsum('imae,mbej->ijab', t2bb, wOVVO)
u2bb += 2*lib.einsum('mIeA,mBeJ->IJAB', t2ab, woVvO)
u2ab += lib.einsum('imae,mBeJ->iJaB', t2aa, woVvO)
u2ab += lib.einsum('iMaE,MBEJ->iJaB', t2ab, wOVVO)
u2ab += lib.einsum('iMeA,MbeJ->iJbA', t2ab, wOvvO)
u2ab += lib.einsum('IMAE,MbEj->jIbA', t2bb, wOvVo)
u2ab += lib.einsum('mIeA,mbej->jIbA', t2ab, wovvo)
u2ab += lib.einsum('mIaE,mBEj->jIaB', t2ab, woVVo)
wovvo = wOVVO = woVvO = wOvVo = woVVo = wOvvO = None
Ftmpa = Fvva - .5*lib.einsum('mb,me->be',t1a,Fova)
Ftmpb = Fvvb - .5*lib.einsum('mb,me->be',t1b,Fovb)
u2aa += lib.einsum('ijae,be->ijab', t2aa, Ftmpa)
u2bb += lib.einsum('ijae,be->ijab', t2bb, Ftmpb)
u2ab += lib.einsum('iJaE,BE->iJaB', t2ab, Ftmpb)
u2ab += lib.einsum('iJeA,be->iJbA', t2ab, Ftmpa)
Ftmpa = Fooa + 0.5*lib.einsum('je,me->mj', t1a, Fova)
Ftmpb = Foob + 0.5*lib.einsum('je,me->mj', t1b, Fovb)
u2aa -= lib.einsum('imab,mj->ijab', t2aa, Ftmpa)
u2bb -= lib.einsum('imab,mj->ijab', t2bb, Ftmpb)
u2ab -= lib.einsum('iMaB,MJ->iJaB', t2ab, Ftmpb)
u2ab -= lib.einsum('mIaB,mj->jIaB', t2ab, Ftmpa)
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvira)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:u2aa += lib.einsum('ijef,aebf->ijab', tauaa, eris_vvvv) * .5
#:u2bb += lib.einsum('ijef,aebf->ijab', taubb, eris_VVVV) * .5
#:u2ab += lib.einsum('iJeF,aeBF->iJaB', tauab, eris_vvVV)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
_add_vvvv_(cc, (tauaa,tauab,taubb), eris, (u2aa,u2ab,u2bb))
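# The "#:" block above is the dense reference implementation; _add_vvvv_
# performs the same particle-particle ladder contractions directly on the
# triangular-packed vvvv integrals, avoiding the O(nvir^4) unpacked arrays.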
eris_oovo = numpy.asarray(eris.oovo)
eris_OOVO = numpy.asarray(eris.OOVO)
eris_ooVO = numpy.asarray(eris.ooVO)
eris_OOvo = numpy.asarray(eris.OOvo)
oovo = eris_oovo - eris_oovo.transpose(0,3,2,1)
OOVO = eris_OOVO - eris_OOVO.transpose(0,3,2,1)
u2aa -= lib.einsum('ma,mibj->ijab', t1a, oovo)
u2bb -= lib.einsum('ma,mibj->ijab', t1b, OOVO)
u2ab -= lib.einsum('ma,miBJ->iJaB', t1a, eris_ooVO)
u2ab -= lib.einsum('MA,MJbi->iJbA', t1b, eris_OOvo)
eris_oovo = eris_ooVO = eris_OOVO = eris_OOvo = None
u2aa *= .5
u2bb *= .5
u2aa = u2aa - u2aa.transpose(0,1,3,2)
u2aa = u2aa - u2aa.transpose(1,0,2,3)
u2bb = u2bb - u2bb.transpose(0,1,3,2)
u2bb = u2bb - u2bb.transpose(1,0,2,3)
eia_a = lib.direct_sum('i-a->ia', fooa.diagonal(), fvva.diagonal())
eia_b = lib.direct_sum('i-a->ia', foob.diagonal(), fvvb.diagonal())
u1a /= eia_a
u1b /= eia_b
u2aa /= lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
u2ab /= lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
u2bb /= lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
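# The divisions above are Jacobi-style updates: each residual is scaled by an
# MP2-type energy denominator D_ijab = e_i + e_j - e_a - e_b.  lib.direct_sum
# builds these denominators by signed outer summation, e.g.
#   D = lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
#   # D[i,j,a,b] == eia_a[i,a] + eia_b[j,b]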
time0 = log.timer_debug1('update t1 t2', *time0)
t1new = u1a, u1b
t2new = u2aa, u2ab, u2bb
return t1new, t2new
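# energy() below evaluates the standard (U)CCSD correlation energy,
#   E_corr = sum_ia f_ia t_ia + 1/4 sum <ij||ab> tau_ijab,
#   tau_ijab = t_ijab + t_ia*t_jb - t_ib*t_ja,
# spin block by spin block; the paired einsums with opposite signs supply the
# antisymmetrized <ij||ab>, and the opposite-spin block needs neither the 1/4
# factor nor an exchange term.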
def energy(cc, t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
e = np.einsum('ia,ia', fova, t1a)
e += np.einsum('ia,ia', fovb, t1b)
e += 0.25*np.einsum('ijab,iajb',t2aa,eris_ovov)
e -= 0.25*np.einsum('ijab,ibja',t2aa,eris_ovov)
e += 0.25*np.einsum('ijab,iajb',t2bb,eris_OVOV)
e -= 0.25*np.einsum('ijab,ibja',t2bb,eris_OVOV)
e += np.einsum('iJaB,iaJB',t2ab,eris_ovOV)
e += 0.5*np.einsum('ia,jb,iajb',t1a,t1a,eris_ovov)
e -= 0.5*np.einsum('ia,jb,ibja',t1a,t1a,eris_ovov)
e += 0.5*np.einsum('ia,jb,iajb',t1b,t1b,eris_OVOV)
e -= 0.5*np.einsum('ia,jb,ibja',t1b,t1b,eris_OVOV)
e += np.einsum('ia,jb,iajb',t1a,t1b,eris_ovOV)
return e.real
class UCCSD(rccsd.RCCSD):
def __init__(self, mf, frozen=[[],[]], mo_coeff=None, mo_occ=None):
rccsd.RCCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
# Spin-orbital CCSD needs a stricter tolerance than spatial-orbital
self.conv_tol_normt = 1e-6
if hasattr(mf, 'mo_energy'):
self.orbspin = orbspin_of_sorted_mo_energy(mf.mo_energy, self.mo_occ)
else:
self.orbspin = None
self._keys = self._keys.union(['orbspin'])
def build(self):
'''Reset orbspin; integrals and orbspin are re-initialized on next use'''
self.orbspin = None
@property
def nocc(self):
nocca, noccb = self.get_nocc()
return nocca + noccb
@property
def nmo(self):
nmoa, nmob = self.get_nmo()
return nmoa + nmob
def get_nocc(self):
if self._nocc is not None:
return self._nocc
if isinstance(self.frozen, (int, numpy.integer)):
nocca = int(self.mo_occ[0].sum()) - (self.frozen+1)//2
noccb = int(self.mo_occ[1].sum()) - self.frozen//2
elif isinstance(self.frozen[0], (int, numpy.integer)):
nocca = int(self.mo_occ[0].sum()) - self.frozen[0]
noccb = int(self.mo_occ[1].sum()) - self.frozen[1]
else:
mo_occa, mo_occb = self.mo_occ
if len(self.frozen[0]) > 0:
mo_occa = mo_occa.copy()
mo_occa[numpy.asarray(self.frozen[0])] = 0
if len(self.frozen[1]) > 0:
mo_occb = mo_occb.copy()
mo_occb[numpy.asarray(self.frozen[1])] = 0
nocca = np.count_nonzero(mo_occa==1)
noccb = np.count_nonzero(mo_occb==1)
return nocca, noccb
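# Frozen-orbital conventions accepted above (mirrored in get_nmo below):
#   frozen = n                 -> n frozen spin-orbitals, split (n+1)//2 alpha / n//2 beta
#   frozen = (na, nb)          -> na alpha and nb beta frozen orbitals
#   frozen = [[i,...],[j,...]] -> explicit lists of frozen alpha/beta orbital indices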
def get_nmo(self):
if self._nmo is not None:
return self._nmo
if isinstance(self.frozen, (int, numpy.integer)):
nmoa = self.mo_occ[0].size - (self.frozen+1)//2
nmob = self.mo_occ[1].size - self.frozen//2
elif isinstance(self.frozen[0], (int, numpy.integer)):
nmoa = self.mo_occ[0].size - self.frozen[0]
nmob = self.mo_occ[1].size - self.frozen[1]
else:
nmoa = len(self.mo_occ[0]) - len(self.frozen[0])
nmob = len(self.mo_occ[1]) - len(self.frozen[1])
return nmoa, nmob
def init_amps(self, eris):
time0 = time.clock(), time.time()
nocca, noccb = self.get_nocc()
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
eia_a = lib.direct_sum('i-a->ia', fooa.diagonal(), fvva.diagonal())
eia_b = lib.direct_sum('i-a->ia', foob.diagonal(), fvvb.diagonal())
t1a = fova.conj() / eia_a
t1b = fovb.conj() / eia_b
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
t2aa = eris_ovov.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
t2ab = eris_ovOV.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
t2bb = eris_OVOV.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
t2aa = t2aa - t2aa.transpose(0,1,3,2)
t2bb = t2bb - t2bb.transpose(0,1,3,2)
e = np.einsum('iJaB,iaJB', t2ab, eris_ovOV)
e += 0.25*np.einsum('ijab,iajb', t2aa, eris_ovov)
e -= 0.25*np.einsum('ijab,ibja', t2aa, eris_ovov)
e += 0.25*np.einsum('ijab,iajb', t2bb, eris_OVOV)
e -= 0.25*np.einsum('ijab,ibja', t2bb, eris_OVOV)
self.emp2 = e.real
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
logger.timer(self, 'init mp2', *time0)
return self.emp2, (t1a,t1b), (t2aa,t2ab,t2bb)
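# init_amps above is the UMP2 initial guess: t1 = f_ov.conj()/(e_i - e_a) and
# t2 = <ia|jb>/(e_i + e_j - e_a - e_b), with the same-spin blocks
# antisymmetrized; the energy logged and returned is therefore the MP2 energy.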
def kernel(self, t1=None, t2=None, eris=None, mbpt2=False):
return self.ccsd(t1, t2, eris, mbpt2)
def ccsd(self, t1=None, t2=None, eris=None, mbpt2=False):
'''Ground-state unrestricted (U)CCSD.
Kwargs:
mbpt2 : bool
Use one-shot MBPT2 approximation to CCSD.
'''
if eris is None: eris = self.ao2mo(self.mo_coeff)
self.eris = eris
self.dump_flags()
if mbpt2:
cctyp = 'MBPT2'
self.e_corr, self.t1, self.t2 = self.init_amps(eris)
else:
cctyp = 'CCSD'
self.converged, self.e_corr, self.t1, self.t2 = \
kernel(self, eris, t1, t2, max_cycle=self.max_cycle,
tol=self.conv_tol, tolnormt=self.conv_tol_normt,
verbose=self.verbose)
if self.converged:
logger.info(self, 'CCSD converged')
else:
logger.info(self, 'CCSD not converged')
if self._scf.e_tot == 0:
logger.note(self, 'E_corr = %.16g', self.e_corr)
else:
logger.note(self, 'E(%s) = %.16g E_corr = %.16g',
cctyp, self.e_tot, self.e_corr)
return self.e_corr, self.t1, self.t2
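# Minimal usage sketch (molecule and names are illustrative only):
#   from pyscf import gto, scf
#   mol = gto.M(atom='O 0 0 0; H 0 0 0.96; H 0.93 0 -0.24', basis='6-31g')
#   mf = scf.UHF(mol).run()
#   e_corr, t1, t2 = UCCSD(mf).kernel()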
def ao2mo(self, mo_coeff=None):
return _ERIS(self, mo_coeff)
def update_amps(self, t1, t2, eris):
return update_amps(self, t1, t2, eris)
def nip(self):
nocc = self.nocc
nvir = self.nmo - nocc
self._nip = nocc + nocc*(nocc-1)//2*nvir
return self._nip
def nea(self):
nocc = self.nocc
nvir = self.nmo - nocc
self._nea = nvir + nocc*nvir*(nvir-1)//2
return self._nea
def nee(self):
nocc = self.nocc
nvir = self.nmo - nocc
self._nee = nocc*nvir + nocc*(nocc-1)//2*nvir*(nvir-1)//2
return self._nee
def ipccsd_matvec(self, vector):
# Ref: Tu, Wang, and Li, J. Chem. Phys. 136, 174102 (2012) Eqs.(8)-(9)
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ip_imds:
self.eris.__dict__.update(_ERISspin(self).__dict__)
self.imds.make_ip()
imds = self.imds
r1,r2 = self.vector_to_amplitudes_ip(vector)
nocc, nvir = r2.shape[1:]
eris = self.eris
# Eq. (8)
Hr1 = np.einsum('me,mie->i',imds.Fov,r2)
Hr1 -= np.einsum('mi,m->i',imds.Foo,r1)
Hr1 -= 0.5*np.einsum('nmie,mne->i',imds.Wooov,r2)
# Eq. (9)
Hr2 = lib.einsum('ae,ije->ija',imds.Fvv,r2)
tmp1 = lib.einsum('mi,mja->ija',imds.Foo,r2)
Hr2 -= tmp1 - tmp1.transpose(1,0,2)
Hr2 -= np.einsum('maji,m->ija',imds.Wovoo,r1)
Hr2 += 0.5*lib.einsum('mnij,mna->ija',imds.Woooo,r2)
tmp2 = lib.einsum('maei,mje->ija',imds.Wovvo,r2)
Hr2 += tmp2 - tmp2.transpose(1,0,2)
eris_ovov = np.asarray(eris.ovov)
tmp = 0.5*np.einsum('menf,mnf->e', eris_ovov, r2)
tmp-= 0.5*np.einsum('mfne,mnf->e', eris_ovov, r2)
t2 = spatial2spin(self.t2, eris.orbspin)
Hr2 += np.einsum('e,ijae->ija', tmp, t2)
vector = self.amplitudes_to_vector_ip(Hr1,Hr2)
return vector
def ipccsd_diag(self):
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ip_imds:
self.eris.__dict__.update(_ERISspin(self).__dict__)
self.imds.make_ip()
imds = self.imds
t1, t2, eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
nocc, nvir = t1.shape
Fo = np.diagonal(imds.Foo)
Fv = np.diagonal(imds.Fvv)
Hr1 = -Fo
Hr2 = lib.direct_sum('-i-j+a->ija', Fo, Fo, Fv)
Woooo = np.asarray(imds.Woooo)
Woo = np.zeros((nocc,nocc), dtype=t1.dtype)
Woo += np.einsum('ijij->ij', Woooo)
Woo -= np.einsum('ijji->ij', Woooo)
Hr2 += Woo.reshape(nocc,nocc,-1) * .5
Wov = np.einsum('iaai->ia', imds.Wovvo)
Hr2 += Wov
Hr2 += Wov.reshape(nocc,1,nvir)
eris_ovov = np.asarray(eris.ovov)
Hr2 -= np.einsum('iajb,ijab->ija', eris_ovov, t2)
Hr2 -= np.einsum('iajb,ijab->ijb', eris_ovov, t2)
vector = self.amplitudes_to_vector_ip(Hr1,Hr2)
return vector
def vector_to_amplitudes_ip(self,vector):
nocc = self.nocc
nvir = self.nmo - nocc
r1 = vector[:nocc].copy()
r2 = np.zeros((nocc**2,nvir), vector.dtype)
otril = np.tril_indices(nocc, k=-1)
r2_tril = vector[nocc:].reshape(-1,nvir)
lib.takebak_2d(r2, r2_tril, otril[0]*nocc+otril[1], np.arange(nvir))
lib.takebak_2d(r2,-r2_tril, otril[1]*nocc+otril[0], np.arange(nvir))
return r1,r2.reshape(nocc,nocc,nvir)
def amplitudes_to_vector_ip(self,r1,r2):
nocc = self.nocc
nvir = self.nmo - nocc
size = nocc + nocc*(nocc-1)//2*nvir
vector = np.empty(size, r1.dtype)
vector[:nocc] = r1.copy()
otril = np.tril_indices(nocc, k=-1)
lib.take_2d(r2.reshape(-1,nvir), otril[0]*nocc+otril[1],
np.arange(nvir), out=vector[nocc:])
return vector
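# IP-EOM vectors are packed as [r1 (nocc), r2 restricted to i>j], storing the
# antisymmetric pair r2[i,j,a] = -r2[j,i,a] only once; vector_to_amplitudes_ip
# above performs the inverse unpacking.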
def eaccsd_matvec(self,vector):
# Ref: Nooijen and Bartlett, J. Chem. Phys. 102, 3629 (1994) Eqs.(30)-(31)
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ea_imds:
self.eris.__dict__.update(_ERISspin(self).__dict__)
self.imds.make_ea()
imds = self.imds
r1,r2 = self.vector_to_amplitudes_ea(vector)
t1, t2, eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
Hr1 = np.einsum('ac,c->a',imds.Fvv,r1)
Hr1 += np.einsum('ld,lad->a',imds.Fov,r2)
tmp1 = lib.einsum('ac,jcb->jab',imds.Fvv,r2)
Hr2 = (tmp1 - tmp1.transpose(0,2,1))
Hr2 -= lib.einsum('lj,lab->jab',imds.Foo,r2)
eris_ovvv = np.asarray(eris.ovvv)
Hr1 -= 0.5*np.einsum('lcad,lcd->a',eris_ovvv,r2)
Hr1 += 0.5*np.einsum('ldac,lcd->a',eris_ovvv,r2)
tau2 = r2 + np.einsum('jd,c->jcd', t1, r1) * 2
tau2 = tau2 - tau2.transpose(0,2,1)
tmp = lib.einsum('mcad,jcd->maj', eris_ovvv, tau2)
tmp = lib.einsum('mb,maj->jab', t1, tmp)
Hr2 += .5 * (tmp - tmp.transpose(0,2,1))
eris_ovov = np.asarray(eris.ovov)
tau = imd.make_tau(t2, t1, t1)
tmp = lib.einsum('menf,jef->mnj', eris_ovov, tau2)
Hr2 += .25*lib.einsum('mnab,mnj->jab', tau, tmp)
eris_ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
tmp = np.einsum('ndlc,lcd->n', eris_ovov, r2)
Hr1 += .5 * np.einsum('na,n->a', t1, tmp)
tmp = np.einsum('kcld,lcd->k', eris_ovov, r2)
t2 = spatial2spin(self.t2, eris.orbspin)
Hr2 -= 0.5 * np.einsum('k,kjab->jab', tmp, t2)
tmp = lib.einsum('lbdj,lad->jab', imds.Wovvo, r2)
Hr2 += tmp - tmp.transpose(0,2,1)
Hr2 += np.einsum('abcj,c->jab', imds.Wvvvo, r1)
eris_vvvv = np.asarray(eris.vvvv)
Hr2 += 0.5*einsum('acbd,jcd->jab',eris_vvvv,tau2)
vector = self.amplitudes_to_vector_ea(Hr1,Hr2)
return vector
def eaccsd_diag(self):
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ea_imds:
self.eris.__dict__.update(_ERISspin(self).__dict__)
self.imds.make_ea()
imds = self.imds
t1, t2, eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
nocc, nvir = t1.shape
Fo = np.diagonal(imds.Foo)
Fv = np.diagonal(imds.Fvv)
Hr1 = Fv
Hr2 = lib.direct_sum('-j+a+b->jab', Fo, Fv, Fv)
Wov = np.einsum('iaai->ia', imds.Wovvo)
Hr2 += Wov.reshape(nocc,nvir,1)
Hr2 += Wov.reshape(nocc,1,nvir)
eris_ovov = np.asarray(eris.ovov)
Hr2 -= np.einsum('iajb,ijab->jab', eris_ovov, t2)
Hr2 -= np.einsum('iajb,ijab->iab', eris_ovov, t2)
eris_ovvv = np.asarray(eris.ovvv)
Wvv = einsum('mb,maab->ab', t1, eris_ovvv)
Wvv -= einsum('mb,mbaa->ab', t1, eris_ovvv)
Wvv = Wvv + Wvv.T
eris_vvvv = np.asarray(eris.vvvv)
Wvv += np.einsum('aabb->ab', eris_vvvv)
Wvv -= np.einsum('abba->ab', eris_vvvv)
tau = imd.make_tau(t2, t1, t1)
Wvv += 0.5*np.einsum('mnab,manb->ab', tau, eris_ovov)
Wvv -= 0.5*np.einsum('mnab,mbna->ab', tau, eris_ovov)
Hr2 += Wvv
vector = self.amplitudes_to_vector_ea(Hr1,Hr2)
return vector
def vector_to_amplitudes_ea(self,vector):
nocc = self.nocc
nvir = self.nmo - nocc
r1 = vector[:nvir].copy()
r2 = np.zeros((nocc,nvir*nvir), vector.dtype)
vtril = np.tril_indices(nvir, k=-1)
r2_tril = vector[nvir:].reshape(nocc,-1)
lib.takebak_2d(r2, r2_tril, np.arange(nocc), vtril[0]*nvir+vtril[1])
lib.takebak_2d(r2,-r2_tril, np.arange(nocc), vtril[1]*nvir+vtril[0])
return r1,r2.reshape(nocc,nvir,nvir)
def amplitudes_to_vector_ea(self,r1,r2):
nocc = self.nocc
nvir = self.nmo - nocc
size = nvir + nvir*(nvir-1)//2*nocc
vector = np.empty(size, r1.dtype)
vector[:nvir] = r1.copy()
vtril = np.tril_indices(nvir, k=-1)
lib.take_2d(r2.reshape(nocc,-1), np.arange(nocc),
vtril[0]*nvir+vtril[1], out=vector[nvir:])
return vector
def eeccsd(self, nroots=1, koopmans=False, guess=None):
'''Calculate N-electron neutral excitations via EE-EOM-CCSD.
Kwargs:
nroots : int
Number of roots (eigenvalues) requested
koopmans : bool
Calculate Koopmans'-like (1p1h) excitations only, targeting via
overlap.
guess : list of ndarray
List of guess vectors to use for targeting via overlap.
'''
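# The spin-orbital EE space splits into a spin-conserving (EE) block and a
# spin-flip (SF) block.  Both diagonals are formed below, and the requested
# roots are drawn from whichever block offers the lowest diagonal estimates,
# unless the supplied guess vectors dictate the split.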
spinvec_size = self.nee()
nroots = min(nroots, spinvec_size)
if hasattr(self,'imds') and (self.imds.made_ip_imds or self.imds.made_ea_imds):
self.orbspin = orbspin_of_sorted_mo_energy(self._scf.mo_energy, self.mo_occ)
self.eris = self.ao2mo(self.mo_coeff)
self.imds = _IMDS(self)
diag_ee, diag_sf = self.eeccsd_diag()
guess_ee = []
guess_sf = []
if guess and guess[0].size == spinvec_size:
for g in guess:
r1, r2 = self.vector_to_amplitudes_ee(g)
g = self.amplitudes_to_vector(self.spin2spatial(r1, self.orbspin),
self.spin2spatial(r2, self.orbspin))
if np.linalg.norm(g) > 1e-7:
guess_ee.append(g)
else:
r1 = self.spin2spatial(r1, self.orbspin)
r2 = self.spin2spatial(r2, self.orbspin)
g = self.amplitudes_to_vector_eomsf(r1, r2)
guess_sf.append(g)
r1 = r2 = None
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
elif guess:
for g in guess:
if g.size == diag_ee.size:
guess_ee.append(g)
else:
guess_sf.append(g)
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
else:
dee = np.sort(diag_ee)[:nroots]
dsf = np.sort(diag_sf)[:nroots]
dmax = np.sort(np.hstack([dee,dsf]))[nroots-1]
nroots_ee = np.count_nonzero(dee <= dmax)
nroots_sf = np.count_nonzero(dsf <= dmax)
guess_ee = guess_sf = None
e0, v0 = [], []
e1, v1 = [], []
if nroots_ee > 0:
e0, v0 = self.eomee_ccsd(nroots_ee, koopmans, guess_ee, diag_ee)
if nroots_ee == 1:
e0, v0 = [e0], [v0]
if nroots_sf > 0:
e1, v1 = self.eomsf_ccsd(nroots_sf, koopmans, guess_sf, diag_sf)
if nroots_sf == 1:
e1, v1 = [e1], [v1]
e = np.hstack([e0,e1])
v = v0 + v1
if nroots == 1:
return e[0], v[0]
else:
idx = e.argsort()
return e[idx], [v[x] for x in idx]
def eomee_ccsd(self, nroots=1, koopmans=False, guess=None, diag=None):
cput0 = (time.clock(), time.time())
if diag is None:
diag = self.eeccsd_diag()[0]
nocca, noccb = self.get_nocc()
nmoa, nmob = self.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
user_guess = False
if guess:
user_guess = True
assert len(guess) == nroots
for g in guess:
assert g.size == diag.size
else:
idx = diag.argsort()
guess = []
if koopmans:
n = 0
for i in idx:
g = np.zeros_like(diag)
g[i] = 1.0
t1, t2 = self.vector_to_amplitudes(g, (nocca,noccb), (nvira,nvirb))
if np.linalg.norm(t1[0]) > .9 or np.linalg.norm(t1[1]) > .9:
guess.append(g)
n += 1
if n == nroots:
break
else:
for i in idx[:nroots]:
g = np.zeros_like(diag)
g[i] = 1.0
guess.append(g)
def precond(r, e0, x0):
return r/(e0-diag+1e-12)
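# Davidson diagonal preconditioner: the residual is scaled by 1/(e0 - H_diag),
# with 1e-12 guarding against division by zero near convergence.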
eig = linalg_helper.eig
if user_guess or koopmans:
def pickeig(w, v, nr, envs):
x0 = linalg_helper._gen_x0(envs['v'], envs['xs'])
idx = np.argmax( np.abs(np.dot(np.array(guess).conj(),np.array(x0).T)), axis=1 )
return w[idx].real, v[:,idx].real, idx
eee, evecs = eig(self.eomee_ccsd_matvec, guess, precond, pick=pickeig,
tol=self.conv_tol, max_cycle=self.max_cycle,
max_space=self.max_space, nroots=nroots,
verbose=self.verbose)
else:
eee, evecs = eig(self.eomee_ccsd_matvec, guess, precond,
tol=self.conv_tol, max_cycle=self.max_cycle,
max_space=self.max_space, nroots=nroots,
verbose=self.verbose)
self.eee = eee.real
if nroots == 1:
eee, evecs = [self.eee], [evecs]
for n, en, vn in zip(range(nroots), eee, evecs):
t1, t2 = self.vector_to_amplitudes(vn, (nocca,noccb), (nvira,nvirb))
qpwt = np.linalg.norm(t1[0])**2 + np.linalg.norm(t1[1])**2
logger.info(self, 'EOM-EE root %d E = %.16g qpwt = %.6g', n, en, qpwt)
logger.timer(self, 'EOM-EE-CCSD', *cput0)
if nroots == 1:
return eee[0], evecs[0]
else:
return eee, evecs
def eomsf_ccsd(self, nroots=1, koopmans=False, guess=None, diag=None):
cput0 = (time.clock(), time.time())
if diag is None:
diag = self.eeccsd_diag()[1]
nocca, noccb = self.get_nocc()
nmoa, nmob = self.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
user_guess = False
if guess:
user_guess = True
assert len(guess) == nroots
for g in guess:
assert g.size == diag.size
else:
idx = diag.argsort()
guess = []
if koopmans:
n = 0
for i in idx:
g = np.zeros_like(diag)
g[i] = 1.0
t1, t2 = self.vector_to_amplitudes_eomsf(g, (nocca,noccb), (nvira,nvirb))
if np.linalg.norm(t1[0]) > .9 or np.linalg.norm(t1[1]) > .9:
guess.append(g)
n += 1
if n == nroots:
break
else:
for i in idx[:nroots]:
g = np.zeros_like(diag)
g[i] = 1.0
guess.append(g)
def precond(r, e0, x0):
return r/(e0-diag+1e-12)
eig = linalg_helper.eig
if user_guess or koopmans:
def pickeig(w, v, nr, envs):
x0 = linalg_helper._gen_x0(envs['v'], envs['xs'])
idx = np.argmax( np.abs(np.dot(np.array(guess).conj(),np.array(x0).T)), axis=1 )
return w[idx].real, v[:,idx].real, idx
eee, evecs = eig(self.eomsf_ccsd_matvec, guess, precond, pick=pickeig,
tol=self.conv_tol, max_cycle=self.max_cycle,
max_space=self.max_space, nroots=nroots,
verbose=self.verbose)
else:
eee, evecs = eig(self.eomsf_ccsd_matvec, guess, precond,
tol=self.conv_tol, max_cycle=self.max_cycle,
max_space=self.max_space, nroots=nroots,
verbose=self.verbose)
self.eee = eee.real
if nroots == 1:
eee, evecs = [self.eee], [evecs]
for n, en, vn in zip(range(nroots), eee, evecs):
t1, t2 = self.vector_to_amplitudes_eomsf(vn, (nocca,noccb), (nvira,nvirb))
qpwt = np.linalg.norm(t1[0])**2 + np.linalg.norm(t1[1])**2
logger.info(self, 'EOM-SF root %d E = %.16g qpwt = %.6g', n, en, qpwt)
logger.timer(self, 'EOM-SF-CCSD', *cput0)
if nroots == 1:
return eee[0], evecs[0]
else:
return eee, evecs
# Ref: Wang, Tu, and Wang, J. Chem. Theory Comput. 10, 5567 (2014) Eqs.(9)-(10)
# Note: Last line in Eq. (10) is superfluous.
# See, e.g., Gwaltney, Nooijen, and Bartlett, Chem. Phys. Lett. 248, 189 (1996)
def eomee_ccsd_matvec(self, vector):
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ee_imds:
self.imds.make_ee()
imds = self.imds
r1, r2 = self.vector_to_amplitudes(vector)
r1a, r1b = r1
r2aa, r2ab, r2bb = r2
t1, t2, eris = self.t1, self.t2, self.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
Hr1a = lib.einsum('ae,ie->ia', imds.Fvva, r1a)
Hr1a -= lib.einsum('mi,ma->ia', imds.Fooa, r1a)
Hr1a += np.einsum('me,imae->ia',imds.Fova, r2aa)
Hr1a += np.einsum('ME,iMaE->ia',imds.Fovb, r2ab)
Hr1b = lib.einsum('ae,ie->ia', imds.Fvvb, r1b)
Hr1b -= lib.einsum('mi,ma->ia', imds.Foob, r1b)
Hr1b += np.einsum('me,imae->ia',imds.Fovb, r2bb)
Hr1b += np.einsum('me,mIeA->IA',imds.Fova, r2ab)
Hr2aa = lib.einsum('mnij,mnab->ijab', imds.woooo, r2aa) * .25
Hr2bb = lib.einsum('mnij,mnab->ijab', imds.wOOOO, r2bb) * .25
Hr2ab = lib.einsum('mNiJ,mNaB->iJaB', imds.woOoO, r2ab)
Hr2aa+= lib.einsum('be,ijae->ijab', imds.Fvva, r2aa)
Hr2bb+= lib.einsum('be,ijae->ijab', imds.Fvvb, r2bb)
Hr2ab+= lib.einsum('BE,iJaE->iJaB', imds.Fvvb, r2ab)
Hr2ab+= lib.einsum('be,iJeA->iJbA', imds.Fvva, r2ab)
Hr2aa-= lib.einsum('mj,imab->ijab', imds.Fooa, r2aa)
Hr2bb-= lib.einsum('mj,imab->ijab', imds.Foob, r2bb)
Hr2ab-= lib.einsum('MJ,iMaB->iJaB', imds.Foob, r2ab)
Hr2ab-= lib.einsum('mj,mIaB->jIaB', imds.Fooa, r2ab)
#:tau2aa, tau2ab, tau2bb = make_tau(r2, r1, t1, 2)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1a += lib.einsum('mfae,imef->ia', eris_ovvv, r2aa)
#:tmpaa = lib.einsum('meaf,ijef->maij', eris_ovvv, tau2aa)
#:Hr2aa+= lib.einsum('mb,maij->ijab', t1a, tmpaa)
#:tmpa = lib.einsum('mfae,me->af', eris_ovvv, r1a)
#:tmpa-= lib.einsum('meaf,me->af', eris_ovvv, r1a)
#:Hr1b += lib.einsum('mfae,imef->ia', eris_OVVV, r2bb)
#:tmpbb = lib.einsum('meaf,ijef->maij', eris_OVVV, tau2bb)
#:Hr2bb+= lib.einsum('mb,maij->ijab', t1b, tmpbb)
#:tmpb = lib.einsum('mfae,me->af', eris_OVVV, r1b)
#:tmpb-= lib.einsum('meaf,me->af', eris_OVVV, r1b)
#:Hr1b += lib.einsum('mfAE,mIfE->IA', eris_ovVV, r2ab)
#:tmpab = lib.einsum('meAF,iJeF->mAiJ', eris_ovVV, tau2ab)
#:Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a, tmpab)
#:tmpb-= lib.einsum('meAF,me->AF', eris_ovVV, r1a)
#:Hr1a += lib.einsum('MFae,iMeF->ia', eris_OVvv, r2ab)
#:tmpba =-lib.einsum('MEaf,iJfE->MaiJ', eris_OVvv, tau2ab)
#:Hr2ab+= lib.einsum('MB,MaiJ->iJaB', t1b, tmpba)
#:tmpa-= lib.einsum('MEaf,ME->af', eris_OVvv, r1b)
tau2aa = make_tau_aa(r2aa, r1a, t1a, 2)
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
tmpa = np.zeros((nvira,nvira))
tmpb = np.zeros((nvirb,nvirb))
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0, p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
Hr1a += lib.einsum('mfae,imef->ia', ovvv, r2aa[:,p0:p1])
tmpaa = lib.einsum('meaf,ijef->maij', ovvv, tau2aa)
Hr2aa+= lib.einsum('mb,maij->ijab', t1a[p0:p1], tmpaa)
tmpa+= lib.einsum('mfae,me->af', ovvv, r1a[p0:p1])
tmpa-= lib.einsum('meaf,me->af', ovvv, r1a[p0:p1])
ovvv = tmpaa = None
tau2aa = None
tau2bb = make_tau_aa(r2bb, r1b, t1b, 2)
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
Hr1b += lib.einsum('mfae,imef->ia', OVVV, r2bb[:,p0:p1])
tmpbb = lib.einsum('meaf,ijef->maij', OVVV, tau2bb)
Hr2bb+= lib.einsum('mb,maij->ijab', t1b[p0:p1], tmpbb)
tmpb+= lib.einsum('mfae,me->af', OVVV, r1b[p0:p1])
tmpb-= lib.einsum('meaf,me->af', OVVV, r1b[p0:p1])
OVVV = tmpbb = None
tau2bb = None
tau2ab = make_tau_ab(r2ab, r1 , t1 , 2)
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0, p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
Hr1b += lib.einsum('mfAE,mIfE->IA', ovVV, r2ab[p0:p1])
tmpab = lib.einsum('meAF,iJeF->mAiJ', ovVV, tau2ab)
Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a[p0:p1], tmpab)
tmpb-= lib.einsum('meAF,me->AF', ovVV, r1a[p0:p1])
ovVV = tmpab = None
blksize = max(int(max_memory*1e6/8/(nvirb*nvira**2*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
Hr1a += lib.einsum('MFae,iMeF->ia', OVvv, r2ab[:,p0:p1])
tmpba = lib.einsum('MEaf,iJfE->MaiJ', OVvv, tau2ab)
Hr2ab-= lib.einsum('MB,MaiJ->iJaB', t1b[p0:p1], tmpba)
tmpa-= lib.einsum('MEaf,ME->af', OVvv, r1b[p0:p1])
OVvv = tmpba = None
tau2ab = None
Hr2aa-= lib.einsum('af,ijfb->ijab', tmpa, t2aa)
Hr2bb-= lib.einsum('af,ijfb->ijab', tmpb, t2bb)
Hr2ab-= lib.einsum('af,iJfB->iJaB', tmpa, t2ab)
Hr2ab-= lib.einsum('AF,iJbF->iJbA', tmpb, t2ab)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tau2aa = make_tau_aa(r2aa, r1a, t1a, 2)
tauaa = make_tau_aa(t2aa, t1a, t1a)
tmpaa = lib.einsum('menf,ijef->mnij', eris_ovov, tau2aa)
Hr2aa += lib.einsum('mnij,mnab->ijab', tmpaa, tauaa) * 0.25
tau2aa = tauaa = None
tau2bb = make_tau_aa(r2bb, r1b, t1b, 2)
taubb = make_tau_aa(t2bb, t1b, t1b)
tmpbb = lib.einsum('menf,ijef->mnij', eris_OVOV, tau2bb)
Hr2bb += lib.einsum('mnij,mnab->ijab', tmpbb, taubb) * 0.25
tau2bb = taubb = None
tau2ab = make_tau_ab(r2ab, r1 , t1 , 2)
tauab = make_tau_ab(t2ab, t1 , t1)
tmpab = lib.einsum('meNF,iJeF->mNiJ', eris_ovOV, tau2ab)
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', tmpab, tauab)
tau2ab = tauab = None
tmpa = lib.einsum('menf,imef->ni', eris_ovov, r2aa)
tmpa-= lib.einsum('neMF,iMeF->ni', eris_ovOV, r2ab)
tmpb = lib.einsum('menf,imef->ni', eris_OVOV, r2bb)
tmpb-= lib.einsum('mfNE,mIfE->NI', eris_ovOV, r2ab)
Hr1a += lib.einsum('na,ni->ia', t1a, tmpa)
Hr1b += lib.einsum('na,ni->ia', t1b, tmpb)
Hr2aa+= lib.einsum('mj,imab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('mj,imab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('MJ,iMaB->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('mj,mIaB->jIaB', tmpa, t2ab)
tmp1a = np.einsum('menf,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('mfne,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('neMF,MF->en', eris_ovOV, r1b)
tmp1b = np.einsum('menf,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfne,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfNE,mf->EN', eris_ovOV, r1a)
tmpa = np.einsum('en,nb->eb', tmp1a, t1a)
tmpa+= lib.einsum('menf,mnfb->eb', eris_ovov, r2aa)
tmpa-= lib.einsum('meNF,mNbF->eb', eris_ovOV, r2ab)
tmpb = np.einsum('en,nb->eb', tmp1b, t1b)
tmpb+= lib.einsum('menf,mnfb->eb', eris_OVOV, r2bb)
tmpb-= lib.einsum('nfME,nMfB->EB', eris_ovOV, r2ab)
Hr2aa+= lib.einsum('eb,ijae->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('eb,ijae->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('EB,iJaE->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('eb,iJeA->iJbA', tmpa, t2ab)
eris_ovov = eris_ovOV = eris_OVOV = None
Hr2aa-= lib.einsum('mbij,ma->ijab', imds.wovoo, r1a)
Hr2bb-= lib.einsum('mbij,ma->ijab', imds.wOVOO, r1b)
Hr2ab-= lib.einsum('mBiJ,ma->iJaB', imds.woVoO, r1a)
Hr2ab-= lib.einsum('MbJi,MA->iJbA', imds.wOvOo, r1b)
Hr1a-= 0.5*lib.einsum('mnie,mnae->ia', imds.wooov, r2aa)
Hr1a-= lib.einsum('mNiE,mNaE->ia', imds.woOoV, r2ab)
Hr1b-= 0.5*lib.einsum('mnie,mnae->ia', imds.wOOOV, r2bb)
Hr1b-= lib.einsum('MnIe,nMeA->IA', imds.wOoOv, r2ab)
tmpa = lib.einsum('mnie,me->ni', imds.wooov, r1a)
tmpa-= lib.einsum('nMiE,ME->ni', imds.woOoV, r1b)
tmpb = lib.einsum('mnie,me->ni', imds.wOOOV, r1b)
tmpb-= lib.einsum('NmIe,me->NI', imds.wOoOv, r1a)
Hr2aa+= lib.einsum('ni,njab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('ni,njab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('ni,nJaB->iJaB', tmpa, t2ab)
Hr2ab+= lib.einsum('NI,jNaB->jIaB', tmpb, t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2aa+= lib.einsum('ejab,ie->ijab', imds.wvovv[p0:p1], r1a[:,p0:p1])
Hr2ab+= lib.einsum('eJaB,ie->iJaB', imds.wvOvV[p0:p1], r1a[:,p0:p1])
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2bb+= lib.einsum('ejab,ie->ijab', imds.wVOVV[p0:p1], r1b[:,p0:p1])
Hr2ab+= lib.einsum('EjBa,IE->jIaB', imds.wVoVv[p0:p1], r1b[:,p0:p1])
Hr1a += np.einsum('maei,me->ia',imds.wovvo,r1a)
Hr1a += np.einsum('MaEi,ME->ia',imds.wOvVo,r1b)
Hr1b += np.einsum('maei,me->ia',imds.wOVVO,r1b)
Hr1b += np.einsum('mAeI,me->IA',imds.woVvO,r1a)
Hr2aa+= lib.einsum('mbej,imae->ijab', imds.wovvo, r2aa) * 2
Hr2aa+= lib.einsum('MbEj,iMaE->ijab', imds.wOvVo, r2ab) * 2
Hr2bb+= lib.einsum('mbej,imae->ijab', imds.wOVVO, r2bb) * 2
Hr2bb+= lib.einsum('mBeJ,mIeA->IJAB', imds.woVvO, r2ab) * 2
Hr2ab+= lib.einsum('mBeJ,imae->iJaB', imds.woVvO, r2aa)
Hr2ab+= lib.einsum('MBEJ,iMaE->iJaB', imds.wOVVO, r2ab)
Hr2ab+= lib.einsum('mBEj,mIaE->jIaB', imds.woVVo, r2ab)
Hr2ab+= lib.einsum('mbej,mIeA->jIbA', imds.wovvo, r2ab)
Hr2ab+= lib.einsum('MbEj,IMAE->jIbA', imds.wOvVo, r2bb)
Hr2ab+= lib.einsum('MbeJ,iMeA->iJbA', imds.wOvvO, r2ab)
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvira)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Hr2aa += lib.einsum('ijef,aebf->ijab', tau2aa, eris_vvvv) * .5
#:Hr2bb += lib.einsum('ijef,aebf->ijab', tau2bb, eris_VVVV) * .5
#:Hr2ab += lib.einsum('iJeF,aeBF->iJaB', tau2ab, eris_vvVV)
tau2aa, tau2ab, tau2bb = make_tau(r2, r1, t1, 2)
_add_vvvv_(self, (tau2aa,tau2ab,tau2bb), eris, (Hr2aa,Hr2ab,Hr2bb))
Hr2aa *= .5
Hr2bb *= .5
Hr2aa = Hr2aa - Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa - Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb - Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb - Hr2bb.transpose(1,0,2,3)
vector = self.amplitudes_to_vector((Hr1a,Hr1b), (Hr2aa,Hr2ab,Hr2bb))
return vector
def eomsf_ccsd_matvec(self, vector):
'''Matrix-vector product for spin-flip EOM-CCSD'''
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ee_imds:
self.imds.make_ee()
imds = self.imds
t1, t2, eris = self.t1, self.t2, self.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
r1, r2 = self.vector_to_amplitudes_eomsf(vector, (nocca,noccb), (nvira,nvirb))
r1ab, r1ba = r1
r2baaa, r2aaba, r2abbb, r2bbab = r2
Hr1ab = np.einsum('ae,ie->ia', imds.Fvvb, r1ab)
Hr1ab -= np.einsum('mi,ma->ia', imds.Fooa, r1ab)
Hr1ab += np.einsum('me,imae->ia', imds.Fovb, r2abbb)
Hr1ab += np.einsum('me,imae->ia', imds.Fova, r2aaba)
Hr1ba = np.einsum('ae,ie->ia', imds.Fvva, r1ba)
Hr1ba -= np.einsum('mi,ma->ia', imds.Foob, r1ba)
Hr1ba += np.einsum('me,imae->ia', imds.Fova, r2baaa)
Hr1ba += np.einsum('me,imae->ia', imds.Fovb, r2bbab)
Hr2baaa = .5 *lib.einsum('nMjI,Mnab->Ijab', imds.woOoO, r2baaa)
Hr2aaba = .25*lib.einsum('mnij,mnAb->ijAb', imds.woooo, r2aaba)
Hr2abbb = .5 *lib.einsum('mNiJ,mNAB->iJAB', imds.woOoO, r2abbb)
Hr2bbab = .25*lib.einsum('MNIJ,MNaB->IJaB', imds.wOOOO, r2bbab)
Hr2baaa += lib.einsum('be,Ijae->Ijab', imds.Fvva , r2baaa)
Hr2baaa -= lib.einsum('mj,imab->ijab', imds.Fooa*.5, r2baaa)
Hr2baaa -= lib.einsum('MJ,Miab->Jiab', imds.Foob*.5, r2baaa)
Hr2bbab -= lib.einsum('mj,imab->ijab', imds.Foob , r2bbab)
Hr2bbab += lib.einsum('BE,IJaE->IJaB', imds.Fvvb*.5, r2bbab)
Hr2bbab += lib.einsum('be,IJeA->IJbA', imds.Fvva*.5, r2bbab)
Hr2aaba -= lib.einsum('mj,imab->ijab', imds.Fooa , r2aaba)
Hr2aaba += lib.einsum('be,ijAe->ijAb', imds.Fvva*.5, r2aaba)
Hr2aaba += lib.einsum('BE,ijEa->ijBa', imds.Fvvb*.5, r2aaba)
Hr2abbb += lib.einsum('BE,iJAE->iJAB', imds.Fvvb , r2abbb)
Hr2abbb -= lib.einsum('mj,imab->ijab', imds.Foob*.5, r2abbb)
Hr2abbb -= lib.einsum('mj,mIAB->jIAB', imds.Fooa*.5, r2abbb)
tau2baaa = np.einsum('ia,jb->ijab', r1ba, t1a)
tau2baaa = tau2baaa - tau2baaa.transpose(0,1,3,2)
tau2abbb = np.einsum('ia,jb->ijab', r1ab, t1b)
tau2abbb = tau2abbb - tau2abbb.transpose(0,1,3,2)
tau2aaba = np.einsum('ia,jb->ijab', r1ab, t1a)
tau2aaba = tau2aaba - tau2aaba.transpose(1,0,2,3)
tau2bbab = np.einsum('ia,jb->ijab', r1ba, t1b)
tau2bbab = tau2bbab - tau2bbab.transpose(1,0,2,3)
tau2baaa += r2baaa
tau2bbab += r2bbab
tau2abbb += r2abbb
tau2aaba += r2aaba
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Hr1ba += einsum('mfae,Imef->Ia', eris_ovvv, r2baaa)
#:tmp1aaba = lib.einsum('meaf,Ijef->maIj', eris_ovvv, tau2baaa)
#:Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a , tmp1aaba)
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
Hr1ba += einsum('mfae,Imef->Ia', ovvv, r2baaa[:,p0:p1])
tmp1aaba = lib.einsum('meaf,Ijef->maIj', ovvv, tau2baaa)
Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a[p0:p1], tmp1aaba)
ovvv = tmp1aaba = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1ab += einsum('MFAE,iMEF->iA', eris_OVVV, r2abbb)
#:tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', eris_OVVV, tau2abbb)
#:Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b , tmp1bbab)
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
Hr1ab += einsum('MFAE,iMEF->iA', OVVV, r2abbb[:,p0:p1])
tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', OVVV, tau2abbb)
Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b[p0:p1], tmp1bbab)
OVVV = tmp1bbab = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Hr1ab += einsum('mfAE,imEf->iA', eris_ovVV, r2aaba)
#:tmp1abaa = lib.einsum('meAF,ijFe->mAij', eris_ovVV, tau2aaba)
#:tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', eris_ovVV, tau2bbab)
#:tmp1ba = lib.einsum('mfAE,mE->Af', eris_ovVV, r1ab)
#:Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a*.5, tmp1abbb)
#:Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a*.5, tmp1abaa)
tmp1ba = np.zeros((nvirb,nvira))
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
Hr1ab += einsum('mfAE,imEf->iA', ovVV, r2aaba[:,p0:p1])
tmp1abaa = lib.einsum('meAF,ijFe->mAij', ovVV, tau2aaba)
tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', ovVV, tau2bbab)
tmp1ba += lib.einsum('mfAE,mE->Af', ovVV, r1ab[p0:p1])
Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a[p0:p1]*.5, tmp1abbb)
Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a[p0:p1]*.5, tmp1abaa)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Hr1ba += einsum('MFae,IMeF->Ia', eris_OVvv, r2bbab)
#:tmp1baaa = lib.einsum('MEaf,ijEf->Maij', eris_OVvv, tau2aaba)
#:tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', eris_OVvv, tau2bbab)
#:tmp1ab = lib.einsum('MFae,Me->aF', eris_OVvv, r1ba)
#:Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b*.5, tmp1baaa)
#:Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b*.5, tmp1babb)
tmp1ab = np.zeros((nvira,nvirb))
blksize = max(int(max_memory*1e6/8/(nvirb*nvira**2*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
Hr1ba += einsum('MFae,IMeF->Ia', OVvv, r2bbab[:,p0:p1])
tmp1baaa = lib.einsum('MEaf,ijEf->Maij', OVvv, tau2aaba)
tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', OVvv, tau2bbab)
tmp1ab+= lib.einsum('MFae,Me->aF', OVvv, r1ba[p0:p1])
Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b[p0:p1]*.5, tmp1baaa)
Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b[p0:p1]*.5, tmp1babb)
Hr2baaa += lib.einsum('aF,jIbF->Ijba', tmp1ab , t2ab)
Hr2bbab -= lib.einsum('aF,IJFB->IJaB', tmp1ab*.5, t2bb)
Hr2abbb += lib.einsum('Af,iJfB->iJBA', tmp1ba , t2ab)
Hr2aaba -= lib.einsum('Af,ijfb->ijAb', tmp1ba*.5, t2aa)
Hr2baaa -= lib.einsum('MbIj,Ma->Ijab', imds.wOvOo, r1ba )
Hr2bbab -= lib.einsum('MBIJ,Ma->IJaB', imds.wOVOO, r1ba*.5)
Hr2abbb -= lib.einsum('mBiJ,mA->iJAB', imds.woVoO, r1ab )
Hr2aaba -= lib.einsum('mbij,mA->ijAb', imds.wovoo, r1ab*.5)
Hr1ab -= 0.5*lib.einsum('mnie,mnAe->iA', imds.wooov, r2aaba)
Hr1ab -= lib.einsum('mNiE,mNAE->iA', imds.woOoV, r2abbb)
Hr1ba -= 0.5*lib.einsum('MNIE,MNaE->Ia', imds.wOOOV, r2bbab)
Hr1ba -= lib.einsum('MnIe,Mnae->Ia', imds.wOoOv, r2baaa)
tmp1ab = lib.einsum('MnIe,Me->nI', imds.wOoOv, r1ba)
tmp1ba = lib.einsum('mNiE,mE->Ni', imds.woOoV, r1ab)
Hr2baaa += lib.einsum('nI,njab->Ijab', tmp1ab*.5, t2aa)
Hr2bbab += lib.einsum('nI,nJaB->IJaB', tmp1ab , t2ab)
Hr2abbb += lib.einsum('Ni,NJAB->iJAB', tmp1ba*.5, t2bb)
Hr2aaba += lib.einsum('Ni,jNbA->ijAb', tmp1ba , t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2baaa += lib.einsum('ejab,Ie->Ijab', imds.wvovv[p0:p1], r1ba[:,p0:p1]*.5)
Hr2bbab += lib.einsum('eJaB,Ie->IJaB', imds.wvOvV[p0:p1], r1ba[:,p0:p1] )
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2abbb += lib.einsum('EJAB,iE->iJAB', imds.wVOVV[p0:p1], r1ab[:,p0:p1]*.5)
Hr2aaba += lib.einsum('EjAb,iE->ijAb', imds.wVoVv[p0:p1], r1ab[:,p0:p1] )
Hr1ab += np.einsum('mAEi,mE->iA', imds.woVVo, r1ab)
Hr1ba += np.einsum('MaeI,Me->Ia', imds.wOvvO, r1ba)
Hr2baaa += lib.einsum('mbej,Imae->Ijab', imds.wovvo, r2baaa)
Hr2baaa += lib.einsum('MbeJ,Miae->Jiab', imds.wOvvO, r2baaa)
Hr2baaa += lib.einsum('MbEj,IMaE->Ijab', imds.wOvVo, r2bbab)
Hr2bbab += lib.einsum('MBEJ,IMaE->IJaB', imds.wOVVO, r2bbab)
Hr2bbab += lib.einsum('MbeJ,IMeA->IJbA', imds.wOvvO, r2bbab)
Hr2bbab += lib.einsum('mBeJ,Imae->IJaB', imds.woVvO, r2baaa)
Hr2aaba += lib.einsum('mbej,imAe->ijAb', imds.wovvo, r2aaba)
Hr2aaba += lib.einsum('mBEj,imEa->ijBa', imds.woVVo, r2aaba)
Hr2aaba += lib.einsum('MbEj,iMAE->ijAb', imds.wOvVo, r2abbb)
Hr2abbb += lib.einsum('MBEJ,iMAE->iJAB', imds.wOVVO, r2abbb)
Hr2abbb += lib.einsum('mBEj,mIAE->jIAB', imds.woVVo, r2abbb)
Hr2abbb += lib.einsum('mBeJ,imAe->iJAB', imds.woVvO, r2aaba)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
tmp1baaa = lib.einsum('nfME,ijEf->Mnij', eris_ovOV, tau2aaba)
tmp1aaba = lib.einsum('menf,Ijef->mnIj', eris_ovov, tau2baaa)
tmp1abbb = lib.einsum('meNF,IJeF->mNIJ', eris_ovOV, tau2bbab)
tmp1bbab = lib.einsum('MENF,iJEF->MNiJ', eris_OVOV, tau2abbb)
Hr2baaa += 0.5*.5*lib.einsum('mnIj,mnab->Ijab', tmp1aaba, tauaa)
Hr2bbab += .5*lib.einsum('nMIJ,nMaB->IJaB', tmp1abbb, tauab)
Hr2aaba += .5*lib.einsum('Nmij,mNbA->ijAb', tmp1baaa, tauab)
Hr2abbb += 0.5*.5*lib.einsum('MNiJ,MNAB->iJAB', tmp1bbab, taubb)
tauaa = tauab = taubb = None
tmpab = lib.einsum('menf,Imef->nI', eris_ovov, r2baaa)
tmpab -= lib.einsum('nfME,IMfE->nI', eris_ovOV, r2bbab)
tmpba = lib.einsum('MENF,iMEF->Ni', eris_OVOV, r2abbb)
tmpba -= lib.einsum('meNF,imFe->Ni', eris_ovOV, r2aaba)
Hr1ab += np.einsum('NA,Ni->iA', t1b, tmpba)
Hr1ba += np.einsum('na,nI->Ia', t1a, tmpab)
Hr2baaa -= lib.einsum('mJ,imab->Jiab', tmpab*.5, t2aa)
Hr2bbab -= lib.einsum('mJ,mIaB->IJaB', tmpab*.5, t2ab) * 2
Hr2aaba -= lib.einsum('Mj,iMbA->ijAb', tmpba*.5, t2ab) * 2
Hr2abbb -= lib.einsum('Mj,IMAB->jIAB', tmpba*.5, t2bb)
tmp1ab = np.einsum('meNF,mF->eN', eris_ovOV, r1ab)
tmp1ba = np.einsum('nfME,Mf->En', eris_ovOV, r1ba)
tmpab = np.einsum('eN,NB->eB', tmp1ab, t1b)
tmpba = np.einsum('En,nb->Eb', tmp1ba, t1a)
tmpab -= lib.einsum('menf,mnBf->eB', eris_ovov, r2aaba)
tmpab += lib.einsum('meNF,mNFB->eB', eris_ovOV, r2abbb)
tmpba -= lib.einsum('MENF,MNbF->Eb', eris_OVOV, r2bbab)
tmpba += lib.einsum('nfME,Mnfb->Eb', eris_ovOV, r2baaa)
Hr2baaa -= lib.einsum('Eb,jIaE->Ijab', tmpba*.5, t2ab) * 2
Hr2bbab -= lib.einsum('Eb,IJAE->IJbA', tmpba*.5, t2bb)
Hr2aaba -= lib.einsum('eB,ijae->ijBa', tmpab*.5, t2aa)
Hr2abbb -= lib.einsum('eB,iJeA->iJAB', tmpab*.5, t2ab) * 2
eris_ovov = eris_OVOV = eris_ovOV = None
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvira)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Hr2baaa += .5*lib.einsum('Ijef,aebf->Ijab', tau2baaa, eris_vvvv)
#:Hr2abbb += .5*lib.einsum('iJEF,AEBF->iJAB', tau2abbb, eris_VVVV)
#:Hr2bbab += .5*lib.einsum('IJeF,aeBF->IJaB', tau2bbab, eris_vvVV)
#:Hr2aaba += .5*lib.einsum('ijEf,bfAE->ijAb', tau2aaba, eris_vvVV)
tau2baaa *= .5
rccsd._add_vvvv1_(self, tau2baaa, eris, Hr2baaa)
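# rccsd._add_vvvv1_ only reads the .vvvv attribute of its eris argument, so a
# bare lambda object serves below as a stand-in "eris" to route the VVVV and
# vvVV integral blocks through the same packed-vvvv contraction code.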
fakeri = lambda:None
fakeri.vvvv = eris.VVVV
tau2abbb *= .5
rccsd._add_vvvv1_(self, tau2abbb, fakeri, Hr2abbb)
fakeri.vvvv = eris.vvVV
tau2bbab *= .5
rccsd._add_vvvv1_(self, tau2bbab, fakeri, Hr2bbab)
fakeri = None
for i in range(nvira):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.vvVV[i0:i0+i+1]))
Hr2aaba[:,:,:,i ] += .5*lib.einsum('ijef,fae->ija', tau2aaba[:,:,:,:i+1], vvv)
Hr2aaba[:,:,:,:i] += .5*lib.einsum('ije,bae->ijab', tau2aaba[:,:,:,i], vvv[:i])
vvv = None
Hr2baaa = Hr2baaa - Hr2baaa.transpose(0,1,3,2)
Hr2bbab = Hr2bbab - Hr2bbab.transpose(1,0,2,3)
Hr2abbb = Hr2abbb - Hr2abbb.transpose(0,1,3,2)
Hr2aaba = Hr2aaba - Hr2aaba.transpose(1,0,2,3)
vector = self.amplitudes_to_vector_eomsf((Hr1ab, Hr1ba), (Hr2baaa,Hr2aaba,Hr2abbb,Hr2bbab))
return vector
def eeccsd_diag(self):
if not hasattr(self,'imds'):
self.imds = _IMDS(self)
if not self.imds.made_ee_imds:
self.imds.make_ee()
imds = self.imds
eris = self.eris
t1, t2 = self.t1, self.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
tauaa, tauab, taubb = make_tau(t2, t1, t1)
nocca, noccb, nvira, nvirb = t2ab.shape
Foa = imds.Fooa.diagonal()
Fob = imds.Foob.diagonal()
Fva = imds.Fvva.diagonal()
Fvb = imds.Fvvb.diagonal()
Wovaa = np.einsum('iaai->ia', imds.wovvo)
Wovbb = np.einsum('iaai->ia', imds.wOVVO)
Wovab = np.einsum('iaai->ia', imds.woVVo)
Wovba = np.einsum('iaai->ia', imds.wOvvO)
Hr1aa = lib.direct_sum('-i+a->ia', Foa, Fva)
Hr1bb = lib.direct_sum('-i+a->ia', Fob, Fvb)
Hr1ab = lib.direct_sum('-i+a->ia', Foa, Fvb)
Hr1ba = lib.direct_sum('-i+a->ia', Fob, Fva)
Hr1aa += Wovaa
Hr1bb += Wovbb
Hr1ab += Wovab
Hr1ba += Wovba
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Wvvaa = .5*np.einsum('mnab,manb->ab', tauaa, eris_ovov)
Wvvbb = .5*np.einsum('mnab,manb->ab', taubb, eris_OVOV)
Wvvab = np.einsum('mNaB,maNB->aB', tauab, eris_ovOV)
ijb = np.einsum('iejb,ijbe->ijb', ovov, t2aa)
IJB = np.einsum('iejb,ijbe->ijb', OVOV, t2bb)
iJB =-np.einsum('ieJB,iJeB->iJB', eris_ovOV, t2ab)
Ijb =-np.einsum('jbIE,jIbE->Ijb', eris_ovOV, t2ab)
iJb =-np.einsum('ibJE,iJbE->iJb', eris_ovOV, t2ab)
IjB =-np.einsum('jeIB,jIeB->IjB', eris_ovOV, t2ab)
jab = np.einsum('kajb,jkab->jab', ovov, t2aa)
JAB = np.einsum('kajb,jkab->jab', OVOV, t2bb)
jAb =-np.einsum('jbKA,jKbA->jAb', eris_ovOV, t2ab)
JaB =-np.einsum('kaJB,kJaB->JaB', eris_ovOV, t2ab)
jaB =-np.einsum('jaKB,jKaB->jaB', eris_ovOV, t2ab)
JAb =-np.einsum('kbJA,kJbA->JAb', eris_ovOV, t2ab)
eris_ovov = eris_ovOV = eris_OVOV = ovov = OVOV = None
Hr2aa = lib.direct_sum('ijb+a->ijba', ijb, Fva)
Hr2bb = lib.direct_sum('ijb+a->ijba', IJB, Fvb)
Hr2ab = lib.direct_sum('iJb+A->iJbA', iJb, Fvb)
Hr2ab+= lib.direct_sum('iJB+a->iJaB', iJB, Fva)
Hr2aa+= lib.direct_sum('-i+jab->ijab', Foa, jab)
Hr2bb+= lib.direct_sum('-i+jab->ijab', Fob, JAB)
Hr2ab+= lib.direct_sum('-i+JaB->iJaB', Foa, JaB)
Hr2ab+= lib.direct_sum('-I+jaB->jIaB', Fob, jaB)
Hr2aa = Hr2aa + Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa + Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb + Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb + Hr2bb.transpose(1,0,2,3)
Hr2aa *= .5
Hr2bb *= .5
Hr2baaa = lib.direct_sum('Ijb+a->Ijba', Ijb, Fva)
Hr2aaba = lib.direct_sum('ijb+A->ijAb', ijb, Fvb)
Hr2aaba+= Fva.reshape(1,1,1,-1)
Hr2abbb = lib.direct_sum('iJB+A->iJBA', iJB, Fvb)
Hr2bbab = lib.direct_sum('IJB+a->IJaB', IJB, Fva)
Hr2bbab+= Fvb.reshape(1,1,1,-1)
Hr2baaa = Hr2baaa + Hr2baaa.transpose(0,1,3,2)
Hr2abbb = Hr2abbb + Hr2abbb.transpose(0,1,3,2)
Hr2baaa+= lib.direct_sum('-I+jab->Ijab', Fob, jab)
Hr2baaa-= Foa.reshape(1,-1,1,1)
tmpaaba = lib.direct_sum('-i+jAb->ijAb', Foa, jAb)
Hr2abbb+= lib.direct_sum('-i+JAB->iJAB', Foa, JAB)
Hr2abbb-= Fob.reshape(1,-1,1,1)
tmpbbab = lib.direct_sum('-I+JaB->IJaB', Fob, JaB)
Hr2aaba+= tmpaaba + tmpaaba.transpose(1,0,2,3)
Hr2bbab+= tmpbbab + tmpbbab.transpose(1,0,2,3)
tmpaaba = tmpbbab = None
Hr2aa += Wovaa.reshape(1,nocca,1,nvira)
Hr2aa += Wovaa.reshape(nocca,1,1,nvira)
Hr2aa += Wovaa.reshape(nocca,1,nvira,1)
Hr2aa += Wovaa.reshape(1,nocca,nvira,1)
Hr2ab += Wovbb.reshape(1,noccb,1,nvirb)
Hr2ab += Wovab.reshape(nocca,1,1,nvirb)
Hr2ab += Wovaa.reshape(nocca,1,nvira,1)
Hr2ab += Wovba.reshape(1,noccb,nvira,1)
Hr2bb += Wovbb.reshape(1,noccb,1,nvirb)
Hr2bb += Wovbb.reshape(noccb,1,1,nvirb)
Hr2bb += Wovbb.reshape(noccb,1,nvirb,1)
Hr2bb += Wovbb.reshape(1,noccb,nvirb,1)
Hr2baaa += Wovaa.reshape(1,nocca,1,nvira)
Hr2baaa += Wovba.reshape(noccb,1,1,nvira)
Hr2baaa += Wovba.reshape(noccb,1,nvira,1)
Hr2baaa += Wovaa.reshape(1,nocca,nvira,1)
Hr2aaba += Wovaa.reshape(1,nocca,1,nvira)
Hr2aaba += Wovaa.reshape(nocca,1,1,nvira)
Hr2aaba += Wovab.reshape(nocca,1,nvirb,1)
Hr2aaba += Wovab.reshape(1,nocca,nvirb,1)
Hr2abbb += Wovbb.reshape(1,noccb,1,nvirb)
Hr2abbb += Wovab.reshape(nocca,1,1,nvirb)
Hr2abbb += Wovab.reshape(nocca,1,nvirb,1)
Hr2abbb += Wovbb.reshape(1,noccb,nvirb,1)
Hr2bbab += Wovbb.reshape(1,noccb,1,nvirb)
Hr2bbab += Wovbb.reshape(noccb,1,1,nvirb)
Hr2bbab += Wovba.reshape(noccb,1,nvira,1)
Hr2bbab += Wovba.reshape(1,noccb,nvira,1)
Wooaa = np.einsum('ijij->ij', imds.woooo).copy()
Wooaa -= np.einsum('ijji->ij', imds.woooo)
Woobb = np.einsum('ijij->ij', imds.wOOOO).copy()
Woobb -= np.einsum('ijji->ij', imds.wOOOO)
Wooab = np.einsum('ijij->ij', imds.woOoO)
Wooba = Wooab.T
Wooaa *= .5
Woobb *= .5
Hr2aa += Wooaa.reshape(nocca,nocca,1,1)
Hr2ab += Wooab.reshape(nocca,noccb,1,1)
Hr2bb += Woobb.reshape(noccb,noccb,1,1)
Hr2baaa += Wooba.reshape(noccb,nocca,1,1)
Hr2aaba += Wooaa.reshape(nocca,nocca,1,1)
Hr2abbb += Wooab.reshape(nocca,noccb,1,1)
Hr2bbab += Woobb.reshape(noccb,noccb,1,1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Wvvaa += np.einsum('mb,maab->ab', t1a, eris_ovvv)
#:Wvvaa -= np.einsum('mb,mbaa->ab', t1a, eris_ovvv)
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
Wvvaa += np.einsum('mb,maab->ab', t1a[p0:p1], ovvv)
Wvvaa -= np.einsum('mb,mbaa->ab', t1a[p0:p1], ovvv)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Wvvbb += np.einsum('mb,maab->ab', t1b, eris_OVVV)
#:Wvvbb -= np.einsum('mb,mbaa->ab', t1b, eris_OVVV)
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
Wvvbb += np.einsum('mb,maab->ab', t1b[p0:p1], OVVV)
Wvvbb -= np.einsum('mb,mbaa->ab', t1b[p0:p1], OVVV)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Wvvab -= np.einsum('mb,mbaa->ba', t1a, eris_ovVV)
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
Wvvab -= np.einsum('mb,mbaa->ba', t1a[p0:p1], ovVV)
ovVV = None
blksize = max(int(max_memory*1e6/8/(nvirb*nvira**2*3)), 2)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Wvvab -= np.einsum('mb,mbaa->ab', t1b, eris_OVvv)
idxa = np.arange(nvira)
idxa = idxa*(idxa+1)//2+idxa
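# idxa picks out the diagonal pairs (a,a) inside the lower-triangular packed
# vv storage: index(a,a) = a*(a+1)//2 + a.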
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1])
Wvvab -= np.einsum('mb,mba->ab', t1b[p0:p1], OVvv[:,:,idxa])
OVvv = None
Wvvaa = Wvvaa + Wvvaa.T
Wvvbb = Wvvbb + Wvvbb.T
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvira)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Wvvaa += np.einsum('aabb->ab', eris_vvvv) - np.einsum('abba->ab', eris_vvvv)
#:Wvvbb += np.einsum('aabb->ab', eris_VVVV) - np.einsum('abba->ab', eris_VVVV)
#:Wvvab += np.einsum('aabb->ab', eris_vvVV)
for i in range(nvira):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.vvvv[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvaa[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvaa[i,:i+1] -= tmp
Wvvaa[:i ,i] -= tmp[:i]
vvv = lib.unpack_tril(np.asarray(eris.vvVV[i0:i0+i+1]))
Wvvab[i] += np.einsum('bb->b', vvv[i])
vvv = None
for i in range(nvirb):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.VVVV[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvbb[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvbb[i,:i+1] -= tmp
Wvvbb[:i ,i] -= tmp[:i]
vvv = None
Wvvba = Wvvab.T
Hr2aa += Wvvaa.reshape(1,1,nvira,nvira)
Hr2ab += Wvvab.reshape(1,1,nvira,nvirb)
Hr2bb += Wvvbb.reshape(1,1,nvirb,nvirb)
Hr2baaa += Wvvaa.reshape(1,1,nvira,nvira)
Hr2aaba += Wvvba.reshape(1,1,nvirb,nvira)
Hr2abbb += Wvvbb.reshape(1,1,nvirb,nvirb)
Hr2bbab += Wvvab.reshape(1,1,nvira,nvirb)
vec_ee = self.amplitudes_to_vector((Hr1aa,Hr1bb), (Hr2aa,Hr2ab,Hr2bb))
vec_sf = self.amplitudes_to_vector_eomsf((Hr1ab,Hr1ba), (Hr2baaa,Hr2aaba,Hr2abbb,Hr2bbab))
return vec_ee, vec_sf
def amplitudes_to_vector_ee(self, t1, t2, out=None):
return self.amplitudes_to_vector_s4(t1, t2, out)
def vector_to_amplitudes_ee(self, vector, nocc=None, nvir=None):
return self.vector_to_amplitudes_s4(vector, nocc, nvir)
def amplitudes_to_vector(self, t1, t2, out=None):
nocca, nvira = t1[0].shape
noccb, nvirb = t1[1].shape
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
sizeab = nocca * noccb * nvira * nvirb
vector = np.ndarray(sizea+sizeb+sizeab, t2[0].dtype, buffer=out)
self.amplitudes_to_vector_ee(t1[0], t2[0], out=vector[:sizea])
self.amplitudes_to_vector_ee(t1[1], t2[2], out=vector[sizea:])
vector[sizea+sizeb:] = t2[1].ravel()
return vector
def vector_to_amplitudes(self, vector, nocc=None, nvir=None):
if nocc is None:
nocca, noccb = self.get_nocc()
else:
nocca, noccb = nocc
if nvir is None:
nmoa, nmob = self.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
else:
nvira, nvirb = nvir
nocc = nocca + noccb
nvir = nvira + nvirb
nov = nocc * nvir
size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
if vector.size == size:
return self.vector_to_amplitudes_ee(vector, nocc, nvir)
else:
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
sizeab = nocca * noccb * nvira * nvirb
t1a, t2aa = self.vector_to_amplitudes_ee(vector[:sizea], nocca, nvira)
t1b, t2bb = self.vector_to_amplitudes_ee(vector[sizea:sizea+sizeb], noccb, nvirb)
t2ab = vector[-sizeab:].copy().reshape(nocca,noccb,nvira,nvirb)
return (t1a,t1b), (t2aa,t2ab,t2bb)
def amplitudes_from_rccsd(self, t1, t2):
'''Convert spatial orbital T1,T2 to spin-orbital T1,T2'''
return addons.spatial2spin(t1), addons.spatial2spin(t2)
def spatial2spin(self, tx, orbspin=None):
if orbspin is None: orbspin = self.orbspin
return spatial2spin(tx, orbspin)
def spin2spatial(self, tx, orbspin=None):
if orbspin is None: orbspin = self.orbspin
return spin2spatial(tx, orbspin)
def amplitudes_to_vector_eomsf(self, t1, t2, out=None):
t1ab, t1ba = t1
t2baaa, t2aaba, t2abbb, t2bbab = t2
nocca, nvirb = t1ab.shape
noccb, nvira = t1ba.shape
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
size = t1ab.size + t1ba.size + nbaaa + naaba + nabbb + nbbab
vector = numpy.ndarray(size, t2baaa.dtype, buffer=out)
vector[:t1ab.size] = t1ab.ravel()
vector[t1ab.size:t1ab.size+t1ba.size] = t1ba.ravel()
pvec = vector[t1ab.size+t1ba.size:]
t2baaa = t2baaa.reshape(noccb*nocca,nvira*nvira)
t2aaba = t2aaba.reshape(nocca*nocca,nvirb*nvira)
t2abbb = t2abbb.reshape(nocca*noccb,nvirb*nvirb)
t2bbab = t2bbab.reshape(noccb*noccb,nvira*nvirb)
otrila = numpy.tril_indices(nocca, k=-1)
otrilb = numpy.tril_indices(noccb, k=-1)
vtrila = numpy.tril_indices(nvira, k=-1)
vtrilb = numpy.tril_indices(nvirb, k=-1)
oidxab = np.arange(nocca*noccb, dtype=numpy.int32)
vidxab = np.arange(nvira*nvirb, dtype=numpy.int32)
lib.take_2d(t2baaa, oidxab, vtrila[0]*nvira+vtrila[1], out=pvec)
lib.take_2d(t2aaba, otrila[0]*nocca+otrila[1], vidxab, out=pvec[nbaaa:])
lib.take_2d(t2abbb, oidxab, vtrilb[0]*nvirb+vtrilb[1], out=pvec[nbaaa+naaba:])
lib.take_2d(t2bbab, otrilb[0]*noccb+otrilb[1], vidxab, out=pvec[nbaaa+naaba+nabbb:])
return vector
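# Spin-flip packing order used above: t1ab, then t1ba, then the four R2
# blocks with their antisymmetric index pairs stored in strict
# lower-triangular form (a>b virtual pairs for t2baaa/t2abbb, i>j occupied
# pairs for t2aaba/t2bbab); vector_to_amplitudes_eomsf inverts this.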
def vector_to_amplitudes_eomsf(self, vector, nocc=None, nvir=None):
if nocc is None:
nocca, noccb = self.get_nocc()
else:
nocca, noccb = nocc
if nvir is None:
nmoa, nmob = self.get_nmo()
nvira, nvirb = nmoa-nocca, nmob-noccb
else:
nvira, nvirb = nvir
t1ab = vector[:nocca*nvirb].reshape(nocca,nvirb).copy()
t1ba = vector[nocca*nvirb:nocca*nvirb+noccb*nvira].reshape(noccb,nvira).copy()
pvec = vector[t1ab.size+t1ba.size:]
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
t2baaa = np.zeros((noccb*nocca,nvira*nvira), dtype=vector.dtype)
t2aaba = np.zeros((nocca*nocca,nvirb*nvira), dtype=vector.dtype)
t2abbb = np.zeros((nocca*noccb,nvirb*nvirb), dtype=vector.dtype)
t2bbab = np.zeros((noccb*noccb,nvira*nvirb), dtype=vector.dtype)
otrila = numpy.tril_indices(nocca, k=-1)
otrilb = numpy.tril_indices(noccb, k=-1)
vtrila = numpy.tril_indices(nvira, k=-1)
vtrilb = numpy.tril_indices(nvirb, k=-1)
oidxab = np.arange(nocca*noccb, dtype=numpy.int32)
vidxab = np.arange(nvira*nvirb, dtype=numpy.int32)
v = pvec[:nbaaa].reshape(noccb*nocca,-1)
lib.takebak_2d(t2baaa, v, oidxab, vtrila[0]*nvira+vtrila[1])
lib.takebak_2d(t2baaa,-v, oidxab, vtrila[1]*nvira+vtrila[0])
v = pvec[nbaaa:nbaaa+naaba].reshape(-1,nvirb*nvira)
lib.takebak_2d(t2aaba, v, otrila[0]*nocca+otrila[1], vidxab)
lib.takebak_2d(t2aaba,-v, otrila[1]*nocca+otrila[0], vidxab)
v = pvec[nbaaa+naaba:nbaaa+naaba+nabbb].reshape(nocca*noccb,-1)
lib.takebak_2d(t2abbb, v, oidxab, vtrilb[0]*nvirb+vtrilb[1])
lib.takebak_2d(t2abbb,-v, oidxab, vtrilb[1]*nvirb+vtrilb[0])
v = pvec[nbaaa+naaba+nabbb:].reshape(-1,nvira*nvirb)
lib.takebak_2d(t2bbab, v, otrilb[0]*noccb+otrilb[1], vidxab)
lib.takebak_2d(t2bbab,-v, otrilb[1]*noccb+otrilb[0], vidxab)
t2baaa = t2baaa.reshape(noccb,nocca,nvira,nvira)
t2aaba = t2aaba.reshape(nocca,nocca,nvirb,nvira)
t2abbb = t2abbb.reshape(nocca,noccb,nvirb,nvirb)
t2bbab = t2bbab.reshape(noccb,noccb,nvira,nvirb)
return (t1ab,t1ba), (t2baaa, t2aaba, t2abbb, t2bbab)
def spatial2spin_eomsf(self, rx, orbspin):
'''Convert EOM spatial R1,R2 to spin-orbital R1,R2'''
if len(rx) == 2: # r1
r1ab, r1ba = rx
nocca, nvirb = r1ab.shape
noccb, nvira = r1ba.shape
else:
r2baaa,r2aaba,r2abbb,r2bbab = rx
noccb, nocca, nvira = r2baaa.shape[:3]
nvirb = r2aaba.shape[2]
nocc = nocca + noccb
nvir = nvira + nvirb
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
if len(rx) == 2: # r1
r1 = np.zeros((nocc,nvir), dtype=r1ab.dtype)
lib.takebak_2d(r1, r1ab, idxoa, idxvb)
lib.takebak_2d(r1, r1ba, idxob, idxva)
return r1
else:
r2 = np.zeros((nocc**2,nvir**2), dtype=r2aaba.dtype)
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2baaa = r2baaa.reshape(noccb*nocca,nvira*nvira)
r2aaba = r2aaba.reshape(nocca*nocca,nvirb*nvira)
r2abbb = r2abbb.reshape(nocca*noccb,nvirb*nvirb)
r2bbab = r2bbab.reshape(noccb*noccb,nvira*nvirb)
lib.takebak_2d(r2, r2baaa, idxoba.ravel(), idxvaa.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.ravel(), idxvba.ravel())
lib.takebak_2d(r2, r2abbb, idxoab.ravel(), idxvbb.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.ravel(), idxvab.ravel())
lib.takebak_2d(r2, r2baaa, idxoab.T.ravel(), idxvaa.T.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.T.ravel(), idxvab.T.ravel())
lib.takebak_2d(r2, r2abbb, idxoba.T.ravel(), idxvbb.T.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.T.ravel(), idxvba.T.ravel())
return r2.reshape(nocc,nocc,nvir,nvir)
def spin2spatial_eomsf(self, rx, orbspin):
'''Convert EOM spin-orbital R1,R2 to spatial R1,R2'''
if rx.ndim == 2: # r1
nocc, nvir = rx.shape
else:
nocc, nvir = rx.shape[1:3]
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
nocca = len(idxoa)
noccb = len(idxob)
nvira = len(idxva)
nvirb = len(idxvb)
if rx.ndim == 2:
r1ab = lib.take_2d(rx, idxoa, idxvb)
r1ba = lib.take_2d(rx, idxob, idxva)
return r1ab, r1ba
else:
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2 = rx.reshape(nocc**2,nvir**2)
r2baaa = lib.take_2d(r2, idxoba.ravel(), idxvaa.ravel())
r2aaba = lib.take_2d(r2, idxoaa.ravel(), idxvba.ravel())
r2abbb = lib.take_2d(r2, idxoab.ravel(), idxvbb.ravel())
r2bbab = lib.take_2d(r2, idxobb.ravel(), idxvab.ravel())
r2baaa = r2baaa.reshape(noccb,nocca,nvira,nvira)
r2aaba = r2aaba.reshape(nocca,nocca,nvirb,nvira)
r2abbb = r2abbb.reshape(nocca,noccb,nvirb,nvirb)
r2bbab = r2bbab.reshape(noccb,noccb,nvira,nvirb)
return r2baaa,r2aaba,r2abbb,r2bbab
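# Minimal round-trip sketch (hypothetical arrays, assuming `mycc` carries a
# valid `orbspin`): spatial2spin_eomsf followed by spin2spatial_eomsf should
# reproduce the original spatial blocks, e.g.
#   r1 = mycc.spatial2spin_eomsf((r1ab, r1ba), mycc.orbspin)
#   r1ab2, r1ba2 = mycc.spin2spatial_eomsf(r1, mycc.orbspin)
#   assert abs(r1ab2 - r1ab).max() < 1e-12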
# Spin-orbital integral container: all blocks are stored in one interleaved
# alpha+beta spin-orbital basis, ordered by orbital energy.
class _ERISspin:
def __init__(self, cc, mo_coeff=None, method='incore',
ao2mofn=ao2mo.outcore.general_iofree):
cput0 = (time.process_time(), time.time())  # time.clock() was removed in Python 3.8
log = logger.Logger(cc.stdout, cc.verbose)
moidx = get_umoidx(cc)
if mo_coeff is None:
self.mo_coeff = mo_coeff = [cc.mo_coeff[0][:,moidx[0]],
cc.mo_coeff[1][:,moidx[1]]]
else:
self.mo_coeff = mo_coeff = [mo_coeff[0][:,moidx[0]],
mo_coeff[1][:,moidx[1]]]
nocc = cc.nocc
nmo = cc.nmo
nvir = nmo - nocc
mem_incore, mem_outcore, mem_basic = rccsd._mem_usage(nocc, nvir)
mem_now = lib.current_memory()[0]
self.fock, so_coeff, self.orbspin = uspatial2spin(cc, moidx, mo_coeff)
if (cc.orbspin is None or cc.orbspin.size != self.orbspin.size or
any(cc.orbspin != self.orbspin)):
log.warn('Overwrite cc.orbspin by _ERIS.')
cc.orbspin = self.orbspin
self.feri = lib.H5TmpFile()
if 0 and hasattr(cc._scf, 'with_df') and cc._scf.with_df:  # DF path: intentionally disabled placeholder
pass
elif (method == 'incore' and cc._scf._eri is not None and
(mem_incore+mem_now < cc.max_memory) or cc.mol.incore_anyway):
idxa = self.orbspin == 0
idxb = self.orbspin == 1
moa = so_coeff[:,idxa]
mob = so_coeff[:,idxb]
nmoa = moa.shape[1]
nmob = mob.shape[1]
maska = numpy.where((idxa.reshape(-1,1) & idxa).ravel())[0]
maskb = numpy.where((idxb.reshape(-1,1) & idxb).ravel())[0]
eri = numpy.zeros((nmo*nmo,nmo*nmo))
eri_aa = ao2mo.restore(1, ao2mo.full(cc._scf._eri, moa), nmoa)
lib.takebak_2d(eri, eri_aa.reshape(nmoa**2,-1), maska, maska)
eri_bb = ao2mo.restore(1, ao2mo.full(cc._scf._eri, mob), nmob)
lib.takebak_2d(eri, eri_bb.reshape(nmob**2,-1), maskb, maskb)
eri_ab = ao2mo.general(cc._scf._eri, (moa,moa,mob,mob), compact=False)
eri_ba = lib.transpose(eri_ab)
lib.takebak_2d(eri, eri_ab, maska, maskb)
lib.takebak_2d(eri, eri_ba, maskb, maska)
eri = eri.reshape(nmo,nmo,nmo,nmo)
self.oooo = eri[:nocc,:nocc,:nocc,:nocc].copy()
self.ooov = eri[:nocc,:nocc,:nocc,nocc:].copy()
self.ovoo = eri[:nocc,nocc:,:nocc,:nocc].copy()
self.oovo = eri[:nocc,:nocc,nocc:,:nocc].copy()
self.ovov = eri[:nocc,nocc:,:nocc,nocc:].copy()
self.oovv = eri[:nocc,:nocc,nocc:,nocc:].copy()
self.ovvo = eri[:nocc,nocc:,nocc:,:nocc].copy()
self.ovvv = eri[:nocc,nocc:,nocc:,nocc:].copy()
self.vvvv = eri[nocc:,nocc:,nocc:,nocc:].copy()
else:
orbo = so_coeff[:,:nocc]
orbv = so_coeff[:,nocc:]
self.dtype = so_coeff.dtype
ds_type = so_coeff.dtype.char
self.oooo = self.feri.create_dataset('oooo', (nocc,nocc,nocc,nocc), ds_type)
self.ooov = self.feri.create_dataset('ooov', (nocc,nocc,nocc,nvir), ds_type)
self.ovoo = self.feri.create_dataset('ovoo', (nocc,nvir,nocc,nocc), ds_type)
self.oovo = self.feri.create_dataset('oovo', (nocc,nocc,nvir,nocc), ds_type)
self.ovov = self.feri.create_dataset('ovov', (nocc,nvir,nocc,nvir), ds_type)
self.oovv = self.feri.create_dataset('oovv', (nocc,nocc,nvir,nvir), ds_type)
self.ovvo = self.feri.create_dataset('ovvo', (nocc,nvir,nvir,nocc), ds_type)
self.ovvv = self.feri.create_dataset('ovvv', (nocc,nvir,nvir,nvir), ds_type)
self.vvvv = self.feri.create_dataset('vvvv', (nvir,nvir,nvir,nvir), ds_type)
idxoa = self.orbspin[:nocc] == 0
idxob = self.orbspin[:nocc] == 1
idxva = self.orbspin[nocc:] == 0
idxvb = self.orbspin[nocc:] == 1
idxa = self.orbspin == 0
idxb = self.orbspin == 1
orbo_a = orbo[:,idxoa]
orbo_b = orbo[:,idxob]
orbv_a = orbv[:,idxva]
orbv_b = orbv[:,idxvb]
moa = so_coeff[:,idxa]
mob = so_coeff[:,idxb]
nocca = orbo_a.shape[1]
noccb = orbo_b.shape[1]
nvira = orbv_a.shape[1]
nvirb = orbv_b.shape[1]
nmoa = moa.shape[1]
nmob = mob.shape[1]
cput1 = time.process_time(), time.time()
# <ij||pq> = <ij|pq> - <ij|qp> = (ip|jq) - (iq|jp)
tmpfile2 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
ao2mo.general(cc.mol, (orbo_a,moa,moa,moa), tmpfile2.name, 'aa')
ao2mo.general(cc.mol, (orbo_a,moa,mob,mob), tmpfile2.name, 'ab')
ao2mo.general(cc.mol, (orbo_b,mob,moa,moa), tmpfile2.name, 'ba')
ao2mo.general(cc.mol, (orbo_b,mob,mob,mob), tmpfile2.name, 'bb')
with h5py.File(tmpfile2.name, 'r') as f:  # h5py >= 3 requires an explicit mode
maska1 = numpy.where(idxa)[0]
maskb1 = numpy.where(idxb)[0]
maska2 = numpy.where((idxa.reshape(-1,1) & idxa).ravel())[0]
maskb2 = numpy.where((idxb.reshape(-1,1) & idxb).ravel())[0]
bufv = numpy.empty((nmo*nmo*nmo))
for i in range(nocc):
buf = numpy.zeros((nmo,nmo*nmo))
if self.orbspin[i] == 0: # alpha
ia = numpy.count_nonzero(idxoa[:i])
v1 = f['aa'][ia*nmoa:ia*nmoa+nmoa]
v1 = lib.unpack_tril(v1, out=bufv).reshape(nmoa,-1)
lib.takebak_2d(buf, v1, maska1, maska2)
v1 = f['ab'][ia*nmoa:ia*nmoa+nmoa]
v1 = lib.unpack_tril(v1, out=bufv).reshape(nmoa,-1)
lib.takebak_2d(buf, v1, maska1, maskb2)
else:
ib = numpy.count_nonzero(idxob[:i])
v1 = f['ba'][ib*nmob:ib*nmob+nmob]
v1 = lib.unpack_tril(v1, out=bufv).reshape(nmob,-1)
lib.takebak_2d(buf, v1, maskb1, maska2)
v1 = f['bb'][ib*nmob:ib*nmob+nmob]
v1 = lib.unpack_tril(v1, out=bufv).reshape(nmob,-1)
lib.takebak_2d(buf, v1, maskb1, maskb2)
buf = buf.reshape(nmo,nmo,nmo)
self.oooo[i] = buf[:nocc,:nocc,:nocc]
self.ooov[i] = buf[:nocc,:nocc,nocc:]
self.ovoo[i] = buf[nocc:,:nocc,:nocc]
self.ovov[i] = buf[nocc:,:nocc,nocc:]
self.oovo[i] = buf[:nocc,nocc:,:nocc]
self.oovv[i] = buf[:nocc,nocc:,nocc:]
self.ovvo[i] = buf[nocc:,nocc:,:nocc]
self.ovvv[i] = buf[nocc:,nocc:,nocc:]
buf = None
bufv = None
cput1 = log.timer_debug1('transforming oopq, ovpq', *cput1)
ao2mo.full(cc.mol, orbv_a, tmpfile2.name, 'aa', compact=False)
ao2mo.full(cc.mol, orbv_b, tmpfile2.name, 'bb', compact=False)
ao2mo.general(cc.mol, (orbv_a,orbv_a,orbv_b,orbv_b), tmpfile2.name, 'ab', compact=False)
ao2mo.general(cc.mol, (orbv_b,orbv_b,orbv_a,orbv_a), tmpfile2.name, 'ba', compact=False)
with h5py.File(tmpfile2.name, 'r') as f:
maska1 = numpy.where(idxva)[0]
maskb1 = numpy.where(idxvb)[0]
maska2 = numpy.where((idxva.reshape(-1,1) & idxva).ravel())[0]
maskb2 = numpy.where((idxvb.reshape(-1,1) & idxvb).ravel())[0]
for i in range(nvir):
buf = numpy.zeros((nvir,nvir*nvir))
if idxva[i]: # alpha
ia = numpy.count_nonzero(idxva[:i])
v1 = f['aa'][ia*nvira:ia*nvira+nvira]
lib.takebak_2d(buf, v1, maska1, maska2)
v1 = f['ab'][ia*nvira:ia*nvira+nvira]
lib.takebak_2d(buf, v1, maska1, maskb2)
else:
ib = numpy.count_nonzero(idxvb[:i])
v1 = f['ba'][ib*nvirb:ib*nvirb+nvirb]
lib.takebak_2d(buf, v1, maskb1, maska2)
v1 = f['bb'][ib*nvirb:ib*nvirb+nvirb]
lib.takebak_2d(buf, v1, maskb1, maskb2)
buf = buf.reshape(nvir,nvir,nvir)
self.vvvv[i] = buf
buf = None
cput1 = log.timer_debug1('transforming vvvv', *cput1)
log.timer('CCSD integral transformation', *cput0)
# Spin-separated integral container: pure-alpha (oooo, ...), pure-beta
# (OOOO, ...) and mixed-spin (ooOV, OVvv, ...) blocks are stored separately.
class _ERIS:
def __init__(self, cc, mo_coeff=None, method='incore',
ao2mofn=ao2mo.outcore.general_iofree):
cput0 = (time.process_time(), time.time())
log = logger.Logger(cc.stdout, cc.verbose)
moidx = get_umoidx(cc)
if mo_coeff is None:
self.mo_coeff = mo_coeff = [cc.mo_coeff[0][:,moidx[0]],
cc.mo_coeff[1][:,moidx[1]]]
else:
self.mo_coeff = mo_coeff = [mo_coeff[0][:,moidx[0]],
mo_coeff[1][:,moidx[1]]]
nocc = cc.nocc
nmo = cc.nmo
nvir = nmo - nocc
mem_incore, mem_outcore, mem_basic = rccsd._mem_usage(nocc, nvir)
mem_now = lib.current_memory()[0]
fock, so_coeff, self.orbspin = uspatial2spin(cc, moidx, mo_coeff)
idxa = self.orbspin == 0
idxb = self.orbspin == 1
self.focka = fock[idxa][:,idxa]
self.fockb = fock[idxb][:,idxb]
if (cc.orbspin is None or cc.orbspin.size != self.orbspin.size or
any(cc.orbspin != self.orbspin)):
log.warn('Overwrite cc.orbspin by _ERIS.')
cc.orbspin = self.orbspin
self.feri = lib.H5TmpFile()
if 0 and hasattr(cc._scf, 'with_df') and cc._scf.with_df:  # DF path: intentionally disabled placeholder
pass
elif (method == 'incore' and cc._scf._eri is not None and
(mem_incore+mem_now < cc.max_memory) or cc.mol.incore_anyway):
moa = so_coeff[:,idxa]
mob = so_coeff[:,idxb]
nmoa = moa.shape[1]
nmob = mob.shape[1]
eri_aa = ao2mo.restore(1, ao2mo.full(cc._scf._eri, moa), nmoa)
eri_bb = ao2mo.restore(1, ao2mo.full(cc._scf._eri, mob), nmob)
eri_ab = ao2mo.general(cc._scf._eri, (moa,moa,mob,mob), compact=False)
eri_ba = lib.transpose(eri_ab)
nocca = np.count_nonzero(self.orbspin[:nocc] == 0)
noccb = np.count_nonzero(self.orbspin[:nocc] == 1)
nvira = np.count_nonzero(self.orbspin[nocc:] == 0)
nvirb = np.count_nonzero(self.orbspin[nocc:] == 1)
nmoa = nocca + nvira
nmob = noccb + nvirb
eri_aa = eri_aa.reshape(nmoa,nmoa,nmoa,nmoa)
eri_ab = eri_ab.reshape(nmoa,nmoa,nmob,nmob)
eri_ba = eri_ba.reshape(nmob,nmob,nmoa,nmoa)
eri_bb = eri_bb.reshape(nmob,nmob,nmob,nmob)
self.oooo = eri_aa[:nocca,:nocca,:nocca,:nocca].copy()
self.ooov = eri_aa[:nocca,:nocca,:nocca,nocca:].copy()
self.ovoo = eri_aa[:nocca,nocca:,:nocca,:nocca].copy()
self.oovo = eri_aa[:nocca,:nocca,nocca:,:nocca].copy()
self.ovov = eri_aa[:nocca,nocca:,:nocca,nocca:].copy()
self.oovv = eri_aa[:nocca,:nocca,nocca:,nocca:].copy()
self.ovvo = eri_aa[:nocca,nocca:,nocca:,:nocca].copy()
ovvv = eri_aa[:nocca,nocca:,nocca:,nocca:].reshape(-1,nvira,nvira)
self.ovvv = lib.pack_tril(ovvv).reshape(nocca,nvira,-1)
ovvv = None
self.vvvv = ao2mo.restore(4, eri_aa[nocca:,nocca:,nocca:,nocca:].copy(), nvira)
self.OOOO = eri_bb[:noccb,:noccb,:noccb,:noccb].copy()
self.OOOV = eri_bb[:noccb,:noccb,:noccb,noccb:].copy()
self.OVOO = eri_bb[:noccb,noccb:,:noccb,:noccb].copy()
self.OOVO = eri_bb[:noccb,:noccb,noccb:,:noccb].copy()
self.OVOV = eri_bb[:noccb,noccb:,:noccb,noccb:].copy()
self.OOVV = eri_bb[:noccb,:noccb,noccb:,noccb:].copy()
self.OVVO = eri_bb[:noccb,noccb:,noccb:,:noccb].copy()
OVVV = eri_bb[:noccb,noccb:,noccb:,noccb:].reshape(-1,nvirb,nvirb)
self.OVVV = lib.pack_tril(OVVV).reshape(noccb,nvirb,-1)
OVVV = None
self.VVVV = ao2mo.restore(4, eri_bb[noccb:,noccb:,noccb:,noccb:].copy(), nvirb)
self.ooOO = eri_ab[:nocca,:nocca,:noccb,:noccb].copy()
self.ooOV = eri_ab[:nocca,:nocca,:noccb,noccb:].copy()
self.ovOO = eri_ab[:nocca,nocca:,:noccb,:noccb].copy()
self.ooVO = eri_ab[:nocca,:nocca,noccb:,:noccb].copy()
self.ovOV = eri_ab[:nocca,nocca:,:noccb,noccb:].copy()
self.ooVV = eri_ab[:nocca,:nocca,noccb:,noccb:].copy()
self.ovVO = eri_ab[:nocca,nocca:,noccb:,:noccb].copy()
ovVV = eri_ab[:nocca,nocca:,noccb:,noccb:].reshape(-1,nvirb,nvirb)
self.ovVV = lib.pack_tril(ovVV).reshape(nocca,nvira,-1)
ovVV = None
vvVV = eri_ab[nocca:,nocca:,noccb:,noccb:].reshape(nvira**2,-1)
idxa = np.tril_indices(nvira)
idxb = np.tril_indices(nvirb)
self.vvVV = lib.take_2d(vvVV, idxa[0]*nvira+idxa[1], idxb[0]*nvirb+idxb[1])
#self.OOoo = eri_ba[:noccb,:noccb,:nocca,:nocca].copy()
self.OOov = eri_ba[:noccb,:noccb,:nocca,nocca:].copy()
self.OVoo = eri_ba[:noccb,noccb:,:nocca,:nocca].copy()
self.OOvo = eri_ba[:noccb,:noccb,nocca:,:nocca].copy()
#self.OVov = eri_ba[:noccb,noccb:,:nocca,nocca:].copy()
self.OOvv = eri_ba[:noccb,:noccb,nocca:,nocca:].copy()
self.OVvo = eri_ba[:noccb,noccb:,nocca:,:nocca].copy()
#self.OVvv = eri_ba[:noccb,noccb:,nocca:,nocca:].copy()
OVvv = eri_ba[:noccb,noccb:,nocca:,nocca:].reshape(-1,nvira,nvira)
self.OVvv = lib.pack_tril(OVvv).reshape(noccb,nvirb,-1)
OVvv = None
#self.VVvv = eri_ba[noccb:,noccb:,nocca:,nocca:].copy()
else:
moa = so_coeff[:,idxa]
mob = so_coeff[:,idxb]
nmoa = moa.shape[1]
nmob = mob.shape[1]
nocca = int(cc.mo_occ[0][moidx[0]].sum())
noccb = int(cc.mo_occ[1][moidx[1]].sum())
nvira = nmoa - nocca
nvirb = nmob - noccb
orboa = moa[:,:nocca]
orbob = mob[:,:noccb]
orbva = moa[:,nocca:]
orbvb = mob[:,noccb:]
self.dtype = so_coeff.dtype
ds_type = so_coeff.dtype.char
self.oooo = self.feri.create_dataset('oooo', (nocca,nocca,nocca,nocca), ds_type)
self.ooov = self.feri.create_dataset('ooov', (nocca,nocca,nocca,nvira), ds_type)
self.ovoo = self.feri.create_dataset('ovoo', (nocca,nvira,nocca,nocca), ds_type)
self.oovo = self.feri.create_dataset('oovo', (nocca,nocca,nvira,nocca), ds_type)
self.ovov = self.feri.create_dataset('ovov', (nocca,nvira,nocca,nvira), ds_type)
self.oovv = self.feri.create_dataset('oovv', (nocca,nocca,nvira,nvira), ds_type)
self.ovvo = self.feri.create_dataset('ovvo', (nocca,nvira,nvira,nocca), ds_type)
self.ovvv = self.feri.create_dataset('ovvv', (nocca,nvira,nvira*(nvira+1)//2), ds_type)
#self.vvvv = self.feri.create_dataset('vvvv', (nvira,nvira,nvira,nvira), ds_type)
self.OOOO = self.feri.create_dataset('OOOO', (noccb,noccb,noccb,noccb), ds_type)
self.OOOV = self.feri.create_dataset('OOOV', (noccb,noccb,noccb,nvirb), ds_type)
self.OVOO = self.feri.create_dataset('OVOO', (noccb,nvirb,noccb,noccb), ds_type)
self.OOVO = self.feri.create_dataset('OOVO', (noccb,noccb,nvirb,noccb), ds_type)
self.OVOV = self.feri.create_dataset('OVOV', (noccb,nvirb,noccb,nvirb), ds_type)
self.OOVV = self.feri.create_dataset('OOVV', (noccb,noccb,nvirb,nvirb), ds_type)
self.OVVO = self.feri.create_dataset('OVVO', (noccb,nvirb,nvirb,noccb), ds_type)
self.OVVV = self.feri.create_dataset('OVVV', (noccb,nvirb,nvirb*(nvirb+1)//2), ds_type)
#self.VVVV = self.feri.create_dataset('VVVV', (nvirb,nvirb,nvirb,nvirb), ds_type)
self.ooOO = self.feri.create_dataset('ooOO', (nocca,nocca,noccb,noccb), ds_type)
self.ooOV = self.feri.create_dataset('ooOV', (nocca,nocca,noccb,nvirb), ds_type)
self.ovOO = self.feri.create_dataset('ovOO', (nocca,nvira,noccb,noccb), ds_type)
self.ooVO = self.feri.create_dataset('ooVO', (nocca,nocca,nvirb,noccb), ds_type)
self.ovOV = self.feri.create_dataset('ovOV', (nocca,nvira,noccb,nvirb), ds_type)
self.ooVV = self.feri.create_dataset('ooVV', (nocca,nocca,nvirb,nvirb), ds_type)
self.ovVO = self.feri.create_dataset('ovVO', (nocca,nvira,nvirb,noccb), ds_type)
self.ovVV = self.feri.create_dataset('ovVV', (nocca,nvira,nvirb*(nvirb+1)//2), ds_type)
#self.vvVV = self.feri.create_dataset('vvVV', (nvira,nvira,nvirb,nvirb), ds_type)
self.OOov = self.feri.create_dataset('OOov', (noccb,noccb,nocca,nvira), ds_type)
self.OVoo = self.feri.create_dataset('OVoo', (noccb,nvirb,nocca,nocca), ds_type)
self.OOvo = self.feri.create_dataset('OOvo', (noccb,noccb,nvira,nocca), ds_type)
self.OOvv = self.feri.create_dataset('OOvv', (noccb,noccb,nvira,nvira), ds_type)
self.OVvo = self.feri.create_dataset('OVvo', (noccb,nvirb,nvira,nocca), ds_type)
self.OVvv = self.feri.create_dataset('OVvv', (noccb,nvirb,nvira*(nvira+1)//2), ds_type)
cput1 = time.process_time(), time.time()
# <ij||pq> = <ij|pq> - <ij|qp> = (ip|jq) - (iq|jp)
tmpfile2 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
ao2mo.general(cc.mol, (orboa,moa,moa,moa), tmpfile2.name, 'aa')
with h5py.File(tmpfile2.name, 'a') as f:  # 'a' mode: datasets are deleted below
buf = numpy.empty((nmoa,nmoa,nmoa))
for i in range(nocca):
lib.unpack_tril(f['aa'][i*nmoa:(i+1)*nmoa], out=buf)
self.oooo[i] = buf[:nocca,:nocca,:nocca]
self.ooov[i] = buf[:nocca,:nocca,nocca:]
self.ovoo[i] = buf[nocca:,:nocca,:nocca]
self.ovov[i] = buf[nocca:,:nocca,nocca:]
self.oovo[i] = buf[:nocca,nocca:,:nocca]
self.oovv[i] = buf[:nocca,nocca:,nocca:]
self.ovvo[i] = buf[nocca:,nocca:,:nocca]
self.ovvv[i] = lib.pack_tril(buf[nocca:,nocca:,nocca:])
del f['aa']
buf = None
ao2mo.general(cc.mol, (orbob,mob,mob,mob), tmpfile2.name, 'bb')
with h5py.File(tmpfile2.name, 'a') as f:
buf = numpy.empty((nmob,nmob,nmob))
for i in range(noccb):
lib.unpack_tril(f['bb'][i*nmob:(i+1)*nmob], out=buf)
self.OOOO[i] = buf[:noccb,:noccb,:noccb]
self.OOOV[i] = buf[:noccb,:noccb,noccb:]
self.OVOO[i] = buf[noccb:,:noccb,:noccb]
self.OVOV[i] = buf[noccb:,:noccb,noccb:]
self.OOVO[i] = buf[:noccb,noccb:,:noccb]
self.OOVV[i] = buf[:noccb,noccb:,noccb:]
self.OVVO[i] = buf[noccb:,noccb:,:noccb]
self.OVVV[i] = lib.pack_tril(buf[noccb:,noccb:,noccb:])
del f['bb']
buf = None
ao2mo.general(cc.mol, (orboa,moa,mob,mob), tmpfile2.name, 'ab')
with h5py.File(tmpfile2.name, 'a') as f:
buf = numpy.empty((nmoa,nmob,nmob))
for i in range(nocca):
lib.unpack_tril(f['ab'][i*nmoa:(i+1)*nmoa], out=buf)
self.ooOO[i] = buf[:nocca,:noccb,:noccb]
self.ooOV[i] = buf[:nocca,:noccb,noccb:]
self.ovOO[i] = buf[nocca:,:noccb,:noccb]
self.ovOV[i] = buf[nocca:,:noccb,noccb:]
self.ooVO[i] = buf[:nocca,noccb:,:noccb]
self.ooVV[i] = buf[:nocca,noccb:,noccb:]
self.ovVO[i] = buf[nocca:,noccb:,:noccb]
self.ovVV[i] = lib.pack_tril(buf[nocca:,noccb:,noccb:])
del f['ab']
buf = None
ao2mo.general(cc.mol, (orbob,mob,moa,moa), tmpfile2.name, 'ba')
with h5py.File(tmpfile2.name, 'a') as f:
buf = numpy.empty((nmob,nmoa,nmoa))
for i in range(noccb):
lib.unpack_tril(f['ba'][i*nmob:(i+1)*nmob], out=buf)
self.OOov[i] = buf[:noccb,:nocca,nocca:]
self.OVoo[i] = buf[noccb:,:nocca,:nocca]
self.OOvo[i] = buf[:noccb,nocca:,:nocca]
self.OOvv[i] = buf[:noccb,nocca:,nocca:]
self.OVvo[i] = buf[noccb:,nocca:,:nocca]
self.OVvv[i] = lib.pack_tril(buf[noccb:,nocca:,nocca:])
del f['ba']
buf = None
cput1 = log.timer_debug1('transforming oopq, ovpq', *cput1)
ao2mo.full(cc.mol, orbva, self.feri, dataname='vvvv')
ao2mo.full(cc.mol, orbvb, self.feri, dataname='VVVV')
ao2mo.general(cc.mol, (orbva,orbva,orbvb,orbvb), self.feri, dataname='vvVV')
self.vvvv = self.feri['vvvv']
self.VVVV = self.feri['VVVV']
self.vvVV = self.feri['vvVV']
cput1 = log.timer_debug1('transforming vvvv', *cput1)
log.timer('CCSD integral transformation', *cput0)
def get_umoidx(cc):
'''Get MO boolean indices for unrestricted reference, accounting for frozen orbs.'''
if isinstance(cc.frozen, (int, numpy.integer)):
dm = cc._scf.make_rdm1(cc.mo_coeff, cc.mo_occ)
fockao = cc._scf.get_hcore() + cc._scf.get_veff(cc.mol, dm)
eab = list()
for a in range(2):
eab.append( np.diag(reduce(numpy.dot, (cc.mo_coeff[a].T, fockao[a], cc.mo_coeff[a]))) )
eab = np.array(eab)
#FIXME: if an occupied orbital energy exceeds a virtual one, virtual orbitals may end up in the occupied set and be frozen
idxs = np.column_stack(np.unravel_index(np.argsort(eab.ravel()), (2, eab.shape[1])))
frozen = [[],[]]
for idx in idxs[:cc.frozen]:
frozen[idx[0]].append(idx[1])
else:
frozen = cc.frozen
moidxa = numpy.ones(cc.mo_occ[0].size, dtype=bool)
moidxb = numpy.ones(cc.mo_occ[1].size, dtype=bool)
if len(frozen[0]) > 0:
moidxa[numpy.asarray(frozen[0])] = False
if len(frozen[1]) > 0:
moidxb[numpy.asarray(frozen[1])] = False
return moidxa,moidxb
def orbspin_of_sorted_mo_energy(mo_energy, mo_occ=None):
if isinstance(mo_energy, np.ndarray) and mo_energy.ndim == 1:
# RHF orbitals
orbspin = np.zeros(mo_energy.size*2, dtype=int)
orbspin[1::2] = 1
else: # UHF orbitals
if mo_occ is None:
mo_occ = np.zeros_like(mo_energy)
idxo = np.hstack([mo_energy[0][mo_occ[0]==1],
mo_energy[1][mo_occ[1]==1]]).argsort()
idxv = np.hstack([mo_energy[0][mo_occ[0]==0],
mo_energy[1][mo_occ[1]==0]]).argsort()
nocca = np.count_nonzero(mo_occ[0]==1)
nvira = np.count_nonzero(mo_occ[0]==0)
occspin = np.zeros(idxo.size, dtype=int)
occspin[nocca:] = 1 # label beta orbitals
virspin = np.zeros(idxv.size, dtype=int)
virspin[nvira:] = 1
orbspin = np.hstack([occspin[idxo], virspin[idxv]])
return orbspin
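# Worked example: an RHF input with 3 spatial orbitals yields the
# interleaved labels [0, 1, 0, 1, 0, 1].  For UHF inputs the occupied and
# virtual blocks are sorted by energy independently; e.g. alpha occupied
# energies (-1.0, -0.3) and beta occupied energy (-0.8,) give the occupied
# spin order [0, 1, 0].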
def uspatial2spin(cc, moidx, mo_coeff):
'''Convert the results of an unrestricted mean-field calculation to spin-orbital form.
Spin-orbital ordering is determined by orbital energy without regard for spin.
Returns:
fock : (nso,nso) ndarray
The Fock matrix in the basis of spin-orbitals
so_coeff : (nao, nso) ndarray
The matrix of spin-orbital coefficients in the AO basis
spin : (nso,) ndarray
The spin (0 or 1) of each spin-orbital
'''
dm = cc._scf.make_rdm1(cc.mo_coeff, cc.mo_occ)
fockao = cc._scf.get_hcore() + cc._scf.get_veff(cc.mol, dm)
fockab = [reduce(numpy.dot, (mo_coeff[0].T, fockao[0], mo_coeff[0])),
reduce(numpy.dot, (mo_coeff[1].T, fockao[1], mo_coeff[1]))]
mo_energy = [fockab[0].diagonal(), fockab[1].diagonal()]
mo_occa = cc.mo_occ[0][moidx[0]]
mo_occb = cc.mo_occ[1][moidx[1]]
spin = orbspin_of_sorted_mo_energy(mo_energy, (mo_occa,mo_occb))
sorta = np.hstack([np.where(mo_occa!=0)[0], np.where(mo_occa==0)[0]])
sortb = np.hstack([np.where(mo_occb!=0)[0], np.where(mo_occb==0)[0]])
idxa = np.where(spin == 0)[0]
idxb = np.where(spin == 1)[0]
nao = mo_coeff[0].shape[0]
nmo = mo_coeff[0].shape[1] + mo_coeff[1].shape[1]
fock = np.zeros((nmo,nmo), dtype=fockab[0].dtype)
lib.takebak_2d(fock, lib.take_2d(fockab[0], sorta, sorta), idxa, idxa)
lib.takebak_2d(fock, lib.take_2d(fockab[1], sortb, sortb), idxb, idxb)
so_coeff = np.zeros((nao, nmo), dtype=mo_coeff[0].dtype)
so_coeff[:,idxa] = mo_coeff[0][:,sorta]
so_coeff[:,idxb] = mo_coeff[1][:,sortb]
return fock, so_coeff, spin
class _IMDS:
# Exactly the same as RCCSD IMDS except
# -- rintermediates --> uintermediates
# -- Loo, Lvv, cc_Fov --> Foo, Fvv, Fov
# -- One less 2-virtual intermediate
def __init__(self, cc):
self.verbose = cc.verbose
self.stdout = cc.stdout
self.t1 = cc.t1
self.t2 = cc.t2
self.eris = cc.eris
self._made_shared = False
self.made_ip_imds = False
self.made_ea_imds = False
self.made_ee_imds = False
def _make_shared(self):
cput0 = (time.process_time(), time.time())
log = logger.Logger(self.stdout, self.verbose)
t1,t2,eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
nocc, nvir = t1.shape
fov = eris.fock[:nocc,nocc:]
foo = eris.fock[:nocc,:nocc]
fvv = eris.fock[nocc:,nocc:]
eris_ovvv = np.asarray(eris.ovvv)
Fvv = np.einsum('mf,mfae->ae', t1, eris_ovvv)
Fvv -= np.einsum('mf,meaf->ae', t1, eris_ovvv)
Wmbej = lib.einsum('jf,mebf->mbej', t1, eris_ovvv)
Wmbej -= lib.einsum('jf,mfbe->mbej', t1, eris_ovvv)
eris_ovvv = None
tau_tilde = imd.make_tau(t2,t1,t1,fac=0.5)
tau = t2 + np.einsum('jf,nb->jnfb', t1, t1)
eris_ovov = np.asarray(eris.ovov)
eris_ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
self.Fov = np.einsum('nf,menf->me',t1,eris_ovov)
Foo = 0.5*einsum('inef,menf->mi',tau_tilde,eris_ovov)
Fvv -= 0.5*einsum('mnaf,menf->ae',tau_tilde,eris_ovov)
Wmbej -= einsum('jnfb,menf->mbej', tau, eris_ovov)
eris_ovov = None
eris_ooov = np.asarray(eris.ooov)
Foo += np.einsum('ne,mine->mi',t1,eris_ooov)
Foo -= np.einsum('ne,nime->mi',t1,eris_ooov)
Wmbej += einsum('nb,mjne->mbej',t1,eris_ooov)
Wmbej -= einsum('nb,njme->mbej',t1,eris_ooov)
eris_ooov = None
Foo += foo + 0.5*einsum('me,ie->mi',fov,t1)
Foo += 0.5*einsum('me,ie->mi',self.Fov,t1)
self.Foo = Foo
Fvv += fvv - 0.5*einsum('me,ma->ae',fov,t1)
Fvv -= 0.5*einsum('ma,me->ae',t1,self.Fov)
self.Fvv = Fvv
Wmbej += np.asarray(eris.ovvo).transpose(0,2,1,3)
Wmbej -= np.asarray(eris.oovv).transpose(0,2,3,1)
self.Wovvo = Wmbej
self._made_shared = True
log.timer('EOM-CCSD shared intermediates', *cput0)
def make_ip(self):
if not self._made_shared:
self._make_shared()
cput0 = (time.process_time(), time.time())
log = logger.Logger(self.stdout, self.verbose)
t1,t2,eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
nocc, nvir = t1.shape
tau = imd.make_tau(t2,t1,t1)
eris_ooov = np.asarray(eris.ooov)
eris_ooov = eris_ooov - eris_ooov.transpose(2,1,0,3)
Woooo = lib.einsum('je,mine->mnij', t1, eris_ooov)
Wmbij = lib.einsum('mine,jnbe->mbij', eris_ooov, t2)
self.Wooov = eris_ooov.transpose(0,2,1,3).copy()
eris_ooov = None
Woooo += np.asarray(eris.oooo).transpose(0,2,1,3)
self.Woooo = Woooo - Woooo.transpose(0,1,3,2)
eris_ovov = np.asarray(eris.ovov)
eris_ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
self.Woooo += 0.5*einsum('ijef,menf->mnij', tau, eris_ovov)
self.Wooov += lib.einsum('if,mfne->mnie', t1, eris_ovov)
tmp = lib.einsum('njbf,menf->mbej', t2, eris_ovov)
Wmbij -= einsum('ie,mbej->mbij', t1, tmp)
Wmbij += np.asarray(eris.oovo).transpose(0,2,1,3)
eris_ovov = None
Wmbij = Wmbij - Wmbij.transpose(0,1,3,2)
eris_ovvo = np.asarray(eris.ovvo)
eris_oovv = np.asarray(eris.oovv)
tmp = lib.einsum('ie,mebj->mbij',t1, eris_ovvo)
tmp-= lib.einsum('ie,mjbe->mbij',t1, eris_oovv)
Wmbij += tmp - tmp.transpose(0,1,3,2)
eris_oovv = eris_ovvo = None
Wmbij -= lib.einsum('me,ijbe->mbij', self.Fov, t2)
Wmbij -= lib.einsum('nb,mnij->mbij', t1, self.Woooo)
eris_ovvv = np.asarray(eris.ovvv)
Wmbij += 0.5 * einsum('mebf,ijef->mbij', eris_ovvv, tau)
Wmbij -= 0.5 * einsum('mfbe,ijef->mbij', eris_ovvv, tau)
self.Wovoo = Wmbij
self.made_ip_imds = True
log.timer('EOM-CCSD IP intermediates', *cput0)
def make_ea(self):
if not self._made_shared:
self._make_shared()
cput0 = (time.process_time(), time.time())
log = logger.Logger(self.stdout, self.verbose)
t1,t2,eris = self.t1, self.t2, self.eris
t1 = spatial2spin(t1, eris.orbspin)
t2 = spatial2spin(t2, eris.orbspin)
nocc, nvir = t1.shape
tau = imd.make_tau(t2,t1,t1)
eris_ooov = np.asarray(eris.ooov)
Wabei = einsum('nime,mnab->abei',eris_ooov,tau)
eris_ooov = None
eris_ovov = np.asarray(eris.ovov)
eris_ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
Wabei -= np.einsum('me,miab->abei', self.Fov, t2)
tmp = einsum('nibf,menf->mbei', t2, eris_ovov)
tmp = einsum('ma,mbei->abei', t1, tmp)
eris_ovov = None
eris_ovvo = np.asarray(eris.ovvo)
eris_oovv = np.asarray(eris.oovv)
tmp += einsum('ma,mibe->abei', t1, eris_oovv)
tmp -= einsum('ma,mebi->abei', t1, eris_ovvo)
eris_oovv = eris_ovvo = None
Wabei += tmp - tmp.transpose(1,0,2,3)
eris_ovvv = np.asarray(eris.ovvv)
eris_ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
Wabei += eris_ovvv.transpose(3,1,2,0).conj()
tmp1 = lib.einsum('mebf,miaf->abei', eris_ovvv, t2)
Wabei -= tmp1 - tmp1.transpose(1,0,2,3)
self.Wvvvo = Wabei
self.made_ea_imds = True
log.timer('EOM-CCSD EA intermediates', *cput0)
def make_ee(self):
cput0 = (time.process_time(), time.time())
log = logger.Logger(self.stdout, self.verbose)
t1,t2,eris = self.t1, self.t2, self.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
self.Fooa = numpy.zeros((nocca,nocca))
self.Foob = numpy.zeros((noccb,noccb))
self.Fvva = numpy.zeros((nvira,nvira))
self.Fvvb = numpy.zeros((nvirb,nvirb))
wovvo = np.zeros((nocca,nvira,nvira,nocca))
wOVVO = np.zeros((noccb,nvirb,nvirb,noccb))
woVvO = np.zeros((nocca,nvirb,nvira,noccb))
woVVo = np.zeros((nocca,nvirb,nvirb,nocca))
wOvVo = np.zeros((noccb,nvira,nvirb,nocca))
wOvvO = np.zeros((noccb,nvira,nvira,noccb))
wovoo = np.zeros((nocca,nvira,nocca,nocca))
wOVOO = np.zeros((noccb,nvirb,noccb,noccb))
woVoO = np.zeros((nocca,nvirb,nocca,noccb))
wOvOo = np.zeros((noccb,nvira,noccb,nocca))
tauaa, tauab, taubb = make_tau(t2, t1, t1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:self.Fvva = np.einsum('mf,mfae->ae', t1a, ovvv)
#:self.wovvo = lib.einsum('jf,mebf->mbej', t1a, ovvv)
#:self.wovoo = 0.5 * einsum('mebf,ijef->mbij', eris_ovvv, tauaa)
#:self.wovoo -= 0.5 * einsum('mfbe,ijef->mbij', eris_ovvv, tauaa)
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**3*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
ovvv = ovvv - ovvv.transpose(0,3,2,1)
self.Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
wovvo[p0:p1] = lib.einsum('jf,mebf->mbej', t1a, ovvv)
wovoo[p0:p1] = 0.5 * einsum('mebf,ijef->mbij', ovvv, tauaa)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:self.Fvvb = np.einsum('mf,mfae->ae', t1b, OVVV)
#:self.wOVVO = lib.einsum('jf,mebf->mbej', t1b, OVVV)
#:self.wOVOO = 0.5 * einsum('mebf,ijef->mbij', OVVV, taubb)
blksize = max(int(max_memory*1e6/8/(nvirb**3*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
OVVV = OVVV - OVVV.transpose(0,3,2,1)
self.Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
wOVVO[p0:p1] = lib.einsum('jf,mebf->mbej', t1b, OVVV)
wOVOO[p0:p1] = 0.5 * einsum('mebf,ijef->mbij', OVVV, taubb)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.Fvvb += np.einsum('mf,mfAE->AE', t1a, eris_ovVV)
#:self.woVvO = lib.einsum('JF,meBF->mBeJ', t1b, eris_ovVV)
#:self.woVVo = lib.einsum('jf,mfBE->mBEj',-t1a, eris_ovVV)
#:self.woVoO = 0.5 * einsum('meBF,iJeF->mBiJ', eris_ovVV, tauab)
#:self.woVoO += 0.5 * einsum('mfBE,iJfE->mBiJ', eris_ovVV, tauab)
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
self.Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
woVvO[p0:p1] = lib.einsum('JF,meBF->mBeJ', t1b, ovVV)
woVVo[p0:p1] = lib.einsum('jf,mfBE->mBEj',-t1a, ovVV)
woVoO[p0:p1] = 0.5 * einsum('meBF,iJeF->mBiJ', ovVV, tauab)
woVoO[p0:p1]+= 0.5 * einsum('mfBE,iJfE->mBiJ', ovVV, tauab)
ovVV = None
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.Fvva += np.einsum('MF,MFae->ae', t1b, eris_OVvv)
#:self.wOvVo = lib.einsum('jf,MEbf->MbEj', t1a, eris_OVvv)
#:self.wOvvO = lib.einsum('JF,MFbe->MbeJ',-t1b, eris_OVvv)
#:self.wOvOo = 0.5 * einsum('MEbf,jIfE->MbIj', eris_OVvv, tauab)
#:self.wOvOo += 0.5 * einsum('MFbe,jIeF->MbIj', eris_OVvv, tauab)
blksize = max(int(max_memory*1e6/8/(nvirb*nvira**2*3)), 2)
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
self.Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
wOvVo[p0:p1] = lib.einsum('jf,MEbf->MbEj', t1a, OVvv)
wOvvO[p0:p1] = lib.einsum('JF,MFbe->MbeJ',-t1b, OVvv)
wOvOo[p0:p1] = 0.5 * einsum('MEbf,jIfE->MbIj', OVvv, tauab)
wOvOo[p0:p1]+= 0.5 * einsum('MFbe,jIeF->MbIj', OVvv, tauab)
OVvv = None
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
self.Fova = np.einsum('nf,menf->me', t1a, ovov)
self.Fova+= np.einsum('NF,meNF->me', t1b, eris_ovOV)
self.Fovb = np.einsum('nf,menf->me', t1b, OVOV)
self.Fovb+= np.einsum('nf,nfME->ME', t1a, eris_ovOV)
tilaa, tilab, tilbb = make_tau(t2,t1,t1,fac=0.5)
self.Fooa = einsum('inef,menf->mi', tilaa, eris_ovov)
self.Fooa += einsum('iNeF,meNF->mi', tilab, eris_ovOV)
self.Foob = einsum('inef,menf->mi', tilbb, eris_OVOV)
self.Foob += einsum('nIfE,nfME->MI', tilab, eris_ovOV)
self.Fvva -= einsum('mnaf,menf->ae', tilaa, eris_ovov)
self.Fvva -= einsum('mNaF,meNF->ae', tilab, eris_ovOV)
self.Fvvb -= einsum('mnaf,menf->ae', tilbb, eris_OVOV)
self.Fvvb -= einsum('nMfA,nfME->AE', tilab, eris_ovOV)
wovvo -= einsum('jnfb,menf->mbej', t2aa, ovov)
wovvo += einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
wOVVO -= einsum('jnfb,menf->mbej', t2bb, OVOV)
wOVVO += einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
woVvO += einsum('nJfB,menf->mBeJ', t2ab, ovov)
woVvO -= einsum('JNFB,meNF->mBeJ', t2bb, eris_ovOV)
wOvVo -= einsum('jnfb,nfME->MbEj', t2aa, eris_ovOV)
wOvVo += einsum('jNbF,MENF->MbEj', t2ab, OVOV)
woVVo += einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
wOvvO += einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
eris_ooov = np.asarray(eris.ooov)
eris_OOOV = np.asarray(eris.OOOV)
eris_ooOV = np.asarray(eris.ooOV)
eris_OOov = np.asarray(eris.OOov)
self.Fooa += np.einsum('ne,mine->mi', t1a, eris_ooov)
self.Fooa -= np.einsum('ne,nime->mi', t1a, eris_ooov)
self.Fooa += np.einsum('NE,miNE->mi', t1b, eris_ooOV)
self.Foob += np.einsum('ne,mine->mi', t1b, eris_OOOV)
self.Foob -= np.einsum('ne,nime->mi', t1b, eris_OOOV)
self.Foob += np.einsum('ne,MIne->MI', t1a, eris_OOov)
eris_ooov = eris_ooov + np.einsum('jf,nfme->njme', t1a, eris_ovov)
eris_OOOV = eris_OOOV + np.einsum('jf,nfme->njme', t1b, eris_OVOV)
eris_ooOV = eris_ooOV + np.einsum('jf,nfme->njme', t1a, eris_ovOV)
eris_OOov = eris_OOov + np.einsum('jf,menf->njme', t1b, eris_ovOV)
ooov = eris_ooov - eris_ooov.transpose(2,1,0,3)
OOOV = eris_OOOV - eris_OOOV.transpose(2,1,0,3)
wovvo += lib.einsum('nb,mjne->mbej', t1a, ooov)
wOVVO += lib.einsum('nb,mjne->mbej', t1b, OOOV)
woVvO -= lib.einsum('NB,NJme->mBeJ', t1b, eris_OOov)
wOvVo -= lib.einsum('nb,njME->MbEj', t1a, eris_ooOV)
woVVo += lib.einsum('NB,mjNE->mBEj', t1b, eris_ooOV)
wOvvO += lib.einsum('nb,MJne->MbeJ', t1a, eris_OOov)
eris_ooov = eris_OOOV = eris_OOov = eris_ooOV = None
self.Fooa += fooa + 0.5*einsum('me,ie->mi', self.Fova+fova, t1a)
self.Foob += foob + 0.5*einsum('me,ie->mi', self.Fovb+fovb, t1b)
self.Fvva += fvva - 0.5*einsum('me,ma->ae', self.Fova+fova, t1a)
self.Fvvb += fvvb - 0.5*einsum('me,ma->ae', self.Fovb+fovb, t1b)
# 0 or 1 virtuals
eris_ooov = np.asarray(eris.ooov)
eris_OOOV = np.asarray(eris.OOOV)
eris_ooOV = np.asarray(eris.ooOV)
eris_OOov = np.asarray(eris.OOov)
ooov = eris_ooov - eris_ooov.transpose(2,1,0,3)
OOOV = eris_OOOV - eris_OOOV.transpose(2,1,0,3)
woooo = lib.einsum('je,mine->mnij', t1a, ooov)
wOOOO = lib.einsum('je,mine->mnij', t1b, OOOV)
woOoO = lib.einsum('JE,miNE->mNiJ', t1b, eris_ooOV)
woOOo = lib.einsum('je,NIme->mNIj',-t1a, eris_OOov)
tmpaa = lib.einsum('mine,jnbe->mbij', ooov, t2aa)
tmpaa+= lib.einsum('miNE,jNbE->mbij', eris_ooOV, t2ab)
tmpbb = lib.einsum('mine,jnbe->mbij', OOOV, t2bb)
tmpbb+= lib.einsum('MIne,nJeB->MBIJ', eris_OOov, t2ab)
woVoO += lib.einsum('mine,nJeB->mBiJ', ooov, t2ab)
woVoO += lib.einsum('miNE,JNBE->mBiJ', eris_ooOV, t2bb)
woVoO -= lib.einsum('NIme,jNeB->mBjI', eris_OOov, t2ab)
wOvOo += lib.einsum('MINE,jNbE->MbIj', OOOV, t2ab)
wOvOo += lib.einsum('MIne,jnbe->MbIj', eris_OOov, t2aa)
wOvOo -= lib.einsum('niME,nJbE->MbJi', eris_ooOV, t2ab)
wovoo += tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO += tmpbb - tmpbb.transpose(0,1,3,2)
self.wooov = ooov.transpose(0,2,1,3).copy()
self.wOOOV = OOOV.transpose(0,2,1,3).copy()
self.woOoV = eris_ooOV.transpose(0,2,1,3).copy()
self.wOoOv = eris_OOov.transpose(0,2,1,3).copy()
self.wOooV =-eris_ooOV.transpose(2,0,1,3).copy()
self.woOOv =-eris_OOov.transpose(2,0,1,3).copy()
eris_ooov = eris_OOOV = eris_OOov = eris_ooOV = None
woooo += np.asarray(eris.oooo).transpose(0,2,1,3)
wOOOO += np.asarray(eris.OOOO).transpose(0,2,1,3)
woOoO += np.asarray(eris.ooOO).transpose(0,2,1,3)
self.woooo = woooo - woooo.transpose(0,1,3,2)
self.wOOOO = wOOOO - wOOOO.transpose(0,1,3,2)
self.woOoO = woOoO - woOOo.transpose(0,1,3,2)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
tauaa, tauab, taubb = make_tau(t2,t1,t1)
self.woooo += 0.5*lib.einsum('ijef,menf->mnij', tauaa, ovov)
self.wOOOO += 0.5*lib.einsum('ijef,menf->mnij', taubb, OVOV)
self.woOoO += lib.einsum('iJeF,meNF->mNiJ', tauab, eris_ovOV)
self.wooov += lib.einsum('if,mfne->mnie', t1a, ovov)
self.wOOOV += lib.einsum('if,mfne->mnie', t1b, OVOV)
self.woOoV += lib.einsum('if,mfNE->mNiE', t1a, eris_ovOV)
self.wOoOv += lib.einsum('IF,neMF->MnIe', t1b, eris_ovOV)
self.wOooV -= lib.einsum('if,nfME->MniE', t1a, eris_ovOV)
self.woOOv -= lib.einsum('IF,meNF->mNIe', t1b, eris_ovOV)
tmp1aa = lib.einsum('njbf,menf->mbej', t2aa, ovov)
tmp1aa-= lib.einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
tmp1bb = lib.einsum('njbf,menf->mbej', t2bb, OVOV)
tmp1bb-= lib.einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
tmp1ab = lib.einsum('NJBF,meNF->mBeJ', t2bb, eris_ovOV)
tmp1ab-= lib.einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmp1ba = lib.einsum('njbf,nfME->MbEj', t2aa, eris_ovOV)
tmp1ba-= lib.einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmp1abba =-lib.einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
tmp1baab =-lib.einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpaa = einsum('ie,mbej->mbij', t1a, tmp1aa)
tmpbb = einsum('ie,mbej->mbij', t1b, tmp1bb)
tmpab = einsum('ie,mBeJ->mBiJ', t1a, tmp1ab)
tmpab-= einsum('IE,mBEj->mBjI', t1b, tmp1abba)
tmpba = einsum('IE,MbEj->MbIj', t1b, tmp1ba)
tmpba-= einsum('ie,MbeJ->MbJi', t1a, tmp1baab)
wovoo -= tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO -= tmpbb - tmpbb.transpose(0,1,3,2)
woVoO -= tmpab
wOvOo -= tmpba
eris_ovov = eris_OVOV = eris_ovOV = None
eris_oovo = numpy.asarray(eris.oovo)
eris_OOVO = numpy.asarray(eris.OOVO)
eris_OOvo = numpy.asarray(eris.OOvo)
eris_ooVO = numpy.asarray(eris.ooVO)
wovoo += eris_oovo.transpose(0,2,1,3) - eris_oovo.transpose(0,2,3,1)
wOVOO += eris_OOVO.transpose(0,2,1,3) - eris_OOVO.transpose(0,2,3,1)
woVoO += eris_ooVO.transpose(0,2,1,3)
wOvOo += eris_OOvo.transpose(0,2,1,3)
eris_oovo = eris_OOVO = eris_OOvo = eris_ooVO = None
eris_ovvo = np.asarray(eris.ovvo)
eris_OVVO = np.asarray(eris.OVVO)
eris_OVvo = np.asarray(eris.OVvo)
eris_ovVO = np.asarray(eris.ovVO)
eris_oovv = np.asarray(eris.oovv)
eris_OOVV = np.asarray(eris.OOVV)
eris_OOvv = np.asarray(eris.OOvv)
eris_ooVV = np.asarray(eris.ooVV)
wovvo += eris_ovvo.transpose(0,2,1,3)
wOVVO += eris_OVVO.transpose(0,2,1,3)
woVvO += eris_ovVO.transpose(0,2,1,3)
wOvVo += eris_OVvo.transpose(0,2,1,3)
wovvo -= eris_oovv.transpose(0,2,3,1)
wOVVO -= eris_OOVV.transpose(0,2,3,1)
woVVo -= eris_ooVV.transpose(0,2,3,1)
wOvvO -= eris_OOvv.transpose(0,2,3,1)
tmpaa = lib.einsum('ie,mebj->mbij', t1a, eris_ovvo)
tmpbb = lib.einsum('ie,mebj->mbij', t1b, eris_OVVO)
tmpaa-= lib.einsum('ie,mjbe->mbij', t1a, eris_oovv)
tmpbb-= lib.einsum('ie,mjbe->mbij', t1b, eris_OOVV)
woVoO += lib.einsum('ie,meBJ->mBiJ', t1a, eris_ovVO)
woVoO -= lib.einsum('IE,mjBE->mBjI',-t1b, eris_ooVV)
wOvOo += lib.einsum('IE,MEbj->MbIj', t1b, eris_OVvo)
wOvOo -= lib.einsum('ie,MJbe->MbJi',-t1a, eris_OOvv)
wovoo += tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO += tmpbb - tmpbb.transpose(0,1,3,2)
wovoo -= lib.einsum('me,ijbe->mbij', self.Fova, t2aa)
wOVOO -= lib.einsum('me,ijbe->mbij', self.Fovb, t2bb)
woVoO += lib.einsum('me,iJeB->mBiJ', self.Fova, t2ab)
wOvOo += lib.einsum('ME,jIbE->MbIj', self.Fovb, t2ab)
wovoo -= lib.einsum('nb,mnij->mbij', t1a, self.woooo)
wOVOO -= lib.einsum('nb,mnij->mbij', t1b, self.wOOOO)
woVoO -= lib.einsum('NB,mNiJ->mBiJ', t1b, self.woOoO)
wOvOo -= lib.einsum('nb,nMjI->MbIj', t1a, self.woOoO)
eris_ovvo = eris_OVVO = eris_OVvo = eris_ovVO = None
eris_oovv = eris_OOVV = eris_OOvv = eris_ooVV = None
self.saved = lib.H5TmpFile()
self.saved['ovvo'] = wovvo
self.saved['OVVO'] = wOVVO
self.saved['oVvO'] = woVvO
self.saved['OvVo'] = wOvVo
self.saved['oVVo'] = woVVo
self.saved['OvvO'] = wOvvO
self.wovvo = self.saved['ovvo']
self.wOVVO = self.saved['OVVO']
self.woVvO = self.saved['oVvO']
self.wOvVo = self.saved['OvVo']
self.woVVo = self.saved['oVVo']
self.wOvvO = self.saved['OvvO']
self.saved['ovoo'] = wovoo
self.saved['OVOO'] = wOVOO
self.saved['oVoO'] = woVoO
self.saved['OvOo'] = wOvOo
self.wovoo = self.saved['ovoo']
self.wOVOO = self.saved['OVOO']
self.woVoO = self.saved['oVoO']
self.wOvOo = self.saved['OvOo']
self.wvovv = self.saved.create_dataset('vovv', (nvira,nocca,nvira,nvira), t1a.dtype.char)
self.wVOVV = self.saved.create_dataset('VOVV', (nvirb,noccb,nvirb,nvirb), t1a.dtype.char)
self.wvOvV = self.saved.create_dataset('vOvV', (nvira,noccb,nvira,nvirb), t1a.dtype.char)
self.wVoVv = self.saved.create_dataset('VoVv', (nvirb,nocca,nvirb,nvira), t1a.dtype.char)
# 3 or 4 virtuals
eris_ooov = np.asarray(eris.ooov)
eris_ovov = np.asarray(eris.ovov)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
eris_oovv = np.asarray(eris.oovv)
eris_ovvo = np.asarray(eris.ovvo)
oovv = eris_oovv - eris_ovvo.transpose(0,3,2,1)
eris_oovv = eris_ovvo = None
#:wvovv = .5 * lib.einsum('nime,mnab->eiab', eris_ooov, tauaa)
#:wvovv -= .5 * lib.einsum('me,miab->eiab', self.Fova, t2aa)
#:tmp1aa = lib.einsum('nibf,menf->mbei', t2aa, ovov)
#:tmp1aa-= lib.einsum('iNbF,meNF->mbei', t2ab, eris_ovOV)
#:wvovv+= lib.einsum('ma,mbei->eiab', t1a, tmp1aa)
#:wvovv+= einsum('ma,mibe->eiab', t1a, oovv)
for p0, p1 in lib.prange(0, nvira, nocca):
wvovv = .5*lib.einsum('nime,mnab->eiab', eris_ooov[:,:,:,p0:p1], tauaa)
wvovv -= .5*lib.einsum('me,miab->eiab', self.Fova[:,p0:p1], t2aa)
tmp1aa = lib.einsum('nibf,menf->mbei', t2aa, ovov[:,p0:p1])
tmp1aa-= lib.einsum('iNbF,meNF->mbei', t2ab, eris_ovOV[:,p0:p1])
wvovv += lib.einsum('ma,mbei->eiab', t1a, tmp1aa)
wvovv += einsum('ma,mibe->eiab', t1a, oovv[:,:,:,p0:p1])
self.wvovv[p0:p1] = wvovv
tmp1aa = None
eris_ovov = eris_ooov = eris_ovOV = None
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:wvovv += lib.einsum('mebf,miaf->eiab', ovvv, t2aa)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:wvovv += lib.einsum('MFbe,iMaF->eiab', eris_OVvv, t2ab)
#:wvovv += eris_ovvv.transpose(2,0,3,1).conj()
#:self.wvovv -= wvovv - wvovv.transpose(0,1,3,2)
mem_now = lib.current_memory()[0]
max_memory = lib.param.MAX_MEMORY - mem_now
blksize = max(int(max_memory*1e6/8/(nvira**3*6)), 2)
for i0,i1 in lib.prange(0, nocca, blksize):
wvovv = self.wvovv[:,i0:i1]
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
wvovv -= lib.einsum('MFbe,iMaF->eiab', OVvv, t2ab[i0:i1,p0:p1])
OVvv = None
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
if p0 == i0:
wvovv += ovvv.transpose(2,0,3,1).conj()
ovvv = ovvv - ovvv.transpose(0,3,2,1)
wvovv -= lib.einsum('mebf,miaf->eiab', ovvv, t2aa[p0:p1,i0:i1])
ovvv = None
wvovv = wvovv - wvovv.transpose(0,1,3,2)
self.wvovv[:,i0:i1] = wvovv
eris_OOOV = np.asarray(eris.OOOV)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
eris_OOVV = np.asarray(eris.OOVV)
eris_OVVO = np.asarray(eris.OVVO)
OOVV = eris_OOVV - eris_OVVO.transpose(0,3,2,1)
eris_OOVV = eris_OVVO = None
#:wVOVV = .5*lib.einsum('nime,mnab->eiab', eris_OOOV, taubb)
#:wVOVV -= .5*lib.einsum('me,miab->eiab', self.Fovb, t2bb)
#:tmp1bb = lib.einsum('nibf,menf->mbei', t2bb, OVOV)
#:tmp1bb-= lib.einsum('nIfB,nfME->MBEI', t2ab, eris_ovOV)
#:wVOVV += lib.einsum('ma,mbei->eiab', t1b, tmp1bb)
#:wVOVV += einsum('ma,mibe->eiab', t1b, OOVV)
for p0, p1 in lib.prange(0, nvirb, noccb):
wVOVV = .5*lib.einsum('nime,mnab->eiab', eris_OOOV[:,:,:,p0:p1], taubb)
wVOVV -= .5*lib.einsum('me,miab->eiab', self.Fovb[:,p0:p1], t2bb)
tmp1bb = lib.einsum('nibf,menf->mbei', t2bb, OVOV[:,p0:p1])
tmp1bb-= lib.einsum('nIfB,nfME->MBEI', t2ab, eris_ovOV[:,:,:,p0:p1])
wVOVV += lib.einsum('ma,mbei->eiab', t1b, tmp1bb)
wVOVV += einsum('ma,mibe->eiab', t1b, OOVV[:,:,:,p0:p1])
self.wVOVV[p0:p1] = wVOVV
tmp1bb = None
eris_OVOV = eris_OOOV = eris_ovOV = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:wVOVV -= lib.einsum('MEBF,MIAF->EIAB', OVVV, t2bb)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:wVOVV -= lib.einsum('mfBE,mIfA->EIAB', eris_ovVV, t2ab)
#:wVOVV += eris_OVVV.transpose(2,0,3,1).conj()
#:self.wVOVV += wVOVV - wVOVV.transpose(0,1,3,2)
blksize = max(int(max_memory*1e6/8/(nvirb**3*6)), 2)
for i0,i1 in lib.prange(0, noccb, blksize):
wVOVV = self.wVOVV[:,i0:i1]
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
wVOVV -= lib.einsum('mfBE,mIfA->EIAB', ovVV, t2ab[p0:p1,i0:i1])
ovVV = None
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
if p0 == i0:
wVOVV += OVVV.transpose(2,0,3,1).conj()
OVVV = OVVV - OVVV.transpose(0,3,2,1)
wVOVV -= lib.einsum('mebf,miaf->eiab', OVVV, t2bb[p0:p1,i0:i1])
OVVV = None
wVOVV = wVOVV - wVOVV.transpose(0,1,3,2)
self.wVOVV[:,i0:i1] = wVOVV
eris_ovOV = np.asarray(eris.ovOV)
eris_OOov = np.asarray(eris.OOov)
eris_OOvv = np.asarray(eris.OOvv)
eris_ovVO = np.asarray(eris.ovVO)
#:self.wvOvV = einsum('NIme,mNaB->eIaB', eris_OOov, tauab)
#:self.wvOvV -= lib.einsum('me,mIaB->eIaB', self.Fova, t2ab)
#:tmp1ab = lib.einsum('NIBF,meNF->mBeI', t2bb, eris_ovOV)
#:tmp1ab-= lib.einsum('nIfB,menf->mBeI', t2ab, ovov)
#:tmp1baab = lib.einsum('nIbF,neMF->MbeI', t2ab, eris_ovOV)
#:tmpab = lib.einsum('ma,mBeI->eIaB', t1a, tmp1ab)
#:tmpab+= lib.einsum('MA,MbeI->eIbA', t1b, tmp1baab)
#:tmpab-= einsum('MA,MIbe->eIbA', t1b, eris_OOvv)
#:tmpab-= einsum('ma,meBI->eIaB', t1a, eris_ovVO)
#:self.wvOvV += tmpab
for p0, p1 in lib.prange(0, nvira, nocca):
wvOvV = einsum('NIme,mNaB->eIaB', eris_OOov[:,:,:,p0:p1], tauab)
wvOvV -= lib.einsum('me,mIaB->eIaB', self.Fova[:,p0:p1], t2ab)
tmp1ab = lib.einsum('NIBF,meNF->mBeI', t2bb, eris_ovOV[:,p0:p1])
tmp1ab-= lib.einsum('nIfB,menf->mBeI', t2ab, ovov[:,p0:p1])
wvOvV+= lib.einsum('ma,mBeI->eIaB', t1a, tmp1ab)
tmp1ab = None
tmp1baab = lib.einsum('nIbF,neMF->MbeI', t2ab, eris_ovOV[:,p0:p1])
wvOvV+= lib.einsum('MA,MbeI->eIbA', t1b, tmp1baab)
tmp1baab = None
wvOvV-= einsum('MA,MIbe->eIbA', t1b, eris_OOvv[:,:,:,p0:p1])
wvOvV-= einsum('ma,meBI->eIaB', t1a, eris_ovVO[:,p0:p1])
self.wvOvV[p0:p1] = wvOvV
eris_ovOV = eris_OOov = eris_OOvv = eris_ovVO = None
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:self.wvOvV -= lib.einsum('mebf,mIfA->eIbA', ovvv, t2ab)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.wvOvV -= lib.einsum('meBF,mIaF->eIaB', eris_ovVV, t2ab)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.wvOvV -= lib.einsum('MFbe,MIAF->eIbA', eris_OVvv, t2bb)
#:self.wvOvV += eris_OVvv.transpose(2,0,3,1).conj()
blksize = max(int(max_memory*1e6/8/(nvira**3*6)), 2)
for i0,i1 in lib.prange(0, nocca, blksize):
wvOvV = self.wvOvV[:,i0:i1]
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
wvOvV -= lib.einsum('meBF,mIaF->eIaB', ovVV, t2ab[p0:p1,i0:i1])
ovVV = None
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = np.asarray(eris.ovvv[p0:p1]).reshape((p1-p0)*nvira,-1)
ovvv = lib.unpack_tril(ovvv).reshape(-1,nvira,nvira,nvira)
ovvv = ovvv - ovvv.transpose(0,3,2,1)
wvOvV -= lib.einsum('mebf,mIfA->eIbA',ovvv, t2ab[p0:p1,i0:i1])
ovvv = None
self.wvOvV[:,i0:i1] = wvOvV
blksize = max(int(max_memory*1e6/8/(nvirb*nvira**2*3)), 2)
for i0,i1 in lib.prange(0, nocca, blksize):
wvOvV = self.wvOvV[:,i0:i1]
for p0,p1 in lib.prange(0, nocca, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
if p0 == i0:
wvOvV += OVvv.transpose(2,0,3,1).conj()
wvOvV -= lib.einsum('MFbe,MIAF->eIbA', OVvv, t2bb[p0:p1,i0:i1])
OVvv = None
self.wvOvV[:,i0:i1] = wvOvV
eris_ovOV = np.asarray(eris.ovOV)
eris_ooOV = np.asarray(eris.ooOV)
eris_ooVV = np.asarray(eris.ooVV)
eris_OVvo = np.asarray(eris.OVvo)
#:self.wVoVv = einsum('niME,nMbA->EiAb', eris_ooOV, tauab)
#:self.wVoVv -= lib.einsum('ME,iMbA->EiAb', self.Fovb, t2ab)
#:tmp1ba = lib.einsum('nibf,nfME->MbEi', t2aa, eris_ovOV)
#:tmp1ba-= lib.einsum('iNbF,MENF->MbEi', t2ab, OVOV)
#:tmp1abba = lib.einsum('iNfB,mfNE->mBEi', t2ab, eris_ovOV)
#:tmpba = lib.einsum('MA,MbEi->EiAb', t1b, tmp1ba)
#:tmpba+= lib.einsum('ma,mBEi->EiBa', t1a, tmp1abba)
#:tmpba-= einsum('ma,miBE->EiBa', t1a, eris_ooVV)
#:tmpba-= einsum('MA,MEbi->EiAb', t1b, eris_OVvo)
#:self.wVoVv += tmpba
for p0, p1 in lib.prange(0, nvirb, noccb):
wVoVv = einsum('niME,nMbA->EiAb', eris_ooOV[:,:,:,p0:p1], tauab)
wVoVv -= lib.einsum('ME,iMbA->EiAb', self.Fovb[:,p0:p1], t2ab)
tmp1ba = lib.einsum('nibf,nfME->MbEi', t2aa, eris_ovOV[:,:,:,p0:p1])
tmp1ba-= lib.einsum('iNbF,MENF->MbEi', t2ab, OVOV[:,p0:p1])
wVoVv += lib.einsum('MA,MbEi->EiAb', t1b, tmp1ba)
tmp1ba = None
tmp1abba = lib.einsum('iNfB,mfNE->mBEi', t2ab, eris_ovOV[:,:,:,p0:p1])
wVoVv += lib.einsum('ma,mBEi->EiBa', t1a, tmp1abba)
tmp1abba = None
wVoVv -= einsum('ma,miBE->EiBa', t1a, eris_ooVV[:,:,:,p0:p1])
wVoVv -= einsum('MA,MEbi->EiAb', t1b, eris_OVvo[:,p0:p1])
self.wVoVv[p0:p1] = wVoVv
eris_ovOV = eris_ooOV = eris_ooVV = eris_OVvo = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:self.wVoVv -= lib.einsum('MEBF,iMaF->EiBa', OVVV, t2ab)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.wVoVv -= lib.einsum('MEbf,iMfA->EiAb', eris_OVvv, t2ab)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.wVoVv -= lib.einsum('mfBE,miaf->EiBa', eris_ovVV, t2aa)
#:self.wVoVv += eris_ovVV.transpose(2,0,3,1).conj()
blksize = max(int(max_memory*1e6/8/(nvirb**3*6)), 2)
for i0,i1 in lib.prange(0, noccb, blksize):
wVoVv = self.wVoVv[:,i0:i1]
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = np.asarray(eris.OVvv[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVvv = lib.unpack_tril(OVvv).reshape(-1,nvirb,nvira,nvira)
wVoVv -= lib.einsum('MEbf,iMfA->EiAb', OVvv, t2ab[i0:i1,p0:p1])
OVvv = None
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = np.asarray(eris.OVVV[p0:p1]).reshape((p1-p0)*nvirb,-1)
OVVV = lib.unpack_tril(OVVV).reshape(-1,nvirb,nvirb,nvirb)
OVVV = OVVV - OVVV.transpose(0,3,2,1)
wVoVv -= lib.einsum('MEBF,iMaF->EiBa', OVVV, t2ab[i0:i1,p0:p1])
OVVV = None
self.wVoVv[:,i0:i1] = wVoVv
blksize = max(int(max_memory*1e6/8/(nvira*nvirb**2*3)), 2)
for i0,i1 in lib.prange(0, noccb, blksize):
wVoVv = self.wVoVv[:,i0:i1]
for p0,p1 in lib.prange(0, noccb, blksize):
ovVV = np.asarray(eris.ovVV[p0:p1]).reshape((p1-p0)*nvira,-1)
ovVV = lib.unpack_tril(ovVV).reshape(-1,nvira,nvirb,nvirb)
if p0 == i0:
wVoVv += ovVV.transpose(2,0,3,1).conj()
wVoVv -= lib.einsum('mfBE,miaf->EiBa', ovVV, t2aa[p0:p1,i0:i1])
ovVV = None
self.wVoVv[:,i0:i1] = wVoVv
self.made_ee_imds = True
log.timer('EOM-CCSD EE intermediates', *cput0)
def make_tau(t2, t1, r1, fac=1, out=None):
t1a, t1b = t1
r1a, r1b = r1
tau1aa = make_tau_aa(t2[0], t1a, r1a, fac, out)
tau1bb = make_tau_aa(t2[2], t1b, r1b, fac, out)
tau1ab = make_tau_ab(t2[1], t1, r1, fac, out)
return tau1aa, tau1ab, tau1bb
def make_tau_aa(t2aa, t1a, r1a, fac=1, out=None):
tau1aa = np.einsum('ia,jb->ijab', t1a, r1a)
tau1aa-= np.einsum('ia,jb->jiab', t1a, r1a)
tau1aa = tau1aa - tau1aa.transpose(0,1,3,2)
tau1aa *= fac * .5
tau1aa += t2aa
return tau1aa
def make_tau_ab(t2ab, t1, r1, fac=1, out=None):
t1a, t1b = t1
r1a, r1b = r1
tau1ab = np.einsum('ia,jb->ijab', t1a, r1b)
tau1ab+= np.einsum('ia,jb->ijab', r1a, t1b)
tau1ab *= fac * .5
tau1ab += t2ab
return tau1ab
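# In equation form, with P() denoting index antisymmetrization:
#   tau_aa(ijab) = t2aa(ijab) + (fac/2) * P(ij) P(ab) [ t1a(ia) * r1a(jb) ]
#   tau_ab(iJaB) = t2ab(iJaB) + (fac/2) * [ t1a(ia)*r1b(JB) + r1a(ia)*t1b(JB) ]
# so the usual call make_tau(t2, t1, t1) with fac=1 reduces to the standard
# CCSD tau amplitudes.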
def _add_vvvv_(cc, t2, eris, Ht2):
t2aa, t2ab, t2bb = t2
u2aa, u2ab, u2bb = Ht2
rccsd._add_vvvv_(cc, t2aa, eris, u2aa)
fakeri = lambda:None
fakeri.vvvv = eris.VVVV
rccsd._add_vvvv_(cc, t2bb, fakeri, u2bb)
fakeri.vvvv = eris.vvVV
rccsd._add_vvvv1_(cc, t2ab, fakeri, u2ab)
return (u2aa,u2ab,u2bb)
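# Note the trick above: `lambda: None` is just a cheap object that accepts
# arbitrary attribute assignment, so the rccsd helpers see something exposing
# the one .vvvv attribute they read. An equivalent sketch with the stdlib
# (assuming nothing else on the eris object is accessed):
#
#     from types import SimpleNamespace
#     fakeri = SimpleNamespace(vvvv=eris.VVVV)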
if __name__ == '__main__':
from pyscf import scf
from pyscf import gto
mol = gto.Mole()
mol.atom = [['O', (0., 0., 0.)],
['O', (1.21, 0., 0.)]]
mol.basis = 'cc-pvdz'
mol.spin = 2
mol.build()
mf = scf.UHF(mol)
print(mf.scf())
# Freeze 1s electrons
frozen = [[0,1], [0,1]]
# also acceptable
#frozen = 4
ucc = UCCSD(mf, frozen=frozen)
ecc, t1, t2 = ucc.kernel()
print(ecc - -0.3486987472235819)
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = 'cc-pvdz'
mol.spin = 0
mol.build()
mf = scf.UHF(mol)
print(mf.scf())
mycc = UCCSD(mf)
ecc, t1, t2 = mycc.kernel()
print(ecc - -0.2133432712431435)
e,v = mycc.ipccsd(nroots=8)
print(e[0] - 0.4335604332073799)
print(e[2] - 0.5187659896045407)
print(e[4] - 0.6782876002229172)
e,v = mycc.eaccsd(nroots=8)
print(e[0] - 0.16737886338859731)
print(e[2] - 0.24027613852009164)
print(e[4] - 0.51006797826488071)
e,v = mycc.eeccsd(nroots=4)
print(e[0] - 0.2757159395886167)
print(e[1] - 0.2757159395886167)
print(e[2] - 0.2757159395886167)
print(e[3] - 0.3005716731825082)
|
#!/usr/bin/env python3
data = open('mystery3.png','rb').read()
print(hex(len(data.split(b'IDAT')[1])-8))
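# The one-liner above estimates the IDAT payload size by slicing everything
# after the first b'IDAT' tag and subtracting 8 bytes of chunk overhead. A
# sketch that instead reads the length the PNG itself declares: each chunk is
# a 4-byte big-endian length, a 4-byte type, the data, then a 4-byte CRC, so
# the declared data length sits in the 4 bytes just before the tag (this
# assumes a single IDAT chunk):
import struct
idx = data.find(b'IDAT')
print(hex(struct.unpack('>I', data[idx - 4:idx])[0]))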
|
from collections import Counter
import logging
from cdeid.utils.resources import PACKAGE_NAME
logger = logging.getLogger(PACKAGE_NAME)
# This function is modified from the one in stanza/models/ner/scorer.py.
# It calculates separate metrics for each entity type.
#
# Copyright 2019 The Board of Trustees of The Leland Stanford Junior University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def score_by_entity(pred_tag_sequences, gold_tag_sequences, verbose=True):
assert (len(gold_tag_sequences) == len(pred_tag_sequences)), \
"Number of predicted tag sequences does not match gold sequences."
def decode_all(tag_sequences):
# decode from all sequences, each sequence with a unique id
ents = []
for sent_id, tags in enumerate(tag_sequences):
for ent in decode_from_bio2(tags):
ent['sent_id'] = sent_id
ents += [ent]
return ents
gold_ents = decode_all(gold_tag_sequences)
pred_ents = decode_all(pred_tag_sequences)
correct_by_type = Counter()
guessed_by_type = Counter()
gold_by_type = Counter()
    # Added: collect the set of entity types present in the gold data
entities = set()
# records the details of fp and fn
fp = []
fn = []
for p in pred_ents:
if p not in gold_ents:
fp.append(p)
for p in gold_ents:
if p not in pred_ents:
fn.append(p)
    logger.info('Predicted entities in total: {}'.format(len(pred_ents)))
logger.info('Gold entities in total: {}'.format(len(gold_ents)))
logger.info('False Positive: {}'.format(len(fp)))
logger.info('False Negative: {}'.format(len(fn)))
for p in pred_ents:
guessed_by_type[p['type']] += 1
if p in gold_ents:
correct_by_type[p['type']] += 1
for g in gold_ents:
gold_by_type[g['type']] += 1
entities.add(g['type'])
prec_micro = 0.0
if sum(guessed_by_type.values()) > 0:
prec_micro = sum(correct_by_type.values()) * 1.0 / sum(guessed_by_type.values())
rec_micro = 0.0
if sum(gold_by_type.values()) > 0:
rec_micro = sum(correct_by_type.values()) * 1.0 / sum(gold_by_type.values())
f_micro = 0.0
if prec_micro + rec_micro > 0:
f_micro = 2.0 * prec_micro * rec_micro / (prec_micro + rec_micro)
if verbose:
logger.info("Prec.\tRec.\tF1")
logger.info("{:.2f}\t{:.2f}\t{:.2f}".format(prec_micro * 100, rec_micro * 100, f_micro * 100))
# metrics for entities
if verbose:
logger.info("Entity\tPrec.\tRec.\tF1")
for entity in entities:
prec_ent = 0.0
if guessed_by_type[entity] > 0:
prec_ent = correct_by_type[entity] * 1.0 / guessed_by_type[entity]
rec_ent = 0.0
if gold_by_type[entity] > 0:
rec_ent = correct_by_type[entity] * 1.0 / gold_by_type[entity]
f_ent = 0.0
if prec_ent + rec_ent > 0:
f_ent = 2.0 * prec_ent * rec_ent / (prec_ent + rec_ent)
logger.info("{}\t{:.2f}\t{:.2f}\t{:.2f}".format(entity, prec_ent * 100, rec_ent * 100, f_ent * 100))
return prec_micro, rec_micro, f_micro, fp, fn
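# Worked example of the micro-averaged scores above (hypothetical counts):
# with 4 gold entities, 5 predicted and 3 correct,
#   prec_micro = 3/5 = 0.60, rec_micro = 3/4 = 0.75,
#   f_micro = 2 * 0.60 * 0.75 / (0.60 + 0.75) ~= 0.667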
# This function is modified from the one in stanza/models/ner/utils.py.
# It decodes all the entities from a sentence's BIO2 tags.
#
# Copyright 2019 The Board of Trustees of The Leland Stanford Junior University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def decode_from_bio2(tags):
res = []
ent_idxs = []
cur_type = None
def flush():
if len(ent_idxs) > 0:
res.append({
'start': ent_idxs[0],
'end': ent_idxs[-1],
'type': cur_type})
for idx, tag in enumerate(tags):
if tag is None:
tag = 'O'
if tag == 'O':
flush()
ent_idxs.clear()
elif tag.startswith('B-'): # start of new ent
flush()
ent_idxs.clear()
ent_idxs.append(idx)
cur_type = tag[2:]
elif tag.startswith('I-'): # continue last ent
ent_idxs.append(idx)
cur_type = tag[2:]
# flush after whole sentence
flush()
return res
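# Illustrative decoding (hypothetical tags):
#   decode_from_bio2(['B-NAME', 'I-NAME', 'O', 'B-DATE'])
#   -> [{'start': 0, 'end': 1, 'type': 'NAME'},
#       {'start': 3, 'end': 3, 'type': 'DATE'}]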
|
"""
Routines for extracting data from Siemens DICOM files.
The simplest way to read a file is to call read(filename). If you like you
can also call lower level functions like read_data().
Except for the map of internal data types to numpy type strings (which
doesn't require an import of numpy), this code is deliberately ignorant of
numpy. It returns native Python types that are easy to convert into
numpy types.
"""
# Python modules
from __future__ import division
import struct
import exceptions
import math
# 3rd party modules
import dicom
# Our modules
import util_mrs_file
import constants
TYPE_NONE = 0
TYPE_IMAGE = 1
TYPE_SPECTROSCOPY = 2
# Change to True to enable the assert() statements sprinkled through the code
ASSERTIONS_ENABLED = False
# These are some Siemens-specific tags
TAG_CONTENT_TYPE = (0x0029, 0x1008)
TAG_SPECTROSCOPY_DATA = (0x7fe1, 0x1010)
# I (Philip) ported much of the private tag parsing code from the IDL routines
# dicom_fill_rsp.pro and dicom_fill_util.pro, except for the CSA header
# parsing which is a port of C++ code in the GDCM project.
# Since a lot (all?) of the Siemens format is undocumented, there are magic
# numbers and logic in here that I can't explain. Sorry! Where appropriate
# I have copied or paraphrased comments from the IDL code; they're marked
# with [IDL]. Unmarked comments are mine. Where ambiguous, I labelled my
# comments with [PS] (Philip Semanchuk).
def read(filename, ignore_data=False):
""" This is the simplest (and recommended) way for our code to read a
Siemens DICOM file.
It returns a tuple of (parameters, data). The parameters are a dict.
The data is in a Python list.
"""
# Since a DICOM file is params + data together, it's not so simple to
# ignore the data part. The best we can do is tell PyDicom to apply
# lazy evaluation which is probably less efficient in the long run.
    defer_size = 4096 if ignore_data else None
    dataset = dicom.read_file(filename, defer_size=defer_size)
params = read_parameters_from_dataset(dataset)
data = read_data_from_dataset(dataset)
return params, data
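# Typical usage (hypothetical filename; assumes the old pre-1.0 'dicom'
# package that exposes dicom.read_file):
#
#     params, data = read("svs_se.dcm")
#     print params["frequency"], len(data)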
def read_parameters(filename):
return read_parameters_from_dataset(dicom.read_file(filename))
def read_data(filename):
return read_data_from_dataset(dicom.read_file(filename))
def read_data_from_dataset(dataset):
"""Given a PyDicom dataset, returns the data in the Siemens DICOM
spectroscopy data tag (0x7fe1, 0x1010) as a list of complex numbers.
"""
data = _get(dataset, TAG_SPECTROSCOPY_DATA)
if data:
# Big simplifying assumptions --
# 1) Data is a series of complex numbers organized as ririri...
# where r = real and i = imaginary.
# 2) Each real & imaginary number is a 4 byte float.
# 3) Data is little endian.
        data = struct.unpack("<%df" % (len(data) // 4), data)
data = util_mrs_file.collapse_complexes(data)
else:
data = [ ]
return data
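# E.g. 8 unpacked floats (r0, i0, r1, i1, ...) become 4 complex numbers once
# collapse_complexes() pairs each real value with the imaginary one after it.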
def read_parameters_from_dataset(dataset):
"""Given a PyDicom dataset, returns a fairly extensive subset of the
parameters therein as a dictionary.
"""
params = { }
# The code below refers to slice_index as a variable, but here it is
# hardcoded to one. It could vary, in theory, but in practice I don't
# know how it would actually be used. How would the slice index or
# indices be passed? How would the data be returned? For now, I'll
# leave the slice code active but hardcode the index to 1.
slice_index = 1
# [PS] - Even after porting this code I still can't figure out what
# ptag_img and ptag_ser stand for, so I left the names as is.
ptag_img = { }
ptag_ser = { }
# (0x0029, 0x__10) is one of several possibilities
# - SIEMENS CSA NON-IMAGE, CSA Data Info
# - SIEMENS CSA HEADER, CSA Image Header Info
# - SIEMENS CSA ENVELOPE, syngo Report Data
# - SIEMENS MEDCOM HEADER, MedCom Header Info
# - SIEMENS MEDCOM OOG, MedCom OOG Info (MEDCOM Object Oriented Graphics)
# Pydicom identifies it as "CSA Image Header Info"
for tag in ( (0x0029, 0x1010), (0x0029, 0x1210), (0x0029, 0x1110) ):
tag_data = dataset.get(tag, None)
if tag_data:
break
if tag_data:
ptag_img = _parse_csa_header(tag_data.value)
# [IDL] Access the SERIES Shadow Data
# [PS] I don't know what makes this "shadow" data.
for tag in ( (0x0029, 0x1020), (0x0029, 0x1220), (0x0029, 0x1120) ):
tag_data = dataset.get(tag, None)
if tag_data:
break
if tag_data:
ptag_ser = _parse_csa_header(tag_data.value)
# [IDL] "MrProtocol" (VA25) and "MrPhoenixProtocol" (VB13) are special
# elements that contain many parameters.
    prot_ser = { }
    if ptag_ser.get("MrProtocol", ""):
prot_ser = _parse_protocol_data(ptag_ser["MrProtocol"])
if ptag_ser.get("MrPhoenixProtocol", ""):
prot_ser = _parse_protocol_data(ptag_ser["MrPhoenixProtocol"])
# [IDL] Determine if file is SVS,SI,EPSI, or OTHER
# [PS] IDL code doesn't match comments. Possibilities appear to
# include EPSI, SVS, CSI, JPRESS and SVSLIP2. "OTHER" isn't
# considered.
# EPSI = Echo-Planar Spectroscopic Imaging
# SVS = Single voxel spectroscopy
# CSI = Chemical Shift Imaging
# JPRESS = J-resolved spectroscopy
# SVSLIP2 = No idea!
is_epsi = False
is_svs = False
is_csi = False
is_jpress = False
is_svslip2 = False
# [IDL] Protocol name
parameter_filename = _extract_from_quotes(prot_ser.get("tProtocolName", ""))
parameter_filename = parameter_filename.strip()
# [IDL] Sequence file name
sequence_filename = _extract_from_quotes(prot_ser.get("tSequenceFileName", ""))
sequence_filename = sequence_filename.strip()
sequence_filename2 = ptag_img.get("SequenceName", "")
sequence_filename2 = sequence_filename2.strip()
parameter_filename_lower = parameter_filename.lower()
sequence_filename_lower = sequence_filename.lower()
sequence_filename2_lower = sequence_filename2.lower()
is_epsi = ("epsi" in (parameter_filename_lower, sequence_filename_lower))
is_svs = ("svs" in (parameter_filename_lower, sequence_filename_lower,
sequence_filename2_lower))
if "fid" in (parameter_filename_lower, sequence_filename_lower):
if "csi" in (parameter_filename_lower, sequence_filename_lower):
is_csi = True
else:
is_svs = True
if "csi" in (parameter_filename_lower, sequence_filename_lower):
is_csi = True
is_jpress = ("jpress" in (parameter_filename_lower,
sequence_filename_lower))
is_svslip2 = ("svs_li2" in (parameter_filename_lower,
sequence_filename2_lower))
# Patient Info
params["patient_name"] = _get(dataset, (0x0010, 0x0010), "")
params["patient_id"] = _get(dataset, (0x0010, 0x0020))
params["patient_birthdate"] = _get(dataset, (0x0010, 0x0030))
params["patient_sex"] = _get(dataset, (0x0010, 0x0040), "")
# [PS] Siemens stores the age as nnnY where 'n' is a digit, e.g. 042Y
params["patient_age"] = \
int(_get(dataset, (0x0010, 0x1010), "000Y")[:3])
params["patient_weight"] = round(_get(dataset, (0x0010, 0x1030), 0))
params["study_code"] = _get(dataset, (0x0008, 0x1030), "")
# Identification info
params["bed_move_fraction"] = 0.0
s = _get(dataset, (0x0008, 0x0080), "")
if s:
s = " " + s
s += _get(dataset, (0x0008, 0x1090), "")
params["institution_id"] = s
params["parameter_filename"] = parameter_filename
params["study_type"] = "spec"
# DICOM date format is YYYYMMDD
params["bed_move_date"] = _get(dataset, (0x0008, 0x0020), "")
params["measure_date"] = params["bed_move_date"]
# DICOM time format is hhmmss.fraction
params["bed_move_time"] = _get(dataset, (0x0008, 0x0030), "")
params["comment_1"] = _get(dataset, (0x0008, 0x0031), "")
if not params["comment_1"]:
params["comment_1"] = _get(dataset, (0x0020, 0x4000), "")
# DICOM time format is hhmmss.fraction
params["measure_time"] = _get(dataset, (0x0008, 0x0032), "")
params["sequence_filename"] = ptag_img.get("SequenceName", "")
params["sequence_type"] = ptag_img.get("SequenceName", "")
# Measurement info
params["echo_position"] = "0.0"
params["image_contrast_mode"] = "unknown"
params["kspace_mode"] = "unknown"
params["measured_slices"] = "1"
params["saturation_bands"] = "0"
# Seems to me that a quantity called "NumberOfAverages" would be an
# int, but it is stored as a float, e.g. "128.0000" which makes
# Python's int() choke unless I run it through float() first.
params["averages"] = int(_float(ptag_img.get("NumberOfAverages", "")))
params["flip_angle"] = _float(ptag_img.get("FlipAngle", ""))
# [PS] DICOM stores frequency as MHz, we store it as Hz. Mega = 1x10(6)
params["frequency"] = float(ptag_img.get("ImagingFrequency", 0)) * 1e6
inversion_time = float(ptag_img.get("InversionTime", 0))
params["inversion_time_1"] = inversion_time
params["number_inversions"] = 1 if inversion_time else 0
params["measured_echoes"] = ptag_img.get("EchoTrainLength", "1")
params["nucleus"] = ptag_img.get("ImagedNucleus", "")
params["prescans"] = prot_ser.get("sSpecPara.lPreparingScans", 0)
# Gain
gain = prot_ser.get("sRXSPEC.lGain", None)
if gain == 0:
gain = "-20.0"
elif gain == 1:
gain = "0.0"
else:
gain = ""
params["receiver_gain"] = gain
params["ft_scale_factor"] = \
float(prot_ser.get("sRXSPEC.aFFT_SCALE[0].flFactor", 0))
# Receiver Coil
coil = prot_ser.get("sCOIL_SELECT_MEAS.asList[0].sCoilElementID.tCoilID", "")
params["receiver_coil"] = _extract_from_quotes(coil)
# [IDL] differs in EPSI
params["repetition_time_1"] = float(prot_ser.get("alTR[0]", 0)) * 0.001
sweep_width = ""
remove_oversample_flag = prot_ser.get("sSpecPara.ucRemoveOversampling", "")
remove_oversample_flag = (remove_oversample_flag.strip() == "0x1")
    readout_os = float(ptag_ser.get("ReadoutOS", 1.0))
    base_resolution = int(prot_ser.get('sKSpace.lBaseResolution', 0))
dwelltime = float(ptag_img.get("RealDwellTime", 1.0)) * 1e-9
if dwelltime:
sweep_width = 1 / dwelltime
if not remove_oversample_flag:
sweep_width *= readout_os
sweep_width = str(sweep_width)
params["transmitter_voltage"] = \
prot_ser.get("sTXSPEC.asNucleusInfo[0].flReferenceAmplitude", "0.0")
params["total_duration"] = \
prot_ser.get("lTotalScanTimeSec", "0.0")
prefix = "sSliceArray.asSlice[%d]." % slice_index
image_parameters = (
("image_dimension_line", "dPhaseFOV"),
("image_dimension_column", "dReadoutFOV"),
("image_dimension_partition", "dThickness"),
("image_position_sagittal", "sPosition.dSag"),
("image_position_coronal", "sPosition.dCor"),
("image_position_transverse", "sPosition.dTra"),
)
for key, name in image_parameters:
params[key] = float(prot_ser.get(prefix + name, "0.0"))
# [IDL] Image Normal/Column
image_orientation = ptag_img.get("ImageOrientationPatient", "")
if not image_orientation:
slice_orientation_pitch = ""
slice_distance = ""
else:
# image_orientation is a list of strings, e.g. --
# ['-1.00000000', '0.00000000', '0.00000000', '0.00000000',
# '1.00000000', '0.00000000']
# [IDL] If the data we are processing is a Single Voxel
# Spectroscopy data, interchange rows and columns. Due to an error
# in the protocol used.
if is_svs:
image_orientation = image_orientation[3:] + image_orientation[:3]
        # Convert the values to float and zero out any smaller than 1e-4
f = lambda value: 0.0 if abs(value) < 1e-4 else value
image_orientation = [f(float(value)) for value in image_orientation]
row = image_orientation[:3]
column = image_orientation[3:6]
normal = ( ((row[1] * column[2]) - (row[2] * column[1])),
((row[2] * column[0]) - (row[0] * column[2])),
((row[0] * column[1]) - (row[1] * column[0])),
)
params["image_normal_sagittal"] = normal[0]
params["image_normal_coronal"] = normal[1]
params["image_normal_transverse"] = normal[2]
params["image_column_sagittal"] = column[0]
params["image_column_coronal"] = column[0]
params["image_column_transverse"] = column[0]
# Second part of the return tuple is orientation; we don't use it.
slice_orientation_pitch, _ = _dicom_orientation_string(normal)
# Slice distance
# http://en.wikipedia.org/wiki/Dot_product
keys = ("image_position_sagittal", "image_position_coronal",
"image_position_transverse")
a = [params[key] for key in keys]
b = normal
bb = math.sqrt(sum([value ** 2 for value in normal]))
slice_distance = ((a[0] * b[0]) + (a[1] * b[1]) + (a[2] * b[2])) / bb
params["slice_orientation_pitch"] = slice_orientation_pitch
params["slice_distance"] = slice_distance
regions = ( ("region_dimension_line", "dPhaseFOV"),
("region_dimension_column", "dReadoutFOV"),
("region_dimension_partition", "dThickness"),
("region_position_sagittal", "sPosition.dSag"),
("region_position_coronal", "sPosition.dCor"),
("region_position_transverse", "sPosition.dTra"),
)
for key, name in regions:
name = "sSpecPara.sVoI." + name
params[key] = float(prot_ser.get(name, 0))
# 'DATA INFORMATION'
params["measure_size_spectral"] = \
long(prot_ser.get('sSpecPara.lVectorSize', 0))
params["slice_thickness"] = _float(ptag_img.get("SliceThickness", 0))
params["current_slice"] = "1"
params["number_echoes"] = "1"
params["number_slices"] = "1"
params["data_size_spectral"] = params["measure_size_spectral"]
# ;------------------------------------------------------
# [IDL] Sequence Specific Changes
if not is_epsi:
# [IDL] Echo time - JPRESS handling added by Dragan
echo_time = 0.0
if is_jpress:
# [IDL] Yingjian saves echo time in a private 'echotime' field
# [PS] The IDL code didn't use a dict to store these values
# but instead did a brute force case-insensitive search over
# an array of strings. In that context, key case didn't matter
# but here it does.
keys = prot_ser.keys()
for key in keys:
if key.upper() == "ECHOTIME":
echo_time = float(prot_ser[key])
if is_svslip2:
# [IDL] BJS found TE value set in ICE to be updated in
# 'echotime' field
# [PS] The IDL code didn't use a dict to store these values
# but instead did a brute force case-insensitive search over
# an array of strings. In that context, key case didn't matter
# but here it does.
keys = ptag_img.keys()
for key in keys:
if key.upper() == "ECHOTIME":
echo_time = float(ptag_img[key])
if not echo_time:
# [IDL] still no echo time - try std place
echo_time = float(prot_ser.get('alTE[0]', 0.0))
echo_time /= 1000
params["echo_time"] = echo_time
params["data_size_line"] = \
int(prot_ser.get('sSpecPara.lFinalMatrixSizePhase', 1))
params["data_size_column"] = \
int(prot_ser.get('sSpecPara.lFinalMatrixSizeRead', 1))
params["data_size_partition"] = \
int(prot_ser.get('sSpecPara.lFinalMatrixSizeSlice', 1))
if is_svs:
# [IDL] For Single Voxel Spectroscopy data (SVS) only
params["image_dimension_line"] = \
params["region_dimension_line"]
params["image_dimension_column"] = \
params["region_dimension_column"]
params["image_dimension_partition"] = \
params["region_dimension_partition"]
# [IDL] For SVS data the following three parameters cannot be
# anything other than 1
params["measure_size_line"] = 1
params["measure_size_column"] = 1
params["measure_size_partition"] = 1
else:
# Not SVS
# ;--------------------------------------------------
# ; [IDL] For CSI or OTHER Spectroscopy data only
# ;--------------------------------------------------
measure_size_line = int(prot_ser.get('sKSpace.lPhaseEncodingLines', 1))
params["measure_size_line"] = str(measure_size_line)
measure_size_column = int(prot_ser.get('sKSpace.lPhaseEncodingLines', 0))
params["measure_size_column"] = str(measure_size_column)
measure_size_partition = int(prot_ser.get('sKSpace.lPartitions', '0'))
kspace_dimension = prot_ser.get('sKSpace.ucDimension', '')
if kspace_dimension.strip() == "0x2":
measure_size_partition = 1
params["data_size_partition"] = 1
data_size_partition = 1
params["measure_size_partition"] = measure_size_partition
if sequence_filename in ("svs_cp_press", "svs_se_ir", "svs_tavg"):
# [IDL] Inversion Type 0-Volume,1-None
s = prot_ser.get("SPREPPULSES.UCINVERSION", "")
if s == "0x1":
params["number_inversions"] = 1
elif s == "0x2":
params["number_inversions"] = 0
# else:
# params["number_inversions"] doesn't get set at all.
# This matches the behavior of the IDL code. Note that
# params["number_inversions"] is also populated
# unconditionally in code many lines above.
if sequence_filename in ("svs_se", "svs_st", "fid", "fid3", "fid_var",
"csi_se", "csi_st", "csi_fid", "csi_fidvar",
"epsi"):
# [IDL] FOR EPSI Measure_size and Data_size parameters are the same
params["region_dimension_line"] = \
params["image_dimension_line"]
params["region_dimension_column"] = \
params["image_dimension_column"]
params["ft_scale_factor"] = "1.0"
params["data_size_line"] = \
int(prot_ser.get('sKSpace.lPhaseEncodingLines', 0))
params["data_size_column"] = \
int(prot_ser.get('sKSpace.lBaseResolution', 0)) * readout_os
params["data_size_partition"] = \
int(prot_ser.get('sKSpace.lPartitions', 0))
params["measure_size_line"] = params["data_size_line"]
measure_size_column = params["data_size_column"]
measure_size_partition = params["data_size_partition"]
index = 0 if ((int(dataset.get("InstanceNumber", 0)) % 2) == 1) else 1
echo_time = float(prot_ser.get('alTE[%d]' % index, 0)) / 1000
repetition_time_1 = float(prot_ser.get('alTR[%d]' % index, 0)) / 1000
params["echo_time"] = str(echo_time)
params["repetition_time_1"] = str(repetition_time_1)
dwelltime = float(ptag_img.get("RealDwellTime", 0.0))
if dwelltime and base_resolution:
sweep_width = 1 / (dwelltime * base_resolution * readout_os)
else:
sweep_width = ""
params["sweep_width"] = sweep_width
# Added by BTA
ip_rot = prot_ser.get("sSliceArray.asSlice[0].dInPlaneRot", None)
pol_swap = prot_ser.get("sWipMemBlock.alFree[40]", None)
if ip_rot:
try:
ip_rot = float(ip_rot)
params["in_plane_rotation"] = ip_rot
except Exception, e:
pass
if pol_swap:
try:
pol_swap = int(pol_swap)
params["polarity_swap"] = pol_swap
except Exception, e:
raise e
return params
def _my_assert(expression):
if ASSERTIONS_ENABLED:
assert(expression)
def _dicom_orientation_string(normal):
"""Given a 3-item list (or other iterable) that represents a normal vector
to the "imaging" plane, this function determines the orientation of the
vector in 3-dimensional space. It returns a tuple of (angle, orientation)
in which angle is e.g. "Tra" or "Tra>Cor -6" or "Tra>Sag 14.1 >Cor 9.3"
and orientation is e.g. "Sag" or "Cor-Tra".
For double angulation, errors in secondary angle occur that may be due to
rounding errors in internal Siemens software, which calculates row and
column vectors.
"""
# docstring paraphrases IDL comments
TOLERANCE = 1.e-4
orientations = ('Sag', 'Cor', 'Tra')
final_angle = ""
final_orientation = ""
# [IDL] evaluate orientation of normal vector:
#
# Find principal direction of normal vector (i.e. axis with its largest
# component)
# Find secondary direction (second largest component)
# Calc. angle btw. projection of normal vector into the plane that
# includes both principal and secondary directions on the one hand
# and the principal direction on the other hand ==> 1st angulation:
# "principal>secondary = angle"
# Calc. angle btw. projection into plane perpendicular to principal
# direction on the one hand and secondary direction on the other
# hand ==> 2nd angulation: "secondary>third dir. = angle"
# get principal, secondary and ternary directions
sorted_normal = sorted(normal)
for i, value in enumerate(normal):
if value == sorted_normal[2]:
# [IDL] index of principal direction
principal = i
if value == sorted_normal[1]:
# [IDL] index of secondary direction
secondary = i
if value == sorted_normal[0]:
# [IDL] index of ternary direction
ternary = i
# [IDL] calc. angle between projection into third plane (spawned by
# principle & secondary directions) and principal direction:
angle_1 = math.atan2(normal[secondary], normal[principal]) * \
constants.RADIANS_TO_DEGREES
# [IDL] calc. angle btw. projection on rotated principle direction and
# secondary direction:
# projection on rotated principle dir.
new_normal_ip = math.sqrt((normal[principal] ** 2) + (normal[secondary] ** 2))
angle_2 = math.atan2(normal[ternary], new_normal_ip) * \
constants.RADIANS_TO_DEGREES
# [IDL] SIEMENS notation requires modifications IF principal dir. indxs SAG !
# [PS] In IDL, indxs is the name of the variable that is "secondary" here.
# Even with that substitution, I don't understand the comment above.
if not principal:
if abs(angle_1) > 0:
sign1 = angle_1 / abs(angle_1)
else:
sign1 = 1.0
angle_1 -= (sign1 * 180.0)
angle_2 *= -1
if (abs(angle_2) < TOLERANCE) or (abs(abs(angle_2) - 180) < TOLERANCE):
if (abs(angle_1) < TOLERANCE) or (abs(abs(angle_1) - 180) < TOLERANCE):
# [IDL] NON-OBLIQUE:
final_angle = orientations[principal]
            final_orientation = orientations[principal]
else:
# [IDL] SINGLE-OBLIQUE:
final_angle = "%s>%s %.3f" % \
(orientations[principal], orientations[secondary],
(-1 * angle_1)
)
final_orientation = orientations[principal] + '-' + orientations[secondary]
else:
# [IDL] DOUBLE-OBLIQUE:
final_angle = "%s>%s %.3f >%s %f" % \
(orientations[principal], orientations[secondary],
(-1 * angle_1), orientations[ternary], (-1 * angle_2))
final_orientation = "%s-%s-%s" % \
(orientations[principal], orientations[secondary],
orientations[ternary])
return final_angle, final_orientation
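# Quick sanity check with a pure axial normal (hypothetical input):
#   _dicom_orientation_string((0.0, 0.0, 1.0)) -> ('Tra', 'Tra')
# Both angulations fall below TOLERANCE, so the non-oblique branch fires.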
def _float(value):
"""Attempts to return value as a float. No different from Python's
built-in float(), except that it accepts None and "" (for which it
returns 0.0).
"""
return float(value) if value else 0.0
def _extract_from_quotes(s):
"""Given a string, returns the portion between the first and last
double quote (ASCII 34). If there aren't at least two quote characters,
the original string is returned."""
start = s.find('"')
end = s.rfind('"')
if (start != -1) and (end != -1):
s = s[start + 1 : end]
return s
def _null_truncate(s):
"""Given a string, returns a version truncated at the first '\0' if
there is one. If not, the original string is returned."""
i = s.find(chr(0))
if i != -1:
s = s[:i]
return s
def _scrub(item):
"""Given a string, returns a version truncated at the first '\0' and
stripped of leading/trailing whitespace. If the param is not a string,
it is returned unchanged."""
if isinstance(item, basestring):
return _null_truncate(item).strip()
else:
return item
def _get_chunks(tag, index, format, little_endian=True):
"""Given a CSA tag string, an index into that string, and a format
specifier compatible with Python's struct module, returns a tuple
of (size, chunks) where size is the number of bytes read and
chunks are the data items returned by struct.unpack(). Strings in the
list of chunks have been run through _scrub().
"""
# The first character of the format string indicates endianness.
format = ('<' if little_endian else '>') + format
size = struct.calcsize(format)
chunks = struct.unpack(format, tag[index:index + size])
chunks = [_scrub(item) for item in chunks]
return (size, chunks)
def _parse_protocol_data(protocol_data):
"""Returns a dictionary containing the name/value pairs inside the
"ASCCONV" section of the MrProtocol or MrPhoenixProtocol elements
of a Siemens CSA Header tag.
"""
# Protocol_data is a large string (e.g. 32k) that lists a lot of
# variables in a JSONish format with which I'm not familiar. Following
# that there's another chunk of data delimited by the strings you see
# below.
# That chunk is a list of name=value pairs, INI file style. We
# ignore everything outside of the ASCCONV delimiters. Everything inside
# we parse and return as a dictionary.
start = protocol_data.find("### ASCCONV BEGIN ###")
end = protocol_data.find("### ASCCONV END ###")
_my_assert(start != -1)
_my_assert(end != -1)
start += len("### ASCCONV BEGIN ###")
protocol_data = protocol_data[start:end]
lines = protocol_data.split('\n')
# The two lines of code below turn the 'lines' list into a list of
# (name, value) tuples in which name & value have been stripped and
# all blank lines have been discarded.
f = lambda pair: (pair[0].strip(), pair[1].strip())
lines = [f(line.split('=')) for line in lines if '=' in line]
return dict(lines)
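# Illustrative input/output (hypothetical excerpt): a protocol containing
#
#     ### ASCCONV BEGIN ###
#     sSpecPara.lVectorSize = 1024
#     alTR[0] = 2000000
#     ### ASCCONV END ###
#
# parses to {'sSpecPara.lVectorSize': '1024', 'alTR[0]': '2000000'}. Values
# stay strings; callers convert with int()/float() as needed.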
def _get(dataset, tag, default=None):
"""Returns the value of a dataset tag, or the default if the tag isn't
in the dataset.
PyDicom datasets already have a .get() method, but it returns a
dicom.DataElement object. In practice it's awkward to call dataset.get()
and then figure out if the result is the default or a DataElement,
and if it is the latter _get the .value attribute. This function allows
me to avoid all that mess.
It is also a workaround for this bug (which I submitted) which should be
fixed in PyDicom > 0.9.3:
http://code.google.com/p/pydicom/issues/detail?id=72
"""
return default if tag not in dataset else dataset[tag].value
def _parse_csa_header(tag, little_endian = True):
"""The CSA header is a Siemens private tag that should be passed as
a string. Any of the following tags should work: (0x0029, 0x1010),
(0x0029, 0x1210), (0x0029, 0x1110), (0x0029, 0x1020), (0x0029, 0x1220),
(0x0029, 0x1120).
The function returns a dictionary keyed by element name.
"""
# Let's have a bit of fun, shall we? A Siemens CSA header is a mix of
# binary glop, ASCII, binary masquerading as ASCII, and noise masquerading
# as signal. It's also undocumented, so there's no specification to which
# to refer.
# The format is a good one to show to anyone who complains about XML being
# verbose or hard to read. Spend an afternoon with this and XML will
# look terse and read like a Shakespearean sonnet.
# The algorithm below is a translation of the GDCM project's
# CSAHeader::LoadFromDataElement() inside gdcmCSAHeader.cxx. I don't know
# how that code's author figured out what's in a CSA header, but the
# code works.
# I added comments and observations, but they're inferences. I might
# be wrong. YMMV.
# Some observations --
# - If you need to debug this code, a hexdump of the tag data will be
# your best friend.
# - The data in the tag is a list of elements, each of which contains
# zero or more subelements. The subelements can't be further divided
# and are either empty or contain a string.
# - Everything begins on four byte boundaries.
# - This code will break on big endian data. I don't know if this data
# can be big endian, and if that's possible I don't know what flag to
# read to indicate that. However, it's easy to pass an endianness flag
# to _get_chunks() should the need to parse big endian data arise.
# - Delimiters are thrown in here and there; they are 0x4d = 77 which is
# ASCII 'M' and 0xcd = 205 which has no ASCII representation.
# - Strings in the data are C-style NULL terminated.
# I sometimes read delimiters as strings and sometimes as longs.
DELIMITERS = ("M", "\xcd", 0x4d, 0xcd)
# This dictionary of elements is what this function returns
elements = { }
# I march through the tag data byte by byte (actually a minimum of four
# bytes at a time), and current points to my current position in the tag
# data.
current = 0
# The data starts with "SV10" followed by 0x04, 0x03, 0x02, 0x01.
# It's meaningless to me, so after reading it, I discard it.
size, chunks = _get_chunks(tag, current, "4s4s")
current += size
_my_assert(chunks[0] == "SV10")
_my_assert(chunks[1] == "\4\3\2\1")
# get the number of elements in the outer list
size, chunks = _get_chunks(tag, current, "L")
current += size
element_count = chunks[0]
    # Eat a delimiter (should be 0x4d, ASCII 'M', or 0xcd)
size, chunks = _get_chunks(tag, current, "4s")
current += size
_my_assert(chunks[0] in DELIMITERS)
for i in range(element_count):
# Each element looks like this:
# - (64 bytes) Element name, e.g. ImagedNucleus, NumberOfFrames,
# VariableFlipAngleFlag, MrProtocol, etc. Only the data up to the
# first 0x00 is important. The rest is helpfully populated with
# noise that has enough pattern to make it look like something
# other than the garbage that it is.
# - (4 bytes) VM
# - (4 bytes) VR
# - (4 bytes) syngo_dt
# - (4 bytes) # of subelements in this element (often zero)
# - (4 bytes) a delimiter (0x4d or 0xcd)
size, chunks = _get_chunks(tag, current,
"64s" + "4s" + "4s" + "4s" + "L" + "4s")
current += size
name, vm, vr, syngo_dt, subelement_count, delimiter = chunks
_my_assert(delimiter in DELIMITERS)
# The subelements hold zero or more strings. Those strings are stored
# temporarily in the values list.
values = [ ]
for j in range(subelement_count):
# Each subelement looks like this:
# - (4 x 4 = 16 bytes) Call these four bytes A, B, C and D. For
# some strange reason, C is always a delimiter, while A, B and
# D are always equal to one another. They represent the length
# of the associated data string.
# - (n bytes) String data, the length of which is defined by
# A (and A == B == D).
# - (m bytes) Padding if length is not an even multiple of four.
size, chunks = _get_chunks(tag, current, "4L")
current += size
_my_assert(chunks[0] == chunks[1])
_my_assert(chunks[1] == chunks[3])
_my_assert(chunks[2] in DELIMITERS)
length = chunks[0]
# get a chunk-o-stuff, length indicated by code above.
# Note that length can be 0.
size, chunks = _get_chunks(tag, current, "%ds" % length)
current += size
if chunks[0]:
values.append(chunks[0])
# If we're not at a 4 byte boundary, move.
# Clever modulus code below swiped from GDCM
current += (4 - (length % 4)) % 4
# The value becomes a single string item (possibly "") or a list
# of strings
if len(values) == 0:
values = ""
if len(values) == 1:
values = values[0]
_my_assert(name not in elements)
elements[name] = values
return elements
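# The returned mapping looks like (hypothetical values):
#     {'ImagedNucleus': '1H', 'ImagingFrequency': '123.2345',
#      'MrPhoenixProtocol': '### ASCCONV BEGIN ### ...', ...}
# i.e. element name -> string, list of strings, or "" for empty elements.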
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
def workspace_list_table_format(result):
"""Format workspace list as a table"""
table = []
for item in result:
table.append(workspace_show_table_format(item))
return table
def workspace_show_table_format(workspace):
"""Format the workspace as a table"""
row = OrderedDict()
row['Name'] = workspace['name']
row['Resource Group'] = workspace['resourceGroup']
row['Location'] = workspace['location']
row['State'] = workspace['provisioningState']
return row
def cluster_list_table_format(result):
"""Format cluster list as a table."""
table = []
for item in result:
table.append(cluster_show_table_format(item))
return table
def cluster_show_table_format(result):
"""Format cluster as a table."""
from msrestazure.tools import parse_resource_id
row = OrderedDict()
row['Name'] = result['name']
row['Resource Group'] = result['resourceGroup']
row['Workspace'] = parse_resource_id(result['id'])['name']
row['VM Size'] = result['vmSize']
if result['provisioningState'] == 'deleting':
row['State'] = 'deleting'
else:
row['State'] = result['allocationState']
row['Idle'] = str(result['nodeStateCounts']['idleNodeCount'])
row['Running'] = str(result['nodeStateCounts']['runningNodeCount'])
row['Preparing'] = str(result['nodeStateCounts']['preparingNodeCount'])
row['Leaving'] = str(result['nodeStateCounts']['leavingNodeCount'])
row['Unusable'] = str(result['nodeStateCounts']['unusableNodeCount'])
return row
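# Example render of a cluster row (hypothetical resource):
#   Name  Resource Group  Workspace  VM Size       State   Idle  Running  Preparing  Leaving  Unusable
#   nc6   my-rg           my-ws      STANDARD_NC6  steady  1     2        0          0        0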
def experiment_list_table_format(result):
"""Format experiment list as a table"""
table = []
for item in result:
table.append(experiment_show_table_format(item))
return table
def experiment_show_table_format(experiment):
"""Format the experiment as a table"""
from msrestazure.tools import parse_resource_id
row = OrderedDict()
row['Name'] = experiment['name']
row['Resource Group'] = experiment['resourceGroup']
row['Workspace'] = parse_resource_id(experiment['id'])['name']
row['State'] = experiment['provisioningState']
return row
def job_list_table_format(result):
"""Format job list as a table."""
table = []
for item in result:
table.append(job_show_table_format(item))
return table
def job_show_table_format(job):
"""Format job as a table."""
from msrestazure.tools import parse_resource_id
row = OrderedDict()
row['Name'] = job['name']
cluster = parse_resource_id(job['cluster']['id'])
row['Cluster'] = cluster['resource_name']
row['Cluster RG'] = job['cluster']['resourceGroup']
row['Cluster Workspace'] = cluster['name']
row['Tool'] = job['toolType']
row['Nodes'] = job['nodeCount']
if job['provisioningState'] == 'deleting':
row['State'] = 'deleting'
else:
row['State'] = job['executionState']
if job['executionInfo'] and \
job['executionInfo']['exitCode'] is not None:
row['Exit code'] = str(job['executionInfo']['exitCode'])
else:
row['Exit code'] = ''
return row
def file_list_table_format(result):
"""Format file list as a table."""
table = []
for item in result:
row = OrderedDict()
row['Name'] = item['name']
row['Type'] = item['fileType']
row['Size'] = '' if item['fileType'] == 'directory' else str(item['contentLength'])
row['Modified'] = item['lastModified'] or ' '
table.append(row)
return table
def file_server_list_table_format(result):
"""Format file server list as a table."""
table = []
for item in result:
table.append(file_server_show_table_format(item))
return table
def file_server_show_table_format(result):
"""Format file server list as a table."""
row = OrderedDict()
row['Name'] = result['name']
row['Resource Group'] = result['resourceGroup']
row['Size'] = result['vmSize']
disks = result['dataDisks']
if disks:
        row['Disks'] = '{0} x {1} GB'.format(disks['diskCount'], disks['diskSizeInGb'])
mount_settings = result['mountSettings']
if mount_settings:
row['Public IP'] = mount_settings['fileServerPublicIp']
row['Internal IP'] = mount_settings['fileServerInternalIp']
row['Mount Point'] = mount_settings['mountPoint']
return row
def remote_login_table_format(result):
"""Format remote login info list as a table."""
table = []
for item in result:
row = OrderedDict()
row['ID'] = item['nodeId']
row['IP'] = item['ipAddress']
row['SSH Port'] = int(item['port'])
table.append(row)
return table
def usage_table_format(result):
"""Format usage information as a table."""
table = []
for item in result:
row = OrderedDict()
row['Value'] = item['name']['localizedValue']
row['Usage'] = item['currentValue'] or "0"
row['Limit'] = item['limit'] or "0"
table.append(row)
return table
def node_setup_files_list_table_format(result):
"""Format list of node setup task files"""
table = []
for item in result:
row = OrderedDict()
row['Name'] = item['name']
row['Is directory'] = 'yes' if item['is_directory'] else 'no'
row['Size'] = '' if item['size'] is None else (item['size'] or '0')
table.append(row)
return table
|
from dppy.beta_ensembles import LaguerreEnsemble
laguerre = LaguerreEnsemble(beta=1) # beta in {0,1,2,4}, default beta=2
laguerre.sample_full_model(size_N=500, size_M=800) # M >= N
# laguerre.plot(normalization=True)
laguerre.hist(normalization=True)
# To compare with the sampling speed of the tridiagonal model simply use
# laguerre.sample_banded_model(size_N=500, size_M=800)
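# As N grows (at fixed ratio M/N, here 800/500) the empirical eigenvalue
# distribution of the Laguerre ensemble converges to the Marchenko-Pastur
# law; normalization=True rescales the histogram so it can be compared
# against that limiting density.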
|
import matplotlib.pyplot as plt
import numpy as np
from numpy import linalg as LA
def parametersForGx(mu,covmatrix):
inverse = np.linalg.inv(covmatrix)
(sign, logdet) = np.linalg.slogdet(covmatrix)
Wi = -0.5 * (inverse)
wi = inverse.dot(mu)
print('--------------')
print(logdet)
print(mu.T.dot(inverse).dot(mu))
wi0 = -0.5*( mu.T.dot(inverse).dot(mu) + logdet )
print('--------------')
return Wi,wi,wi0
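# parametersForGx returns the parameters of the Gaussian quadratic
# discriminant g_i(x) = x^T W_i x + w_i^T x + w_i0 (equal priors assumed):
#   W_i  = -1/2 * inv(Sigma_i)
#   w_i  = inv(Sigma_i) . mu_i
#   w_i0 = -1/2 * (mu_i^T inv(Sigma_i) mu_i + ln|Sigma_i|)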
w1traindata = np.array([
[0,0],
[0,1],
[2,0],
[3,2],
[3,3],
[2,2],
[2,0]
])
w2traindata = np.array([
[7,7],
[8,6],
[9,7],
[8,10],
[7,10],
[8,9],
[7,11]
])
w1mean = np.mean(w1traindata, axis=0)
w1cov = (w1traindata - w1mean).T.dot((w1traindata - w1mean)) / (w1traindata.shape[0]-1)
W1,w1,w10 = parametersForGx(w1mean,w1cov)
print(W1)
print(w1)
print(w10)
print('----------')
w2mean = np.mean(w2traindata, axis=0)
w2cov = (w2traindata - w2mean).T.dot((w2traindata - w2mean)) / (w2traindata.shape[0]-1)
W2,w2,w20 = parametersForGx(w2mean,w2cov)
print(W2)
print(w2)
print(w20)
plt.scatter(w1traindata.T[0], w1traindata.T[1], label= "classA", color= "green", s=10)
plt.scatter(w2traindata.T[0], w2traindata.T[1], label= "classB", color= "red", s=10)
plt.legend()
plt.title("Plot with both decision boundaries, one in cyan and one in pink; almost identical, just shifted by log(2)")
# plt.show()
delta = 0.025
xrange = np.arange(-10.0, 70.0, delta)
yrange = np.arange(-10.0, 70.0, delta)
X, Y = np.meshgrid(xrange,yrange)
F = 16637*X**2 + 25932*X*Y - 9093 * Y**2 - 549468 * X - 156042 * Y + 2844200
plt.contour(X, Y, (F + np.log(2)), [0], colors=['pink'])
plt.contour(X, Y, (F), [0], colors=['cyan'])
plt.show()
# def discr_func(x, cov_mat, mu_vec):
# print('Entering')
# x_vec = np.array([[x],[y]])
# W_i = (-1/2) * np.linalg.inv(cov_mat)
# w_i = np.linalg.inv(cov_mat).dot(mu_vec)
# omega_i_p1 = (((-1/2) * (mu_vec).T).dot(np.linalg.inv(cov_mat))).dot(mu_vec)
# omega_i_p2 = (-1/2) * np.log(np.linalg.det(cov_mat))
# omega_i = omega_i_p1 - omega_i_p2
# g = ((x_vec.T).dot(W_i)).dot(x_vec) + (w_i.T).dot(x_vec) + omega_i
# # print('g is', g)
# return g
# def decision_boundary(x_vec, mu_vec, mu_vec2):
# g1 = ()
# g2 = 2*( (x_vec-mu_vec2).T.dot((x_vec-mu_vec2)) )
# return g1 - g2
|
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
from direct.distributed.PyDatagram import *
from direct.fsm.FSM import FSM
from otp.ai.MagicWordGlobal import *
from otp.distributed import OtpDoGlobals
from toontown.makeatoon.NameGenerator import NameGenerator
from toontown.toon.ToonDNA import ToonDNA
from toontown.toonbase import TTLocalizer
from toontown.uberdog import NameJudgeBlacklist
from panda3d.core import *
import hashlib, hmac, json
import anydbm, math, os
import urllib2, time, urllib
import cookielib, socket
def rejectConfig(issue, securityIssue=True, retarded=True):
print
print
print 'Lemme get this straight....'
print 'You are trying to use remote account database type...'
print 'However,', issue + '!!!!'
if securityIssue:
print 'Do you want this server to get hacked?'
if retarded:
print '"Either down\'s or autism"\n - JohnnyDaPirate, 2015'
print 'Go fix that!'
exit()
def entropy(string):
prob = [float(string.count(c)) / len(string) for c in dict.fromkeys(list(string))]
entropy = -sum([p * math.log(p) / math.log(2.0) for p in prob])
return entropy
def entropyIdeal(length):
prob = 1.0 / length
return -length * prob * math.log(prob) / math.log(2.0)
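# Quick sanity check: entropy('aabb') == 1.0 bit per character, while
# entropyIdeal(4) == 2.0 (four equally likely symbols). The config check
# below rejects secrets whose entropy is under half the ideal for their length.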
accountDBType = config.GetString('accountdb-type', 'developer')
accountServerSecret = config.GetString('account-server-secret', 'dev')
accountServerHashAlgo = config.GetString('account-server-hash-algo', 'sha512')
apiSecret = config.GetString('api-key', 'dev')
if accountDBType == 'remote':
if accountServerSecret == 'dev':
rejectConfig('you have not changed the secret in config/local.prc')
if apiSecret == 'dev':
rejectConfig('you have not changed the api key in config/local.prc')
if len(accountServerSecret) < 16:
rejectConfig('the secret is too small! Make it 16+ bytes', retarded=False)
secretLength = len(accountServerSecret)
ideal = entropyIdeal(secretLength) / 2
entropy = entropy(accountServerSecret)
if entropy < ideal:
rejectConfig('the secret entropy is too low! For %d bytes,'
' it should be %d. Currently it is %d' % (secretLength, ideal, entropy),
retarded=False)
hashAlgo = getattr(hashlib, accountServerHashAlgo, None)
if not hashAlgo:
rejectConfig('%s is not a valid hash algo' % accountServerHashAlgo, securityIssue=False)
hashSize = len(hashAlgo('').digest())
minAccessLevel = config.GetInt('min-access-level', 100)
def executeHttpRequest(url, **extras):
# TO DO: THIS IS QUITE DISGUSTING
# MOVE THIS TO ToontownInternalRepository (this might be interesting for AI)
##### USE PYTHON 2.7.9 ON PROD WITH SSL AND CLOUDFLARE #####
    _data = dict(extras)
signature = hashlib.sha512(json.dumps(_data) + apiSecret).hexdigest()
data = urllib.urlencode({'data': json.dumps(_data), 'hmac': signature})
cookie_jar = cookielib.LWPCookieJar()
cookie = urllib2.HTTPCookieProcessor(cookie_jar)
opener = urllib2.build_opener(cookie)
req = urllib2.Request('http://192.168.1.212/api/' + url, data,
headers={"Content-Type" : "application/x-www-form-urlencoded"})
req.get_method = lambda: "POST"
try:
return opener.open(req).read()
except:
return None
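# Note: the 'hmac' field sent above is really hash(data + secret), not a true
# HMAC; with Merkle-Damgard hashes such as sha512 that construction is open
# to length-extension attacks. A minimal sketch of a real HMAC signature,
# using the already-imported hmac module (illustrative only -- the server
# would have to verify the same construction):
#
#     signature = hmac.new(apiSecret, json.dumps(_data), hashlib.sha512).hexdigest()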
notify = directNotify.newCategory('ClientServicesManagerUD')
def executeHttpRequestAndLog(url, **extras):
# SEE ABOVE
    response = executeHttpRequest(url, **extras)
if response is None:
notify.error('A request to ' + url + ' went wrong.')
return None
try:
data = json.loads(response)
except:
notify.error('Malformed response from ' + url + '.')
return None
if 'error' in data:
        notify.warning('Error from ' + url + ': ' + data['error'])
return data
#blacklist = executeHttpRequest('names/blacklist.json') # todo; create a better system for this
blacklist = json.dumps(["none"])
if blacklist:
blacklist = json.loads(blacklist)
def judgeName(name):
if not name:
return False
if blacklist:
for namePart in name.split(' '):
namePart = namePart.lower()
if len(namePart) < 1:
return False
for banned in blacklist:
if banned in namePart:
return False
return True
# --- ACCOUNT DATABASES ---
# These classes make up the available account databases for Toontown Stride.
# DeveloperAccountDB is a special database that accepts a username, and assigns
# each user with 700 access automatically upon login.
class AccountDB:
notify = directNotify.newCategory('AccountDB')
def __init__(self, csm):
self.csm = csm
filename = config.GetString('account-bridge-filename', 'account-bridge.db')
filename = os.path.join('dependencies', filename)
self.dbm = anydbm.open(filename, 'c')
def addNameRequest(self, avId, name, accountID = None):
return True
def getNameStatus(self, accountId, callback = None):
return 'APPROVED'
def removeNameRequest(self, avId):
pass
def lookup(self, data, callback):
userId = data['userId']
data['success'] = True
data['accessLevel'] = max(data['accessLevel'], minAccessLevel)
if str(userId) not in self.dbm:
data['accountId'] = 0
else:
data['accountId'] = int(self.dbm[str(userId)])
callback(data)
return data
def storeAccountID(self, userId, accountId, callback):
self.dbm[str(userId)] = str(accountId) # anydbm only allows strings.
if getattr(self.dbm, 'sync', None):
self.dbm.sync()
callback(True)
else:
self.notify.warning('Unable to associate user %s with account %d!' % (userId, accountId))
callback(False)
class DeveloperAccountDB(AccountDB):
notify = directNotify.newCategory('DeveloperAccountDB')
def lookup(self, userId, callback):
return AccountDB.lookup(self, {'userId': userId,
'accessLevel': 700,
'notAfter': 0},
callback)
class RemoteAccountDB:
# TO DO FOR NAMES:
# CURRENTLY IT MAKES n REQUESTS FOR EACH AVATAR
# IN THE FUTURE, MAKE ONLY 1 REQUEST
# WHICH RETURNS ALL PENDING AVS
# ^ done, check before removing todo note
notify = directNotify.newCategory('RemoteAccountDB')
def __init__(self, csm):
self.csm = csm
def addNameRequest(self, avId, name, accountID = None):
username = avId
if accountID is not None:
username = accountID
res = executeHttpRequest('names', action='set', username=str(username),
avId=str(avId), wantedName=name)
if res is not None:
return True
return False
def getNameStatus(self, accountId, callback = None):
r = executeHttpRequest('names', action='get', username=str(accountId))
try:
r = json.loads(r)
if callback is not None:
callback(r)
return True
except:
return False
def removeNameRequest(self, avId):
r = executeHttpRequest('names', action='del', avId=str(avId))
if r:
return 'SUCCESS'
return 'FAILURE'
def lookup(self, token, callback):
'''
Token format:
The token is obfuscated a bit, but nothing too hard to read.
Most of the security is based on the hash.
I. Data contained in a token:
A json-encoded dict, which contains timestamp, userid and extra info
II. Token format
X = BASE64(ROT13(DATA)[::-1])
H = HASH(X)[::-1]
Token = BASE64(H + X)
'''
cookie_check = executeHttpRequest('cookie', cookie=token)
try:
check = json.loads(cookie_check)
if check['success'] is not True:
raise ValueError(check['error'])
token = token.decode('base64')
hash, token = token[:hashSize], token[hashSize:]
correctHash = hashAlgo(token + accountServerSecret).digest()
if len(hash) != len(correctHash):
raise ValueError('Invalid hash.')
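            # Constant-time comparison: accumulate XOR differences instead of
            # returning at the first mismatch, so response timing doesn't leak
            # how many leading bytes of the hash were correct.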
value = 0
for x, y in zip(hash[::-1], correctHash):
value |= ord(x) ^ ord(y)
if value:
raise ValueError('Invalid hash.')
token = json.loads(token.decode('base64')[::-1].decode('rot13'))
if token['notAfter'] < int(time.time()):
raise ValueError('Expired token.')
except:
resp = {'success': False}
callback(resp)
return resp
return self.account_lookup(token, callback)
def account_lookup(self, data, callback):
data['success'] = True
data['accessLevel'] = max(data['accessLevel'], minAccessLevel)
data['accountId'] = int(data['accountId'])
callback(data)
return data
def storeAccountID(self, userId, accountId, callback):
r = executeHttpRequest('associateuser', username=str(userId), accountId=str(accountId))
try:
r = json.loads(r)
if r['success']:
callback(True)
else:
self.notify.warning('Unable to associate user %s with account %d, got the return message of %s!' % (userId, accountId, r['error']))
callback(False)
except:
self.notify.warning('Unable to associate user %s with account %d!' % (userId, accountId))
callback(False)
# --- FSMs ---
class OperationFSM(FSM):
TARGET_CONNECTION = False
def __init__(self, csm, target):
self.csm = csm
self.target = target
FSM.__init__(self, self.__class__.__name__)
def enterKill(self, reason=''):
if self.TARGET_CONNECTION:
self.csm.killConnection(self.target, reason)
else:
self.csm.killAccount(self.target, reason)
self.demand('Off')
def enterOff(self):
if self.TARGET_CONNECTION:
del self.csm.connection2fsm[self.target]
else:
del self.csm.account2fsm[self.target]
class LoginAccountFSM(OperationFSM):
notify = directNotify.newCategory('LoginAccountFSM')
TARGET_CONNECTION = True
def enterStart(self, token):
self.token = token
self.demand('QueryAccountDB')
def enterQueryAccountDB(self):
self.csm.accountDB.lookup(self.token, self.__handleLookup)
def __handleLookup(self, result):
if not result.get('success'):
self.csm.air.writeServerEvent('tokenRejected', self.target, self.token)
self.demand('Kill', result.get('reason', 'The account server rejected your token.'))
return
self.userId = result.get('userId', 0)
self.accountId = result.get('accountId', 0)
self.accessLevel = result.get('accessLevel', 0)
self.notAfter = result.get('notAfter', 0)
if self.accountId:
self.demand('RetrieveAccount')
else:
self.demand('CreateAccount')
def enterRetrieveAccount(self):
self.csm.air.dbInterface.queryObject(
self.csm.air.dbId, self.accountId, self.__handleRetrieve)
def __handleRetrieve(self, dclass, fields):
if dclass != self.csm.air.dclassesByName['AccountUD']:
self.demand('Kill', 'Your account object was not found in the database!')
return
self.account = fields
if self.notAfter:
if self.account.get('LAST_LOGIN_TS', 0) > self.notAfter:
self.notify.debug('Rejecting old token: %d, notAfter=%d' % (self.account.get('LAST_LOGIN_TS', 0), self.notAfter))
return self.__handleLookup({'success': False})
self.demand('SetAccount')
def enterCreateAccount(self):
self.account = {
'ACCOUNT_AV_SET': [0] * 6,
'ESTATE_ID': 0,
'ACCOUNT_AV_SET_DEL': [],
'CREATED': time.ctime(),
'LAST_LOGIN': time.ctime(),
'LAST_LOGIN_TS': time.time(),
'ACCOUNT_ID': str(self.userId),
'ACCESS_LEVEL': self.accessLevel,
'CHAT_SETTINGS': [1, 1]
}
self.csm.air.dbInterface.createObject(
self.csm.air.dbId,
self.csm.air.dclassesByName['AccountUD'],
self.account,
self.__handleCreate)
def __handleCreate(self, accountId):
if self.state != 'CreateAccount':
self.notify.warning('Received a create account response outside of the CreateAccount state.')
return
if not accountId:
self.notify.warning('Database failed to construct an account object!')
self.demand('Kill', 'Your account object could not be created in the game database.')
return
self.accountId = accountId
self.csm.air.writeServerEvent('accountCreated', accountId)
self.demand('StoreAccountID')
def enterStoreAccountID(self):
self.csm.accountDB.storeAccountID(
self.userId,
self.accountId,
self.__handleStored)
def __handleStored(self, success=True):
if not success:
self.demand('Kill', 'The account server could not save your user ID!')
return
self.demand('SetAccount')
def enterSetAccount(self):
# If necessary, update their account information:
if self.accessLevel:
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
self.accountId,
self.csm.air.dclassesByName['AccountUD'],
{'ACCESS_LEVEL': self.accessLevel})
# If there's anybody on the account, kill them for redundant login:
datagram = PyDatagram()
datagram.addServerHeader(
self.csm.GetAccountConnectionChannel(self.accountId),
self.csm.air.ourChannel,
CLIENTAGENT_EJECT)
datagram.addUint16(100)
datagram.addString('This account has been logged in from elsewhere.')
self.csm.air.send(datagram)
# Next, add this connection to the account channel.
datagram = PyDatagram()
datagram.addServerHeader(
self.target,
self.csm.air.ourChannel,
CLIENTAGENT_OPEN_CHANNEL)
datagram.addChannel(self.csm.GetAccountConnectionChannel(self.accountId))
self.csm.air.send(datagram)
# Subscribe to any "staff" channels that the account has access to.
access = self.account.get('ADMIN_ACCESS', 0)
if access >= 200:
# Subscribe to the moderator channel.
dg = PyDatagram()
dg.addServerHeader(self.target, self.csm.air.ourChannel, CLIENTAGENT_OPEN_CHANNEL)
dg.addChannel(OtpDoGlobals.OTP_MOD_CHANNEL)
self.csm.air.send(dg)
if access >= 400:
# Subscribe to the administrator channel.
dg = PyDatagram()
dg.addServerHeader(self.target, self.csm.air.ourChannel, CLIENTAGENT_OPEN_CHANNEL)
dg.addChannel(OtpDoGlobals.OTP_ADMIN_CHANNEL)
self.csm.air.send(dg)
if access >= 500:
# Subscribe to the system administrator channel.
dg = PyDatagram()
dg.addServerHeader(self.target, self.csm.air.ourChannel, CLIENTAGENT_OPEN_CHANNEL)
dg.addChannel(OtpDoGlobals.OTP_SYSADMIN_CHANNEL)
self.csm.air.send(dg)
# Now set their sender channel to represent their account affiliation:
datagram = PyDatagram()
datagram.addServerHeader(
self.target,
self.csm.air.ourChannel,
CLIENTAGENT_SET_CLIENT_ID)
# Account ID in high 32 bits, 0 in low (no avatar):
datagram.addChannel(self.accountId << 32)
self.csm.air.send(datagram)
# Un-sandbox them!
datagram = PyDatagram()
datagram.addServerHeader(
self.target,
self.csm.air.ourChannel,
CLIENTAGENT_SET_STATE)
datagram.addUint16(2) # ESTABLISHED
self.csm.air.send(datagram)
# Update the last login timestamp:
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
self.accountId,
self.csm.air.dclassesByName['AccountUD'],
{'LAST_LOGIN': time.ctime(),
'LAST_LOGIN_TS': time.time(),
'ACCOUNT_ID': str(self.userId)})
# We're done.
self.csm.air.writeServerEvent('accountLogin', self.target, self.accountId, self.userId)
self.csm.sendUpdateToChannel(self.target, 'acceptLogin', [int(time.time())])
self.demand('Off')
class CreateAvatarFSM(OperationFSM):
notify = directNotify.newCategory('CreateAvatarFSM')
def enterStart(self, dna, index):
# Basic sanity-checking:
if index >= 6:
self.demand('Kill', 'Invalid index specified!')
return
if not ToonDNA().isValidNetString(dna):
self.demand('Kill', 'Invalid DNA specified!')
return
self.index = index
self.dna = dna
# Okay, we're good to go, let's query their account.
self.demand('RetrieveAccount')
def enterRetrieveAccount(self):
self.csm.air.dbInterface.queryObject(
self.csm.air.dbId, self.target, self.__handleRetrieve)
def __handleRetrieve(self, dclass, fields):
if dclass != self.csm.air.dclassesByName['AccountUD']:
self.demand('Kill', 'Your account object was not found in the database!')
return
self.account = fields
# For use in calling name requests:
self.accountID = self.account['ACCOUNT_ID']
self.avList = self.account['ACCOUNT_AV_SET']
# Sanitize:
self.avList = self.avList[:6]
self.avList += [0] * (6-len(self.avList))
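# e.g. an account with avatars [101, 205] becomes [101, 205, 0, 0, 0, 0]:
# truncated to at most six entries, then zero-padded to exactly six slots.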
# Make sure the index is open:
if self.avList[self.index]:
self.demand('Kill', 'This avatar slot is already taken by another avatar!')
return
# Okay, there's space. Let's create the avatar!
self.demand('CreateAvatar')
def enterCreateAvatar(self):
dna = ToonDNA()
dna.makeFromNetString(self.dna)
colorString = TTLocalizer.ColorfulToon
animalType = TTLocalizer.AnimalToSpecies[dna.getAnimal()]
name = ' '.join((colorString, animalType))
toonFields = {
'setName': (name,),
'setWishNameState': ('OPEN',),
'setWishName': ('',),
'setDNAString': (self.dna,),
'setDISLid': (self.target,),
}
self.csm.air.dbInterface.createObject(
self.csm.air.dbId,
self.csm.air.dclassesByName['DistributedToonUD'],
toonFields,
self.__handleCreate)
def __handleCreate(self, avId):
if not avId:
self.demand('Kill', 'Database failed to create the new avatar object!')
return
self.avId = avId
self.demand('StoreAvatar')
def enterStoreAvatar(self):
# Associate the avatar with the account...
self.avList[self.index] = self.avId
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
self.target,
self.csm.air.dclassesByName['AccountUD'],
{'ACCOUNT_AV_SET': self.avList},
{'ACCOUNT_AV_SET': self.account['ACCOUNT_AV_SET']},
self.__handleStoreAvatar)
self.accountID = self.account['ACCOUNT_ID']
def __handleStoreAvatar(self, fields):
if fields:
self.demand('Kill', 'Database failed to associate the new avatar to your account!')
return
# Otherwise, we're done!
self.csm.air.writeServerEvent('avatarCreated', self.avId, self.target, self.dna.encode('hex'), self.index)
self.csm.sendUpdateToAccountId(self.target, 'createAvatarResp', [self.avId])
self.demand('Off')
class AvatarOperationFSM(OperationFSM):
POST_ACCOUNT_STATE = 'Off' # This needs to be overridden.
def enterRetrieveAccount(self):
# Query the account:
self.csm.air.dbInterface.queryObject(
self.csm.air.dbId, self.target, self.__handleRetrieve)
def __handleRetrieve(self, dclass, fields):
if dclass != self.csm.air.dclassesByName['AccountUD']:
self.demand('Kill', 'Your account object was not found in the database!')
return
self.account = fields
# For use in calling name requests:
self.accountID = self.account['ACCOUNT_ID']
self.avList = self.account['ACCOUNT_AV_SET']
# Sanitize:
self.avList = self.avList[:6]
self.avList += [0] * (6-len(self.avList))
self.demand(self.POST_ACCOUNT_STATE)
class GetAvatarsFSM(AvatarOperationFSM):
notify = directNotify.newCategory('GetAvatarsFSM')
POST_ACCOUNT_STATE = 'QueryAvatars'
def enterStart(self):
self.demand('RetrieveAccount')
self.nameStateData = None
def enterQueryAvatars(self):
self.pendingAvatars = set()
self.avatarFields = {}
for avId in self.avList:
if avId:
self.pendingAvatars.add(avId)
def response(dclass, fields, avId=avId):
if self.state != 'QueryAvatars':
return
if dclass != self.csm.air.dclassesByName['DistributedToonUD']:
self.demand('Kill', "One of the account's avatars is invalid!")
return
self.avatarFields[avId] = fields
self.pendingAvatars.remove(avId)
if not self.pendingAvatars:
self.demand('SendAvatars')
self.csm.air.dbInterface.queryObject(
self.csm.air.dbId,
avId,
response)
if not self.pendingAvatars:
self.demand('SendAvatars')
def enterSendAvatars(self):
potentialAvs = []
for avId, fields in self.avatarFields.items():
index = self.avList.index(avId)
wishNameState = fields.get('setWishNameState', [''])[0]
name = fields['setName'][0]
nameState = 0
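# nameState codes sent to the client: 1 = OPEN, 2 = PENDING, 3 = APPROVED,
# 4 = REJECTED; 0 is the fallback for any other wish-name state.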
if wishNameState == 'OPEN':
nameState = 1
elif wishNameState == 'PENDING':
if accountDBType == 'remote':
if self.nameStateData is None:
self.demand('QueryNameState')
return
actualNameState = self.nameStateData[str(avId)]
else:
actualNameState = self.csm.accountDB.getNameStatus(self.account['ACCOUNT_ID'])
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
avId,
self.csm.air.dclassesByName['DistributedToonUD'],
{'setWishNameState': [actualNameState]}
)
if actualNameState == 'PENDING':
nameState = 2
if actualNameState == 'APPROVED':
nameState = 3
name = fields['setWishName'][0]
elif actualNameState == 'REJECTED':
nameState = 4
elif wishNameState == 'APPROVED':
nameState = 3
elif wishNameState == 'REJECTED':
nameState = 4
potentialAvs.append([avId, name, fields['setDNAString'][0],
index, nameState])
self.csm.sendUpdateToAccountId(self.target, 'setAvatars', [self.account['CHAT_SETTINGS'], potentialAvs])
self.demand('Off')
def enterQueryNameState(self):
def gotStates(data):
self.nameStateData = data
taskMgr.doMethodLater(0, GetAvatarsFSM.demand, 'demand-QueryAvatars',
extraArgs=[self, 'QueryAvatars'])
self.csm.accountDB.getNameStatus(self.account['ACCOUNT_ID'], gotStates)
# gotStates re-demands QueryAvatars (via taskMgr) once the name-state data arrives.
# This inherits from GetAvatarsFSM, because the delete operation ends in a
# setAvatars message being sent to the client.
class DeleteAvatarFSM(GetAvatarsFSM):
notify = directNotify.newCategory('DeleteAvatarFSM')
POST_ACCOUNT_STATE = 'ProcessDelete'
def enterStart(self, avId):
self.avId = avId
GetAvatarsFSM.enterStart(self)
def enterProcessDelete(self):
if self.avId not in self.avList:
self.demand('Kill', 'Tried to delete an avatar not in the account!')
return
index = self.avList.index(self.avId)
self.avList[index] = 0
avsDeleted = list(self.account.get('ACCOUNT_AV_SET_DEL', []))
avsDeleted.append([self.avId, int(time.time())])
estateId = self.account.get('ESTATE_ID', 0)
if estateId != 0:
# This assumes that the house already exists, but it shouldn't
# be a problem if it doesn't.
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
estateId,
self.csm.air.dclassesByName['DistributedEstateAI'],
{'setSlot%dToonId' % index: [0],
'setSlot%dGarden' % index: [[]]}
)
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
self.target,
self.csm.air.dclassesByName['AccountUD'],
{'ACCOUNT_AV_SET': self.avList,
'ACCOUNT_AV_SET_DEL': avsDeleted},
{'ACCOUNT_AV_SET': self.account['ACCOUNT_AV_SET'],
'ACCOUNT_AV_SET_DEL': self.account['ACCOUNT_AV_SET_DEL']},
self.__handleDelete)
self.csm.accountDB.removeNameRequest(self.avId)
def __handleDelete(self, fields):
if fields:
self.demand('Kill', 'Database failed to mark the avatar as deleted!')
return
self.csm.air.friendsManager.clearList(self.avId)
self.csm.air.writeServerEvent('avatarDeleted', self.avId, self.target)
self.demand('QueryAvatars')
class SetNameTypedFSM(AvatarOperationFSM):
notify = directNotify.newCategory('SetNameTypedFSM')
POST_ACCOUNT_STATE = 'RetrieveAvatar'
def enterStart(self, avId, name):
self.avId = avId
self.name = name
self.set_account_id = None
if self.avId:
self.demand('RetrieveAccount')
return
# Hmm, self.avId was 0. Okay, let's just cut to the judging:
self.demand('JudgeName')
def enterRetrieveAvatar(self):
if self.accountID:
self.set_account_id = self.accountID
if self.avId and self.avId not in self.avList:
self.demand('Kill', 'Tried to name an avatar not in the account!')
return
self.csm.air.dbInterface.queryObject(self.csm.air.dbId, self.avId,
self.__handleAvatar)
def __handleAvatar(self, dclass, fields):
if dclass != self.csm.air.dclassesByName['DistributedToonUD']:
self.demand('Kill', "One of the account's avatars is invalid!")
return
if fields['setWishNameState'][0] != 'OPEN':
self.demand('Kill', 'Avatar is not in a namable state!')
return
self.demand('JudgeName')
def enterJudgeName(self):
# Let's see if the name is valid:
status = judgeName(self.name)
if self.avId and status:
if self.csm.accountDB.addNameRequest(self.avId, self.name, accountID=self.set_account_id):
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
self.avId,
self.csm.air.dclassesByName['DistributedToonUD'],
{'setWishNameState': ('PENDING',),
'setWishName': (self.name,)})
else:
status = False
if self.avId:
self.csm.air.writeServerEvent('avatarWishname', self.avId, self.name)
self.csm.sendUpdateToAccountId(self.target, 'setNameTypedResp', [self.avId, status])
self.demand('Off')
class SetNamePatternFSM(AvatarOperationFSM):
notify = directNotify.newCategory('SetNamePatternFSM')
POST_ACCOUNT_STATE = 'RetrieveAvatar'
def enterStart(self, avId, pattern):
self.avId = avId
self.pattern = pattern
self.demand('RetrieveAccount')
def enterRetrieveAvatar(self):
if self.avId and self.avId not in self.avList:
self.demand('Kill', 'Tried to name an avatar not in the account!')
return
self.csm.air.dbInterface.queryObject(self.csm.air.dbId, self.avId,
self.__handleAvatar)
def __handleAvatar(self, dclass, fields):
if dclass != self.csm.air.dclassesByName['DistributedToonUD']:
self.demand('Kill', "One of the account's avatars is invalid!")
return
if fields['setWishNameState'][0] != 'OPEN':
self.demand('Kill', 'Avatar is not in a namable state!')
return
self.demand('SetName')
def enterSetName(self):
# Render the pattern into a string:
parts = []
for p, f in self.pattern:
part = self.csm.nameGenerator.nameDictionary.get(p, ('', ''))[1]
if f:
part = part[:1].upper() + part[1:]
else:
part = part.lower()
parts.append(part)
parts[2] += parts.pop(3) # Merge 2&3 (the last name) as there should be no space.
while '' in parts:
parts.remove('')
name = ' '.join(parts)
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
self.avId,
self.csm.air.dclassesByName['DistributedToonUD'],
{'setWishNameState': ('',),
'setWishName': ('',),
'setName': (name,)})
self.csm.air.writeServerEvent('avatarNamed', self.avId, name)
self.csm.sendUpdateToAccountId(self.target, 'setNamePatternResp', [self.avId, 1])
self.demand('Off')
class AcknowledgeNameFSM(AvatarOperationFSM):
notify = directNotify.newCategory('AcknowledgeNameFSM')
POST_ACCOUNT_STATE = 'GetTargetAvatar'
def enterStart(self, avId):
self.avId = avId
self.demand('RetrieveAccount')
def enterGetTargetAvatar(self):
# Make sure the target avatar is part of the account:
if self.avId not in self.avList:
self.demand('Kill', 'Tried to acknowledge name on an avatar not in the account!')
return
self.csm.air.dbInterface.queryObject(self.csm.air.dbId, self.avId,
self.__handleAvatar)
def __handleAvatar(self, dclass, fields):
if dclass != self.csm.air.dclassesByName['DistributedToonUD']:
self.demand('Kill', "One of the account's avatars is invalid!")
return
# Process the WishNameState change.
wishNameState = fields['setWishNameState'][0]
wishName = fields['setWishName'][0]
name = fields['setName'][0]
if wishNameState == 'APPROVED':
wishNameState = ''
name = wishName
wishName = ''
self.csm.accountDB.removeNameRequest(self.avId)
elif wishNameState == 'REJECTED':
wishNameState = 'OPEN'
wishName = ''
self.csm.accountDB.removeNameRequest(self.avId)
else:
self.demand('Kill', "Tried to acknowledge name on an avatar in %s state!" % wishNameState)
return
# Push the change back through:
self.csm.air.dbInterface.updateObject(
self.csm.air.dbId,
self.avId,
self.csm.air.dclassesByName['DistributedToonUD'],
{'setWishNameState': (wishNameState,),
'setWishName': (wishName,),
'setName': (name,)},
{'setWishNameState': fields['setWishNameState'],
'setWishName': fields['setWishName'],
'setName': fields['setName']})
self.csm.sendUpdateToAccountId(self.target, 'acknowledgeAvatarNameResp', [])
self.demand('Off')
class LoadAvatarFSM(AvatarOperationFSM):
notify = directNotify.newCategory('LoadAvatarFSM')
POST_ACCOUNT_STATE = 'GetTargetAvatar'
def enterStart(self, avId):
self.avId = avId
self.demand('RetrieveAccount')
def enterGetTargetAvatar(self):
# Make sure the target avatar is part of the account:
if self.avId not in self.avList:
self.demand('Kill', 'Tried to play an avatar not in the account!')
return
self.csm.air.dbInterface.queryObject(self.csm.air.dbId, self.avId,
self.__handleAvatar)
def __handleAvatar(self, dclass, fields):
if dclass != self.csm.air.dclassesByName['DistributedToonUD']:
self.demand('Kill', "One of the account's avatars is invalid!")
return
self.avatar = fields
self.demand('SetAvatar')
def enterSetAvatarTask(self, channel, task):
# Finally, grant ownership and shut down.
datagram = PyDatagram()
datagram.addServerHeader(
self.avId,
self.csm.air.ourChannel,
STATESERVER_OBJECT_SET_OWNER)
datagram.addChannel(self.target<<32 | self.avId)
self.csm.air.send(datagram)
# Tell the GlobalPartyManager as well:
self.csm.air.globalPartyMgr.avatarJoined(self.avId)
fields = self.avatar
fields.update({'setAdminAccess': [self.account.get('ACCESS_LEVEL', 100)]})
self.csm.air.friendsManager.addToonData(self.avId, fields)
self.csm.air.writeServerEvent('avatarChosen', self.avId, self.target)
self.demand('Off')
return task.done
def enterSetAvatar(self):
channel = self.csm.GetAccountConnectionChannel(self.target)
# First, give them a POSTREMOVE to unload the avatar, just in case they
# disconnect while we're working.
datagramCleanup = PyDatagram()
datagramCleanup.addServerHeader(
self.avId,
channel,
STATESERVER_OBJECT_DELETE_RAM)
datagramCleanup.addUint32(self.avId)
datagram = PyDatagram()
datagram.addServerHeader(
channel,
self.csm.air.ourChannel,
CLIENTAGENT_ADD_POST_REMOVE)
datagram.addString(datagramCleanup.getMessage())
self.csm.air.send(datagram)
# Activate the avatar on the DBSS:
self.csm.air.sendActivate(
self.avId, 0, 0, self.csm.air.dclassesByName['DistributedToonUD'],
{'setAdminAccess': [self.account.get('ACCESS_LEVEL', 100)]})
# Next, add them to the avatar channel:
datagram = PyDatagram()
datagram.addServerHeader(
channel,
self.csm.air.ourChannel,
CLIENTAGENT_OPEN_CHANNEL)
datagram.addChannel(self.csm.GetPuppetConnectionChannel(self.avId))
self.csm.air.send(datagram)
# Now set their sender channel to represent their account affiliation:
datagram = PyDatagram()
datagram.addServerHeader(
channel,
self.csm.air.ourChannel,
CLIENTAGENT_SET_CLIENT_ID)
datagram.addChannel(self.target<<32 | self.avId)
self.csm.air.send(datagram)
# Wait briefly before granting ownership, to avoid racing the avatar's activation on the DBSS.
taskMgr.doMethodLater(0.2, self.enterSetAvatarTask,
'avatarTask-%s' % self.avId, extraArgs=[channel],
appendTask=True)
class UnloadAvatarFSM(OperationFSM):
notify = directNotify.newCategory('UnloadAvatarFSM')
def enterStart(self, avId):
self.avId = avId
# We don't even need to query the account, we know the avatar is being played!
self.demand('UnloadAvatar')
def enterUnloadAvatar(self):
channel = self.csm.GetAccountConnectionChannel(self.target)
# Tell TTSFriendsManager somebody is logging off:
self.csm.air.friendsManager.toonOffline(self.avId)
# Clear off POSTREMOVE:
datagram = PyDatagram()
datagram.addServerHeader(
channel,
self.csm.air.ourChannel,
CLIENTAGENT_CLEAR_POST_REMOVES)
self.csm.air.send(datagram)
# Remove avatar channel:
datagram = PyDatagram()
datagram.addServerHeader(
channel,
self.csm.air.ourChannel,
CLIENTAGENT_CLOSE_CHANNEL)
datagram.addChannel(self.csm.GetPuppetConnectionChannel(self.avId))
self.csm.air.send(datagram)
# Reset sender channel:
datagram = PyDatagram()
datagram.addServerHeader(
channel,
self.csm.air.ourChannel,
CLIENTAGENT_SET_CLIENT_ID)
datagram.addChannel(self.target<<32)
self.csm.air.send(datagram)
# Unload avatar object:
datagram = PyDatagram()
datagram.addServerHeader(
self.avId,
channel,
STATESERVER_OBJECT_DELETE_RAM)
datagram.addUint32(self.avId)
self.csm.air.send(datagram)
# Done!
self.csm.air.writeServerEvent('avatarUnload', self.avId)
self.demand('Off')
# --- CLIENT SERVICES MANAGER UBERDOG ---
class ClientServicesManagerUD(DistributedObjectGlobalUD):
notify = directNotify.newCategory('ClientServicesManagerUD')
def announceGenerate(self):
DistributedObjectGlobalUD.announceGenerate(self)
# These keep track of the connection/account IDs currently undergoing an
# operation on the CSM. This is to prevent (hacked) clients from firing up more
# than one operation at a time, which could potentially lead to exploitation
# of race conditions.
self.connection2fsm = {}
self.account2fsm = {}
# For processing name patterns.
self.nameGenerator = NameGenerator()
# Temporary HMAC key:
self.key = 'c603c5833021ce79f734943f6e662250fd4ecf7432bf85905f71707dc4a9370c6ae15a8716302ead43810e5fba3cf0876bbbfce658e2767b88d916f5d89fd31'
# Instantiate our account DB interface:
if accountDBType == 'developer':
self.accountDB = DeveloperAccountDB(self)
elif accountDBType == 'remote':
self.accountDB = RemoteAccountDB(self)
else:
self.notify.error('Invalid accountdb-type: ' + accountDBType)
def killConnection(self, connId, reason):
datagram = PyDatagram()
datagram.addServerHeader(
connId,
self.air.ourChannel,
CLIENTAGENT_EJECT)
datagram.addUint16(101)
datagram.addString(reason)
self.air.send(datagram)
def killConnectionFSM(self, connId):
fsm = self.connection2fsm.get(connId)
if not fsm:
self.notify.warning('Tried to kill connection %d for duplicate FSM, but none exists!' % connId)
return
self.killConnection(connId, 'An operation is already underway: ' + fsm.name)
def killAccount(self, accountId, reason):
self.killConnection(self.GetAccountConnectionChannel(accountId), reason)
def killAccountFSM(self, accountId):
fsm = self.account2fsm.get(accountId)
if not fsm:
self.notify.warning('Tried to kill account %d for duplicate FSM, but none exists!' % accountId)
return
self.killAccount(accountId, 'An operation is already underway: ' + fsm.name)
def runAccountFSM(self, fsmtype, *args):
sender = self.air.getAccountIdFromSender()
if not sender:
self.killAccount(sender, 'Client is not logged in.')
return
if sender in self.account2fsm:
self.killAccountFSM(sender)
return
self.account2fsm[sender] = fsmtype(self, sender)
self.account2fsm[sender].request('Start', *args)
def login(self, cookie, authKey):
self.notify.debug('Received login cookie %r from %d' % (cookie, self.air.getMsgSender()))
sender = self.air.getMsgSender()
# Time to check this login to see if its authentic
digest_maker = hmac.new(self.key)
digest_maker.update(cookie)
serverKey = digest_maker.hexdigest()
if serverKey != authKey:
# This login is not authentic; reject the connection.
self.killConnection(sender, 'Failed to authenticate the login cookie.')
return
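# For reference, the issuing side is expected to derive authKey the same way,
# e.g. (a sketch, assuming the shared key above and Python 2's default MD5 digest):
#   authKey = hmac.new(SHARED_KEY, cookie).hexdigest()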
if sender >> 32:
self.killConnection(sender, 'Client is already logged in.')
return
if sender in self.connection2fsm:
self.killConnectionFSM(sender)
return
self.connection2fsm[sender] = LoginAccountFSM(self, sender)
self.connection2fsm[sender].request('Start', cookie)
def requestAvatars(self):
self.notify.debug('Received avatar list request from %d' % (self.air.getMsgSender()))
self.runAccountFSM(GetAvatarsFSM)
def createAvatar(self, dna, index):
self.runAccountFSM(CreateAvatarFSM, dna, index)
def deleteAvatar(self, avId):
self.runAccountFSM(DeleteAvatarFSM, avId)
def setNameTyped(self, avId, name):
self.runAccountFSM(SetNameTypedFSM, avId, name)
def setNamePattern(self, avId, p1, f1, p2, f2, p3, f3, p4, f4):
self.runAccountFSM(SetNamePatternFSM, avId, [(p1, f1), (p2, f2),
(p3, f3), (p4, f4)])
def acknowledgeAvatarName(self, avId):
self.runAccountFSM(AcknowledgeNameFSM, avId)
def chooseAvatar(self, avId):
currentAvId = self.air.getAvatarIdFromSender()
accountId = self.air.getAccountIdFromSender()
if currentAvId and avId:
self.killAccount(accountId, 'A Toon is already chosen!')
return
elif not currentAvId and not avId:
# This isn't really an error, the client is probably just making sure
# none of its Toons are active.
return
if avId:
self.runAccountFSM(LoadAvatarFSM, avId)
else:
self.runAccountFSM(UnloadAvatarFSM, currentAvId)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 09:19:42 2018
@author: r.dewinter
"""
import numpy as np
def OSY(x):
x1 = x[0]
x2 = x[1]
x3 = x[2]
x4 = x[3]
x5 = x[4]
x6 = x[5]
f1 = -25*(x1-2)**2 - (x2-2)**2 - (x3-1)**2 - (x4-4)**2 - (x5-1)**2
f2 = x1**2 + x2**2 + x3**2 + x4**2 + x5**2 + x6**2
g1 = x1 + x2 - 2
g2 = 6 - x1 - x2
g3 = 2 - x2 + x1
g4 = 2 - x1 + 3*x2
g5 = 4 - (x3-3)**2 - x4
g6 = (x5-3)**2 + x6 - 4
objectives = np.array([f1, f2])
constraints = np.array([g1, g2, g3, g4, g5, g6])
constraints = -1*constraints # flip signs for SACOBRA, which treats a constraint as satisfied when g(x) <= 0
return np.array([objectives, constraints])
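# A minimal usage sketch: evaluate OSY at a hand-picked point where all six
# raw constraints come out >= 0, so after the sign flip feasibility means <= 0.
if __name__ == '__main__':
objectives, constraints = OSY([5.0, 1.0, 2.0, 0.0, 5.0, 0.0])
print('objectives:', objectives)
print('feasible:', all(g <= 0 for g in constraints))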
|
from setuptools import setup, find_packages
with open('README.md') as readme_file:
README = readme_file.read()
setup_args = dict(
name='gitam',
version="0.3.1",
description='Useful tools to extract data from GITAM University websites.',
long_description_content_type="text/markdown",
long_description=README,
license='MIT',
packages=find_packages(),
author='Rohit Ganji',
author_email='grohit.2001@gmail.com',
keywords=['GITAM', 'GITAM University', 'Gandhi Institute of Technology and Management'],
url='https://github.com/rohitganji/gitam',
download_url='https://pypi.org/project/gitam/'
)
install_requires = [
'requests',
'bs4',
'pandas',
'matplotlib',
]
if __name__ == '__main__':
setup(**setup_args, install_requires=install_requires)
|
#!/usr/bin/env python3
from experiment_database_manager import ExperimentDatabaseManager
import gzip
import numpy as np
import pickle
import sys
import sql_credentials
import hplots.hgcal_analysis_plotter as hp
import hplots.trackml_plotter as tp
with gzip.open(sys.argv[1], 'rb') as f:
graphs, metadata = pickle.load(f)
plotter_type = 'hgcal'
if len(sys.argv) == 4:
plotter_type = sys.argv[3]
if plotter_type == 'hgcal':
plotter = hp.HGCalAnalysisPlotter()
elif plotter_type == 'trackml':
plotter = tp.TrackMLPlotter()
else:
raise NotImplementedError('Unknown plotter type: %s' % plotter_type)
pdfpath = sys.argv[2]
plotter.add_data_from_analysed_graph_list(graphs, metadata)
plotter.write_to_pdf(pdfpath)
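# Usage sketch (script name hypothetical; argument order follows the sys.argv
# indices above):
#   python analyse_graphs.py analysed_graphs.pkl.gz output.pdf [hgcal|trackml]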
|
from .uresnet_dense import UResNet as DenseUResNet
from .uresnet_dense import SegmentationLoss as DenseSegmentationLoss
from .uresnet_sparse import UResNet as SparseUResNet
from .uresnet_sparse import SegmentationLoss as SparseSegmentationLoss
|
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.forms.models import modelform_factory, modelformset_factory, inlineformset_factory
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.loading import get_model
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.decorators import login_required
from copy import copy
from collections import OrderedDict
from competencies.models import *
from competencies.forms import ForkForm
from competencies import my_admin
from . import utils
def index(request):
return render_to_response('competencies/index.html',
{},
context_instance=RequestContext(request))
# --- Authorization views ---
def no_edit_permission(request, school_id):
"""Displays message that user does not have permission to make requested edits."""
school = Organization.objects.get(id=school_id)
return render_to_response('competencies/no_edit_permission.html',
{'school': school},
context_instance=RequestContext(request))
# --- Simple views, for exploring system without changing it: ---
def organizations(request):
my_organizations, editor_organizations = [], []
if request.user.is_authenticated():
my_organizations = Organization.objects.filter(owner=request.user)
editor_organizations = request.user.organization_set.all()
# Remove owned orgs from editor_organizations
editor_organizations = [org for org in editor_organizations if org not in my_organizations]
public_organizations = Organization.objects.filter(public=True)
return render_to_response('competencies/organizations.html',
{'my_organizations': my_organizations,
'editor_organizations': editor_organizations,
'public_organizations': public_organizations,
},
context_instance=RequestContext(request))
def organization(request, organization_id):
"""Displays subject areas and subdiscipline areas for a given organization."""
organization = Organization.objects.get(id=organization_id)
editors = organization.editors.all()
# An organization can only fork another while it has no subject areas of its own:
can_fork = not organization.subjectarea_set.exists()
kwargs = get_visibility_filter(request.user, organization)
sas = organization.subjectarea_set.filter(**kwargs)
sdas = [sda for sa in sas for sda in sa.subdisciplinearea_set.filter(**kwargs)]
return render_to_response('competencies/organization.html',
{'organization': organization, 'subject_areas': sas,
'sdas': sdas, 'can_fork': can_fork,
'editors': editors,
},
context_instance=RequestContext(request))
def sa_summary(request, sa_id):
"""Shows a simple summary for a subject area."""
sa = SubjectArea.objects.get(id=sa_id)
organization = sa.organization
kwargs = get_visibility_filter(request.user, organization)
sdas, cas, eus = get_sda_ca_eu_elements(sa, kwargs)
return render_to_response('competencies/sa_summary.html',
{'subject_area': sa, 'organization': organization,
'sdas': sdas, 'cas': cas, 'eus': eus,},
context_instance=RequestContext(request))
def sa_summary_pdf(request, sa_id):
"""Return a pdf of the sa_summary page."""
#print('Generating pdf of sa_summary...')
sa = SubjectArea.objects.get(id=sa_id)
org = sa.organization
kwargs = get_visibility_filter(request.user, org)
sdas, cas, eus = get_sda_ca_eu_elements(sa, kwargs)
response = HttpResponse(content_type='application/pdf')
filename = 'sa_summary_%s.pdf' % sa.subject_area
response['Content-Disposition'] = 'attachment; filename=%s' % filename
from competencies.sa_summary_pdf import PDFTest
pdf_test = PDFTest(response)
pdf = pdf_test.makeSummary(org, sa, sdas, cas, eus)
return pdf
@login_required
def organization_admin_summary(request, organization_id):
"""See an administrative summmary of an organization. Restricted to owners of the org."""
organization = Organization.objects.get(id=organization_id)
# Make sure user owns this org.
if request.user != organization.owner:
return redirect(reverse('competencies:organizations'))
editors = organization.editors.all()
return render_to_response('competencies/organization_admin_summary.html',
{'organization': organization, 'editors': editors,
},
context_instance=RequestContext(request))
# --- Views for editing content. ---
@login_required
def organization_admin_edit(request, organization_id):
"""Administer an organization. Restricted to owners of the org."""
# DEV: This page will need a list of the organization's editors.
organization = Organization.objects.get(id=organization_id)
# Make sure user owns this org.
if request.user != organization.owner:
return redirect(reverse('competencies:organizations'))
if request.method != 'POST':
organization_form = OrganizationAdminForm(instance=organization)
else:
organization_form = OrganizationAdminForm(request.POST, instance=organization)
if organization_form.is_valid():
organization_form.save()
# If org has been made private, set all elements private.
if 'public' in organization_form.changed_data:
if not organization_form.cleaned_data.get('public'):
utils.cascade_visibility_down(organization, 'private')
# Make sure the organization owner was not removed from the editors.
# DEV: Should prevent this from happening at all, by overriding form.save()?
if organization.owner not in organization.editors.all():
organization.editors.add(organization.owner)
# Redirect to summary page after processing form.
return redirect(reverse('competencies:organization_admin_summary', args=[organization_id]))
return render_to_response('competencies/organization_admin_edit.html',
{'organization': organization, 'organization_form': organization_form,
},
context_instance=RequestContext(request))
@login_required
def fork(request, organization_id):
"""Fork an existing school."""
forking_organization = Organization.objects.get(id=organization_id)
if request.method != 'POST':
fork_form = ForkForm()
else:
fork_form = ForkForm(request.POST)
if fork_form.is_valid():
original_org = Organization.objects.get(pk=request.POST['organization'])
utils.fork_organization(forking_organization, original_org)
return redirect(reverse('competencies:organization', args=[organization_id,]))
else:
#print('\n\ninvalid:', fork_form)
# Report error appropriately.
pass
return render_to_response('competencies/fork.html',
{'forking_organization': forking_organization,
'organizations': organizations,
'fork_form': fork_form,
},
context_instance=RequestContext(request))
@login_required
def edit_sa_summary(request, sa_id):
"""Edit the elements in sa_summary."""
# This should work for a given sa_id, or with no sa_id.
# Have an id, edit a subject area.
# No id, create a new subject area.
subject_area = SubjectArea.objects.get(id=sa_id)
organization = subject_area.organization
kwargs = get_visibility_filter(request.user, organization)
# Test if user allowed to edit this organization.
if not has_edit_permission(request.user, organization):
redirect_url = '/no_edit_permission/' + str(organization.id)
return redirect(redirect_url)
sdas, cas, eus = get_sda_ca_eu_elements(subject_area, kwargs)
# Respond to submitted data.
if request.method == 'POST':
# Store elements that have had their privacy setting changed,
# for processing after all forms have been processed.
privacy_changed = []
process_form(request, subject_area, 'sa', privacy_changed)
for sda in sdas:
process_form(request, sda, 'sda', privacy_changed)
for ca in cas:
process_form(request, ca, 'ca', privacy_changed)
for eu in eus:
process_form(request, eu, 'eu', privacy_changed)
# Cascade privacy settings appropriately.
# Change to private takes precedence, so process changes to public first.
changed_to_public = [element for element in privacy_changed if element.public]
changed_to_private = [element for element in privacy_changed if not element.public]
# Cascading public happens upwards. Setting an element public makes all its
# ancestors public.
for element in changed_to_public:
utils.cascade_public_up(element)
# Cascading private happens downwards. Setting an element private hides all
# its descendants.
for element in changed_to_private:
utils.cascade_visibility_down(element, 'private')
# If any privacy settings were changed, need to refresh elements
# to make sure forms are based on updated elements.
subject_area = SubjectArea.objects.get(id=sa_id)
organization = subject_area.organization
sdas, cas, eus = get_sda_ca_eu_elements(subject_area, kwargs)
# Redirect back to view page.
return redirect('/sa_summary/%s' % sa_id)
# Build forms. Not in an else clause, because even POST requests need
# forms re-generated.
sa_form = generate_form(subject_area, 'sa')
sda_forms = []
for sda in sdas:
sda_form = generate_form(sda, 'sda')
sda_form.my_id = sda.id
sda_forms.append(sda_form)
zipped_sda_forms = list(zip(sdas, sda_forms))
ca_forms = []
for ca in cas:
ca_form = generate_form(ca, 'ca')
ca_form.my_id = ca.id
ca_forms.append(ca_form)
zipped_ca_forms = list(zip(cas, ca_forms))
eu_forms = []
for eu in eus:
eu_form = generate_form(eu, 'eu')
eu_form.my_id = eu.id
eu_forms.append(eu_form)
zipped_eu_forms = list(zip(eus, eu_forms))
return render_to_response('competencies/edit_sa_summary.html',
{'subject_area': subject_area, 'organization': organization,
'sdas': sdas, 'cas': cas, 'eus': eus,
'sa_form': sa_form,
'zipped_sda_forms': zipped_sda_forms,
'zipped_ca_forms': zipped_ca_forms,
'zipped_eu_forms': zipped_eu_forms,
},
context_instance=RequestContext(request))
def edit_sa_summary_order(request, sa_id):
"""Modify the order of sdas, cas, and eus within a subject area."""
subject_area = SubjectArea.objects.get(id=sa_id)
organization = subject_area.organization
kwargs = get_visibility_filter(request.user, organization)
# Test if user allowed to edit this organization.
if not has_edit_permission(request.user, organization):
redirect_url = '/no_edit_permission/' + str(organization.id)
return redirect(redirect_url)
sdas, cas, eus = get_sda_ca_eu_elements(subject_area, kwargs)
return render_to_response('competencies/edit_sa_summary_order.html',
{'subject_area': subject_area, 'organization': organization,
'sdas': sdas, 'cas': cas, 'eus': eus,
},
context_instance=RequestContext(request))
def move_element(request, element_type, element_id, direction, sa_id):
"""Modify the position of an element within its set of elements."""
# Get the element whose position is being changed, get its order,
# and modify the order if appropriate.
sa = SubjectArea.objects.get(id=sa_id)
edit_order_url = reverse('competencies:edit_sa_summary_order', args=[sa.id])
object_to_move = get_model('competencies', element_type).objects.get(id=element_id)
order = get_parent_order(object_to_move)
# Make sure user can edit this organization.
if request.user not in sa.organization.editors.all():
redirect_url = reverse('competencies:index')
return redirect(redirect_url)
# If element_type is ca, get group of cas with no sda or same sda,
# then get ca to switch with.
if element_type == 'CompetencyArea':
ca = object_to_move
if not ca.subdiscipline_area:
ca_group = sa.competencyarea_set.filter(subdiscipline_area=None)
else:
ca_group = sa.competencyarea_set.filter(subdiscipline_area=ca.subdiscipline_area)
for index, cand_ca in enumerate(ca_group):
if cand_ca == ca:
ca_index = index
if direction == 'up' and ca_index > 0:
ca_target = ca_group[ca_index-1]
elif direction == 'down' and ca_index < len(ca_group)-1:
ca_target = ca_group[ca_index+1]
else:
return redirect(edit_order_url)
# Get indices in order, and swap positions.
original_index = order.index(ca.id)
target_index = order.index(ca_target.id)
order[original_index], order[target_index] = order[target_index], order[original_index]
set_parent_order(object_to_move, order)
return redirect(edit_order_url)
# Get index of element_id, switch places with previous or next element.
index = order.index(int(element_id))
if direction == 'up' and index > 0:
order[index], order[index-1] = order[index-1], order[index]
set_parent_order(object_to_move, order)
elif direction == 'down' and index < len(order) - 1:
order[index], order[index+1] = order[index+1], order[index]
set_parent_order(object_to_move, order)
return redirect(edit_order_url)
def delete_element(request, element_type, element_id):
"""Confirm that user wants to delete an element, and all its descendants.
Option to cancel, which will go back to edit_sa_summary,
or delete element which will then redirect to sa_summary."""
# DEV: Can use a single delete_element page.
# GET request shows confirmation form; POST request follows through on delete.
# DEV: Should pass element type alias, which should be used on submit button.
# DEV: This needs to be generalized.
eu = EssentialUnderstanding.objects.get(id=element_id)
ca = eu.competency_area
sa = ca.subject_area
org = sa.organization
if request.method == 'POST' and request.POST['confirm_delete']:
eu.delete()
return redirect(reverse('competencies:sa_summary', args=[sa.id,]))
return render_to_response('competencies/delete_element.html',
{'organization': org, 'subject_area': sa,
'eu': eu,
},
context_instance=RequestContext(request))
def get_sda_ca_eu_elements(subject_area, kwargs):
"""Get all sdas, cas, and eus associated with a subject area."""
sdas = subject_area.subdisciplinearea_set.filter(**kwargs)
cas = subject_area.competencyarea_set.filter(**kwargs)
eus = []
for ca in cas:
for eu in ca.essentialunderstanding_set.filter(**kwargs):
eus.append(eu)
return (sdas, cas, eus)
def process_form(request, instance, element_type, privacy_changed):
"""Process a form for a single element."""
prefix = '%s_form_%d' % (element_type, instance.id)
if element_type == 'sa':
form = SubjectAreaForm(request.POST, instance=instance)
elif element_type == 'sda':
form = SubdisciplineAreaForm(request.POST, prefix=prefix, instance=instance)
elif element_type == 'ca':
form = CompetencyAreaForm(request.POST, prefix=prefix, instance=instance)
elif element_type == 'eu':
form = EssentialUnderstandingForm(request.POST, prefix=prefix, instance=instance)
if form.is_valid():
modified_element = form.save()
# If privacy setting changed, add to list for processing.
if 'public' in form.changed_data:
privacy_changed.append(modified_element)
return form
def generate_form(instance, element_type):
"""Generate a form for a single element."""
prefix = '%s_form_%d' % (element_type, instance.id)
if element_type == 'sa':
return SubjectAreaForm(instance=instance)
elif element_type == 'sda':
return SubdisciplineAreaForm(prefix=prefix, instance=instance)
elif element_type == 'ca':
return CompetencyAreaForm(prefix=prefix, instance=instance)
elif element_type == 'eu':
return EssentialUnderstandingForm(prefix=prefix, instance=instance)
def new_sa(request, org_id):
"""Create a new subject area for a given organization."""
organization = Organization.objects.get(id=org_id)
# Test if user allowed to edit this organization.
if not has_edit_permission(request.user, organization):
redirect_url = '/no_edit_permission/' + str(organization.id)
return redirect(redirect_url)
if request.method == 'POST':
sa_form = SubjectAreaForm(request.POST)
if sa_form.is_valid():
new_sa = sa_form.save(commit=False)
new_sa.organization = organization
new_sa.save()
return redirect('/edit_sa_summary/%d' % new_sa.id)
sa_form = SubjectAreaForm()
return render_to_response('competencies/new_sa.html',
{'organization': organization, 'sa_form': sa_form,},
context_instance=RequestContext(request))
def new_sda(request, sa_id):
"""Create a new subdiscipline area for a given subject area."""
sa = SubjectArea.objects.get(id=sa_id)
organization = sa.organization
# Test if user allowed to edit this organization.
if not has_edit_permission(request.user, organization):
redirect_url = '/no_edit_permission/' + str(organization.id)
return redirect(redirect_url)
if request.method == 'POST':
sda_form = SubdisciplineAreaForm(request.POST)
if sda_form.is_valid():
new_sda = sda_form.save(commit=False)
new_sda.subject_area = sa
new_sda.save()
return redirect('/edit_sa_summary/%d' % sa.id)
sda_form = SubdisciplineAreaForm()
return render_to_response('competencies/new_sda.html',
{'organization': organization, 'sa': sa,
'sda_form': sda_form,},
context_instance=RequestContext(request))
def new_ca(request, sa_id):
"""Create a new competency area for a given general subject area."""
sa = SubjectArea.objects.get(id=sa_id)
organization = sa.organization
# Test if user allowed to edit this organization.
if not has_edit_permission(request.user, organization):
redirect_url = '/no_edit_permission/' + str(organization.id)
return redirect(redirect_url)
if request.method == 'POST':
ca_form = CompetencyAreaForm(request.POST)
if ca_form.is_valid():
new_ca = ca_form.save(commit=False)
new_ca.subject_area = sa
new_ca.save()
return redirect('/edit_sa_summary/%d' % sa.id)
ca_form = CompetencyAreaForm()
return render_to_response('competencies/new_ca.html',
{'organization': organization, 'sa': sa, 'ca_form': ca_form,},
context_instance=RequestContext(request))
def new_sda_ca(request, sda_id):
"""Create a new competency area for a given subdiscipline area."""
sda = SubdisciplineArea.objects.get(id=sda_id)
sa = sda.subject_area
organization = sa.organization
# Test if user allowed to edit this organization.
if not has_edit_permission(request.user, organization):
redirect_url = '/no_edit_permission/' + str(organization.id)
return redirect(redirect_url)
if request.method == 'POST':
ca_form = CompetencyAreaForm(request.POST)
if ca_form.is_valid():
new_ca = ca_form.save(commit=False)
new_ca.subject_area = sa
new_ca.subdiscipline_area = sda
new_ca.save()
return redirect('/edit_sa_summary/%d' % sa.id)
ca_form = CompetencyAreaForm()
return render_to_response('competencies/new_sda_ca.html',
{'organization': organization, 'sa': sa, 'sda': sda,
'ca_form': ca_form,},
context_instance=RequestContext(request))
def new_eu(request, ca_id):
"""Create a new essential understanding for given ca."""
ca = CompetencyArea.objects.get(id=ca_id)
sa = ca.subject_area
organization = sa.organization
# Test if user allowed to edit this organization.
if not has_edit_permission(request.user, organization):
redirect_url = '/no_edit_permission/' + str(organization.id)
return redirect(redirect_url)
if request.method == 'POST':
eu_form = EssentialUnderstandingForm(request.POST)
if eu_form.is_valid():
new_eu = eu_form.save(commit=False)
new_eu.competency_area = ca
new_eu.save()
return redirect('/edit_sa_summary/%d' % sa.id)
eu_form = EssentialUnderstandingForm()
return render_to_response('competencies/new_eu.html',
{'organization': organization, 'sa': sa, 'ca': ca,
'eu_form': eu_form,},
context_instance=RequestContext(request))
# helper methods to get elements of the system.
def get_visibility_filter(user, organization):
# Get filter for visibility, based on logged-in status.
if user.is_authenticated() and user in organization.editors.all():
kwargs = {}
else:
kwargs = {'public': True}
return kwargs
# --- Edit views, for editing parts of the system ---
def has_edit_permission(user, organization):
"""Checks whether given user has permission to edit given object.
"""
# Returns True if allowed to edit, False if not allowed to edit
if user in organization.editors.all():
return True
else:
return False
# Methods to deal with ordering issue around order_with_respect_to
def check_parent_order(child_object, correct_order):
"""Hack to address ordering issue around order_with_respect_to."""
if get_parent_order(child_object) != correct_order:
set_parent_order(child_object, correct_order)
def get_parent_order(child_object):
parent_object = child_object.get_parent()
# DEV: May make ca.get_parent() always return sa?
if parent_object.__class__.__name__ == 'SubdisciplineArea':
parent_object = parent_object.subject_area
order_method = 'get_' + child_object.__class__.__name__.lower() + '_order'
parent_order = getattr(parent_object, order_method)()
return parent_order
def set_parent_order(child_object, order):
parent_object = child_object.get_parent()
# DEV: May make ca.get_parent() always return sa?
if parent_object.__class__.__name__ == 'SubdisciplineArea':
parent_object = parent_object.subject_area
order_method = 'set_' + child_object.__class__.__name__.lower() + '_order'
getattr(parent_object, order_method)(order)
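# For reference: Django's order_with_respect_to Meta option is what generates
# the get_<child>_order()/set_<child>_order() methods looked up by name above.
# A minimal sketch, assuming CompetencyArea is ordered with respect to SubjectArea:
#   class CompetencyArea(models.Model):
#       subject_area = models.ForeignKey(SubjectArea)
#       class Meta:
#           order_with_respect_to = 'subject_area'
#   sa.get_competencyarea_order()          # -> [ca1.id, ca2.id, ...]
#   sa.set_competencyarea_order([ca2.id, ca1.id, ...])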
@login_required
def new_organization(request):
"""Creates a new organization."""
if request.method == 'POST':
new_organization_form = OrganizationForm(request.POST)
if new_organization_form.is_valid():
new_organization = new_organization_form.save(commit=False)
new_organization.owner = request.user
new_organization.save()
new_organization.editors.add(request.user)
return redirect(reverse('competencies:organizations'))
new_organization_form = OrganizationForm()
return render_to_response('competencies/new_organization.html',
{'new_organization_form': new_organization_form,},
context_instance=RequestContext(request))
|
# Add main directory to enable imports
if __name__ == '__main__' :
import os
os.sys.path.append(os.path.abspath('../..'))
from libs.gui.iterate_file import IterateFile
import wx
import numpy as np
from operator import itemgetter
#########################################################################
class SpectralScanViewer (IterateFile) :
def IniLoad(self, data_file) :
# Find out what kind of pulse shapes are saved in the file
pulse_shaping_option = str( data_file["settings/ODD_GA/pulse_shaping_option"][...] )
self.plot_ampl_phase = ( pulse_shaping_option == "amplitude and phase")
if not self.plot_ampl_phase :
self.plot_title = { "amplitude only" : "Amplitude shapes",
"phase only" : "Phase shapes" }[pulse_shaping_option]
def UpdateFrame (self, event=None) :
# for each individual (within a given iteration) load fitness and pulse shape
loaded_data = [
( ind_grp["fitness"][...].sum(),
ind_grp["pulse_shape"][...] )
for ind_grp in self.GetCurrentFrame()["individuals"].itervalues()
]
# Arrange by fitness
loaded_data.sort(key=itemgetter(0), reverse=True)
loaded_data = loaded_data[:5]
# Break into fitness and pulse_shape
fitness, pulse_shapes = zip(*loaded_data)
# Displaying the pulse shapes
self.fig.clear()
self.fig.patch.set_facecolor('grey')
if self.plot_ampl_phase :
# Display amplitude and phase separately
ampl_axes = self.fig.add_subplot(211, axisbg='grey')
ampl_axes.set_title ("Amplitude")
phase_axes = self.fig.add_subplot(212, axisbg='grey')
phase_axes.set_title ("Phase")
for pulse in pulse_shapes :
ampl_axes.plot ( pulse[:pulse.size/2] )
phase_axes.plot ( pulse[pulse.size/2:] )
ampl_axes.legend( ["%.2e" % f for f in fitness] )
else :
# Phase/Ampl shaping only
axes = self.fig.add_subplot(111, axisbg='grey')
for pulse in pulse_shapes :
axes.plot (pulse)
axes.legend( ["%.2e" % f for f in fitness] )
axes.set_title (self.plot_title)
self.canvas.draw()
#########################################################################
if __name__ == '__main__' :
# Check whether the filename was given as a command line argument
import sys
if len(sys.argv) == 2 : filename = sys.argv[1]
else : filename = None
app = wx.App ()
SpectralScanViewer ("optimization_iterations", filename=filename)
app.MainLoop ()
|
import json
import os
from django.conf.urls import include, url
from django.db import models
from django.test import TestCase, override_settings
from django.conf import settings
from rest_framework import status
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from rest_framework.viewsets import GenericViewSet
from unittest import mock
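# Test fixture for the payload a gateway forwards in the X-JWT-PAYLOAD header:
# uid identifies the user, iat/exp are the standard issued-at/expiry claims,
# and app_id names the calling application.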
X_JWT_PAYLOAD = json.dumps({'uid': '1234abc', 'exp': 1722200316, 'iat': 1622193116, 'app_id': 'app_1234567'})
class CBVAllViewSetsTestCase(TestCase):
'''
Tests for the class-based-view decorator access_control_cbv_all.
'''
@classmethod
def setUpClass(cls):
settings.SPARROW_AUTHENTICATION = {"USER_CLASS_PATH": "sparrow_cloud.auth.user.User"}
# settings.SC_SKIP_ACCESS_CONTROL = False
os.environ.setdefault("SC_ACCESS_CONTROL_SVC","ac-svc:8001")
os.environ.setdefault("SC_ACCESS_CONTROL_API","/api/ac")
@classmethod
def tearDownClass(cls):
pass
@mock.patch('sparrow_cloud.access_control.access_verify.rest_client.get', return_value={"has_perm": False})
def test_access_control_cbv_all_no_perm(self, rest_client_get):
# Do not skip access control; the user lacks permission, so access is denied.
settings.SC_SKIP_ACCESS_CONTROL = False
from sparrow_cloud.access_control.decorators import access_control_cbv_all
class Action(models.Model):
pass
@access_control_cbv_all("SparrowAdmin")
class CBVAllActionViewSet(GenericViewSet):
queryset = Action.objects.all()
def list(self, request, *args, **kwargs):
# import pdb; pdb.set_trace()
response = Response()
response.view = self
return response
view = CBVAllActionViewSet.as_view(actions={
'get': 'list',
})
factory = APIRequestFactory(HTTP_X_JWT_PAYLOAD=X_JWT_PAYLOAD)
response = view(factory.get('/'))
assert response.status_code == status.HTTP_403_FORBIDDEN
@mock.patch('sparrow_cloud.access_control.access_verify.rest_client.get', return_value={"has_perm": True})
def test_access_control_cbv_all_allow(self, rest_client_get):
# Do not skip access control; the user has permission, so access is allowed.
settings.SC_SKIP_ACCESS_CONTROL = False
from sparrow_cloud.access_control.decorators import access_control_cbv_all
class Action(models.Model):
pass
@access_control_cbv_all("SparrowAdmin")
class CBVAllActionViewSet(GenericViewSet):
queryset = Action.objects.all()
def list(self, request, *args, **kwargs):
response = Response()
response.view = self
return response
view = CBVAllActionViewSet.as_view(actions={
'get': 'list',
})
factory = APIRequestFactory(HTTP_X_JWT_PAYLOAD=X_JWT_PAYLOAD)
response = view(factory.get('/'))
assert response.status_code == status.HTTP_200_OK
# @mock.patch('sparrow_cloud.access_control.access_verify.rest_client.get', return_value={"has_perm": False})
def test_viewset_access_control_cbv_all_skip_ac(self):
# Skip access control entirely; no remote mock and no authentication are needed.
settings.SC_SKIP_ACCESS_CONTROL = True
from sparrow_cloud.access_control.decorators import access_control_cbv_all
class Action(models.Model):
pass
@access_control_cbv_all("SparrowAdmin")
class CBVAllActionViewSet(GenericViewSet):
queryset = Action.objects.all()
def list(self, request, *args, **kwargs):
# import pdb; pdb.set_trace()
response = Response()
response.view = self
return response
view = CBVAllActionViewSet.as_view(actions={
'get': 'list',
})
factory = APIRequestFactory()  # no authentication: HTTP_X_JWT_PAYLOAD=X_JWT_PAYLOAD is deliberately omitted
response = view(factory.get('/'))
assert response.status_code == status.HTTP_200_OK
class CBVMethodViewSetsTestCase(TestCase):
'''
Tests for the class-based-view decorator access_control_cbv_method.
'''
@classmethod
def setUpClass(cls):
settings.SPARROW_AUTHENTICATION = {"USER_CLASS_PATH": "sparrow_cloud.auth.user.User"}
# settings.SC_SKIP_ACCESS_CONTROL = False
os.environ.setdefault("SC_ACCESS_CONTROL_SVC","ac-svc:8001")
os.environ.setdefault("SC_ACCESS_CONTROL_API","/api/ac")
@classmethod
def tearDownClass(cls):
pass
@mock.patch('sparrow_cloud.access_control.access_verify.rest_client.get', return_value={"has_perm": False})
def test_viewset_access_control_cbv_method(self, rest_client_mock):
# Do not skip access control; the user lacks permission, so access is denied.
settings.SC_SKIP_ACCESS_CONTROL = False
from sparrow_cloud.access_control.decorators import access_control_cbv_method
class Action(models.Model):
pass
@access_control_cbv_method({
"get": "AdminGet",
"post": "AdminPost",
"delete": "AdminDelete",
"put": "AdminPut",
})
class CBVAllActionViewSet(GenericViewSet):
queryset = Action.objects.all()
def list(self, request, *args, **kwargs):
response = Response()
response.view = self
return response
def create(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
def destroy(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
def partial_update(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
view = CBVAllActionViewSet.as_view(actions={
'get': 'list',
'post': 'create',
'delete': 'destroy',
'put': 'partial_update',
})
factory = APIRequestFactory(HTTP_X_JWT_PAYLOAD=X_JWT_PAYLOAD)
response = view(factory.get('/'))
assert response.status_code == status.HTTP_403_FORBIDDEN
response = view(factory.post('/'))
assert response.status_code == status.HTTP_403_FORBIDDEN
response = view(factory.delete('/'))
assert response.status_code == status.HTTP_403_FORBIDDEN
response = view(factory.put('/'))
assert response.status_code == status.HTTP_403_FORBIDDEN
@mock.patch('sparrow_cloud.access_control.access_verify.rest_client.get', return_value={"has_perm": True})
def test_access_control_cbv_method_allow(self, rest_client_mock):
# Do not skip access control; the user has permission, so access is allowed.
settings.SC_SKIP_ACCESS_CONTROL = False
from sparrow_cloud.access_control.decorators import access_control_cbv_method
class Action(models.Model):
pass
@access_control_cbv_method({
"get": "AdminGet",
"post": "AdminPost",
"delete": "AdminDelete",
"put": "AdminPut",
})
class CBVAllActionViewSet(GenericViewSet):
queryset = Action.objects.all()
def list(self, request, *args, **kwargs):
response = Response()
response.view = self
return response
def create(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
def destroy(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
def partial_update(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
view = CBVAllActionViewSet.as_view(actions={
'get': 'list',
'post': 'create',
'delete': 'destroy',
'put': 'partial_update',
})
factory = APIRequestFactory(HTTP_X_JWT_PAYLOAD=X_JWT_PAYLOAD)
response = view(factory.get('/'))
assert response.status_code == status.HTTP_200_OK
response = view(factory.post('/'))
assert response.status_code == status.HTTP_200_OK
response = view(factory.delete('/'))
assert response.status_code == status.HTTP_200_OK
response = view(factory.put('/'))
assert response.status_code == status.HTTP_200_OK
# @mock.patch('sparrow_cloud.access_control.access_verify.rest_client.get', return_value={"has_perm": False})
def test_viewset_access_control_cbv_method_skip_ac(self):
# Skip access control entirely; no remote mock and no authentication are needed.
settings.SC_SKIP_ACCESS_CONTROL = True
from sparrow_cloud.access_control.decorators import access_control_cbv_method
class Action(models.Model):
pass
@access_control_cbv_method({
"get": "AdminGet",
"post": "AdminPost",
"delete": "AdminDelete",
"put": "AdminPut",
})
class CBVAllActionViewSet(GenericViewSet):
queryset = Action.objects.all()
def list(self, request, *args, **kwargs):
response = Response()
response.view = self
return response
def create(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
def destroy(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
def partial_update(self, request, *args, **kwargs):
response = Response({"message": "ok"})
response.view = self
return response
view = CBVAllActionViewSet.as_view(actions={
'get': 'list',
'post': 'create',
'delete': 'destroy',
'put': 'partial_update',
})
factory = APIRequestFactory()
response = view(factory.get('/'))
assert response.status_code == status.HTTP_200_OK
response = view(factory.post('/'))
assert response.status_code == status.HTTP_200_OK
response = view(factory.delete('/'))
assert response.status_code == status.HTTP_200_OK
response = view(factory.put('/'))
assert response.status_code == status.HTTP_200_OK
|
import argparse
import logging
import time
import cv2
import numpy as np
from estimator import TfPoseEstimator
from networks import get_graph_path, model_wh
import pygame
import pygame.midi
pygame.init()
pygame.midi.init()
logger = logging.getLogger('TfPoseEstimator-WebCam')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# time param
start_time = 0
speed = 0.5
# dot param
d_circle = 30
dot_line = 0
# midi setting
instrument = 0
port = 1
volume = 127
note_list = []
def get_pentatonic_scale(note):
# C
if note%5 == 0:
out_note = note//5*12
# D#
if note%5 == 1:
out_note = note//5*12 + 3
# F
if note%5 == 2:
out_note = note//5*12 + 5
# G
if note%5 == 3:
out_note = note//5*12 + 7
# A#
if note%5 == 4:
out_note = note//5*12 + 10
out_note += 60
while out_note > 127:
out_note -= 128
return out_note
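# e.g. notes 0..5 map to MIDI 60, 63, 65, 67, 70, 72 -- a C minor pentatonic
# scale (C, D#, F, G, A#) that repeats an octave higher every five steps.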
def human_sequencer(src):
global start_time
global dot_line
global note_list
image_h, image_w = src.shape[:2]
h_max = int(image_h / d_circle)
w_max = int(image_w / d_circle)
# create blank image
npimg_target = np.zeros((image_h, image_w, 3), np.uint8)
dot_color = [[0 for i in range(h_max)] for j in range(w_max)]
# make dot information from ndarray
for y in range(0, h_max):
for x in range(0, w_max):
dot_color[x][y] = src[y*d_circle][x*d_circle]
# move dot
current_time = time.time() - start_time
while time.time() - start_time > speed:
start_time += speed
dot_line += 1
if dot_line > w_max-1:
dot_line = 0
# sound off
for note in note_list:
midiOutput.note_off(note,volume)
# sound on
note_list = []
for y in range(0, h_max):
if dot_color[dot_line][y][0] == 255:
note_list.append(get_pentatonic_scale(y))
for note in note_list:
midiOutput.note_on(note,volume)
# draw dot
for y in range(0, h_max):
for x in range(0, w_max):
center = (int(x * d_circle + d_circle * 0.5), int(y * d_circle + d_circle * 0.5))
if x == dot_line:
if dot_color[x][y][0] == 255:
cv2.circle(npimg_target, center, int(d_circle/2) , [255-(int)(dot_color[x][y][0]),255-(int)(dot_color[x][y][1]),255-(int)(dot_color[x][y][2])] , thickness=-1, lineType=8, shift=0)
else:
cv2.circle(npimg_target, center, int(d_circle/2) , [255,255,255] , thickness=-1, lineType=8, shift=0)
else:
cv2.circle(npimg_target, center, int(d_circle/2) , [(int)(dot_color[x][y][0]),(int)(dot_color[x][y][1]),(int)(dot_color[x][y][2])] , thickness=-1, lineType=8, shift=0)
return npimg_target
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
parser.add_argument('--camera', type=int, default=0)
parser.add_argument('--zoom', type=float, default=1.0)
parser.add_argument('--resolution', type=str, default='432x368', help='network input resolution. default=432x368')
parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')
parser.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
args = parser.parse_args()
print("midi devices")
for id in range(pygame.midi.get_count()):
print(pygame.midi.get_device_info(id))
midiOutput = pygame.midi.Output(port, 1)
midiOutput.set_instrument(instrument)
logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
w, h = model_wh(args.resolution)
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
logger.debug('cam read+')
cam = cv2.VideoCapture(args.camera)
ret_val, image = cam.read()
logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))
start_time = time.time()
while True:
ret_val, image = cam.read()
logger.debug('image preprocess+')
if args.zoom < 1.0:
canvas = np.zeros_like(image)
img_scaled = cv2.resize(image, None, fx=args.zoom, fy=args.zoom, interpolation=cv2.INTER_LINEAR)
dx = (canvas.shape[1] - img_scaled.shape[1]) // 2
dy = (canvas.shape[0] - img_scaled.shape[0]) // 2
canvas[dy:dy + img_scaled.shape[0], dx:dx + img_scaled.shape[1]] = img_scaled
image = canvas
elif args.zoom > 1.0:
img_scaled = cv2.resize(image, None, fx=args.zoom, fy=args.zoom, interpolation=cv2.INTER_LINEAR)
dx = (img_scaled.shape[1] - image.shape[1]) // 2
dy = (img_scaled.shape[0] - image.shape[0]) // 2
image = img_scaled[dy:image.shape[0], dx:image.shape[1]]
logger.debug('image process+')
humans = e.inference(image)
logger.debug('postprocess+')
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
image = human_sequencer(image)
logger.debug('show+')
cv2.imshow('tf-pose-estimation result', image)
if cv2.waitKey(1) == 27: # ESC key
break
logger.debug('finished+')
cv2.destroyAllWindows()
del midiOutput
pygame.midi.quit()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import logging
import re
import arrow
from builtins import *
from flask import request
from peewee import fn, OperationalError
from requests import RequestException
from requests.auth import HTTPBasicAuth
from retry import retry
from nzbhydra import config, databaseLock
from nzbhydra import webaccess
from nzbhydra.database import IndexerSearch, IndexerApiAccess, IndexerStatus, Indexer
from nzbhydra.database import InterfaceError
from nzbhydra.exceptions import IndexerResultParsingException, IndexerAuthException, IndexerAccessException
from nzbhydra.log import removeSensitiveData
from nzbhydra.nzb_search_result import NzbSearchResult
QueriesExecutionResult = collections.namedtuple("QueriesExecutionResult", "didsearch results indexerSearchEntry indexerApiAccessEntry indexerStatus total loaded_results total_known has_more rejected")
IndexerProcessingResult = collections.namedtuple("IndexerProcessingResult", "entries queries total total_known has_more rejected")
titleRegex = re.compile(r"(\w[\w']*\w|\w)")
class SearchModule(object):
logger = logging.getLogger('root')
# regarding quality:
# possibly use newznab qualities as base, map for other indexers (nzbclub etc)
def __init__(self, settings):
self.settings = settings
self.module = "Abstract search module"
self.supports_queries = True
self.needs_queries = False
self.category_search = True # If true the indexer supports searching in a given category (possibly without any query or id)
self.limit = 100
self.supportedFilters = []
self.supportsNot = None
self.indexerDb = None
def __repr__(self):
return self.name
@property
def indexer(self):
if self.indexerDb is None:
self.indexerDb = Indexer.get(fn.lower(Indexer.name) == self.settings.name.lower())
return self.indexerDb
@property
def host(self):
return self.settings.host
@property
def name(self):
return self.settings.name
@property
def score(self):
return self.settings.score
@property
def search_ids(self):
if "search_ids" not in self.settings.keys():
self.error('Search IDs property not set. Please open the config for this indexer and click "Check capabilities"')
return []
return self.settings.search_ids
@property
def searchTypes(self):
if "searchTypes" not in self.settings.keys():
self.error('Search types property not set. Please open the config for this indexer and click "Check capabilities"')
return []
return self.settings.searchTypes
@property
def generate_queries(self):
return True # TODO pass when used check for internal vs external
# return self.indexer.settings.get("generate_queries", True) # If true and a search by movieid or tvdbid or rid is done then we attempt to find the title and generate queries for indexers which don't support id-based searches
def search(self, search_request):
self.info("Starting search")
if search_request.type == "tv":
if search_request.query is None and search_request.identifier_key is None and self.needs_queries:
self.error("TV search without query or id or title is not possible with this indexer")
return QueriesExecutionResult(didsearch=False, results=[], indexerSearchEntry=None, indexerApiAccessEntry=None, indexerStatus=None, total=0, loaded_results=0, total_known=True, has_more=False, rejected=self.getRejectedCountDict())
if search_request.query is None and not self.generate_queries:
self.error("TV search is not possible with this provideer because query generation is disabled")
if search_request.identifier_key in self.search_ids:
# Best case, we can search using the ID
urls = self.get_showsearch_urls(search_request)
elif search_request.title is not None:
# If we cannot search using the ID we generate a query using the title provided by the GUI
search_request.query = search_request.title
urls = self.get_showsearch_urls(search_request)
elif search_request.query is not None:
# Simple case, just a regular raw search but in movie category
urls = self.get_showsearch_urls(search_request)
else:
# Just show all the latest tv releases
urls = self.get_showsearch_urls(search_request)
elif search_request.type == "movie":
if search_request.query is None and search_request.title is None and search_request.identifier_key is None and self.needs_queries:
self.error("Movie search without query or IMDB id or title is not possible with this indexer")
return QueriesExecutionResult(didsearch=False, results=[], indexerSearchEntry=None, indexerApiAccessEntry=None, indexerStatus=None, total=0, loaded_results=0, total_known=True, has_more=False, rejected=self.getRejectedCountDict())
if search_request.query is None and not self.generate_queries:
self.error("Movie search is not possible with this provideer because query generation is disabled")
if search_request.identifier_key is not None and "imdbid" in self.search_ids:
# Best case, we can search using IMDB id
urls = self.get_moviesearch_urls(search_request)
elif search_request.title is not None:
# If we cannot search using the ID we generate a query using the title provided by the GUI
search_request.query = search_request.title
urls = self.get_moviesearch_urls(search_request)
elif search_request.query is not None:
# Simple case, just a regular raw search but in movie category
urls = self.get_moviesearch_urls(search_request)
else:
# Just show all the latest movie releases
urls = self.get_moviesearch_urls(search_request)
elif search_request.type == "ebook":
urls = self.get_ebook_urls(search_request)
elif search_request.type == "audiobook":
urls = self.get_audiobook_urls(search_request)
elif search_request.type == "comic":
urls = self.get_comic_urls(search_request)
elif search_request.type == "anime":
urls = self.get_anime_urls(search_request)
else:
urls = self.get_search_urls(search_request)
queries_execution_result = self.execute_queries(urls, search_request)
return queries_execution_result
# Access to most basic functions
def get_search_urls(self, search_request):
# return url(s) to search. Url is then retrieved and result is returned if OK
# we can return multiple urls in case a module needs to make multiple requests (e.g. when searching for a show
# using general queries
return []
def get_showsearch_urls(self, search_request):
# to extend
# if module supports it, search specifically for show, otherwise make sure we create a query that searches
        # for s01e01, 1x1 etc
return []
def get_moviesearch_urls(self, search_request):
# to extend
        # if module doesn't support it possibly use (configurable) size restrictions when searching
return []
def get_ebook_urls(self, search_request):
# to extend
        # if module doesn't support it possibly use (configurable) size restrictions when searching
return []
def get_audiobook_urls(self, search_request):
# to extend
        # if module doesn't support it possibly use (configurable) size restrictions when searching
return []
def get_comic_urls(self, search_request):
# to extend
        # if module doesn't support it possibly use (configurable) size restrictions when searching
return []
def get_anime_urls(self, search_request):
# to extend
        # if module doesn't support it possibly use (configurable) size restrictions when searching
return []
def get_details_link(self, guid):
return ""
def get_entry_by_id(self, guid, title):
# to extend
# Returns an NzbSearchResult for the given GUID
return None
def create_nzb_search_result(self):
result = NzbSearchResult(indexer=self.name, indexerscore=self.score, attributes=[{"name": "hydraIndexerName", "value": self.settings.name},
{"name": "hydraIndexerHost", "value": self.settings.host},
{"name": "hydraIndexerScore", "value": self.settings.score}])
return result
@staticmethod
def getRejectedCountDict():
return {
"the results were passworded": 0,
"the title contained a forbidden word": 0,
"a required word was missing in the title": 0,
"the required regex was not found in the title": 0,
"the forbidden regex was found in the title": 0,
"they were posted in a forbidden group": 0,
"they were posted by a forbidden poster": 0,
"they had the wrong size": 0,
"they had the wrong age": 0,
"they were missing necessary attributes": 0,
"their category is to be ignored": 0
}
def accept_result(self, nzbSearchResult, searchRequest, supportedFilters):
global titleRegex
# Allows the implementations to check against one general rule if the search result is ok or shall be discarded
if config.settings.searching.ignorePassworded and nzbSearchResult.passworded:
return False, "passworded results shall be ignored", "the results were passworded"
# Forbidden and required words are handled differently depending on if they contain a dash or dot. If yes we do a simple search, otherwise a word based comparison
for word in searchRequest.forbiddenWords:
if "-" in word or "." in word or "nzbgeek" in self.settings.host: # NZBGeek must be handled here because it only allows 12 words at all so it's possible that not all words were ignored
if word.strip().lower() in nzbSearchResult.title.lower():
return False, '"%s" is in the list of ignored words or excluded by the query' % word, "the title contained a forbidden word"
elif word.strip().lower() in titleRegex.findall(nzbSearchResult.title.lower()):
return False, '"%s" is in the list of ignored words or excluded by the query' % word, "the title contained a forbidden word"
if searchRequest.requiredWords and len(searchRequest.requiredWords) > 0:
foundRequiredWord = False
titleWords = titleRegex.findall(nzbSearchResult.title.lower())
for word in searchRequest.requiredWords:
if "-" in word or "." in word:
if word.strip().lower() in nzbSearchResult.title.lower():
foundRequiredWord = True
break
elif word.strip().lower() in titleWords:
foundRequiredWord = True
break
if not foundRequiredWord:
return False, 'None of the required words is contained in the title "%s"' % nzbSearchResult.title, "a required word was missing in the title"
applyRestrictionsGlobal = config.settings.searching.applyRestrictions == "both" or (config.settings.searching.applyRestrictions == "internal" and searchRequest.internal) or (config.settings.searching.applyRestrictions == "external" and not searchRequest.internal)
applyRestrictionsCategory = searchRequest.category.category.applyRestrictions == "both" or (searchRequest.category.category.applyRestrictions == "internal" and searchRequest.internal) or (searchRequest.category.category.applyRestrictions == "external" and not searchRequest.internal)
if (searchRequest.category.category.requiredRegex and applyRestrictionsCategory and not re.search(searchRequest.category.category.requiredRegex, nzbSearchResult.title.lower())) \
or (config.settings.searching.requiredRegex and applyRestrictionsGlobal and not re.search(config.settings.searching.requiredRegex, nzbSearchResult.title.lower())):
return False, "Required regex not found in title", "the required regex was not found in the title"
if (searchRequest.category.category.forbiddenRegex and applyRestrictionsCategory and re.search(searchRequest.category.category.forbiddenRegex, nzbSearchResult.title.lower())) \
or (config.settings.searching.forbiddenRegex and applyRestrictionsGlobal and re.search(config.settings.searching.forbiddenRegex, nzbSearchResult.title.lower())):
return False, "Forbidden regex found in title", "the forbidden regex was found in the title"
if config.settings.searching.forbiddenGroups and nzbSearchResult.group:
for forbiddenPoster in config.settings.searching.forbiddenGroups.split(","):
if forbiddenPoster in nzbSearchResult.group:
return False, "Posted in forbidden group '%s'" % forbiddenPoster, "they were posted in a forbidden group"
if config.settings.searching.forbiddenPosters and nzbSearchResult.poster:
for forbiddenPoster in config.settings.searching.forbiddenPosters.split(","):
if forbiddenPoster in nzbSearchResult.poster:
return False, "Posted by forbidden poster '%s'" % forbiddenPoster, "they were posted by a forbidden poster"
if searchRequest.minsize and nzbSearchResult.size / (1024 * 1024) < searchRequest.minsize:
return False, "Smaller than requested minimum size: %dMB < %dMB" % (nzbSearchResult.size / (1024 * 1024), searchRequest.minsize), "they had the wrong size"
if searchRequest.maxsize and nzbSearchResult.size / (1024 * 1024) > searchRequest.maxsize:
return False, "Bigger than requested maximum size: %dMB > %dMB" % (nzbSearchResult.size / (1024 * 1024), searchRequest.maxsize), "they had the wrong size"
if searchRequest.minage and nzbSearchResult.age_days < searchRequest.minage:
return False, "Younger than requested minimum age: %dd < %dd" % (nzbSearchResult.age_days, searchRequest.minage), "they had the wrong age"
if searchRequest.maxage and nzbSearchResult.age_days > searchRequest.maxage:
return False, "Older than requested maximum age: %dd > %dd" % (nzbSearchResult.age_days, searchRequest.maxage), "they had the wrong age"
if nzbSearchResult.pubdate_utc is None:
return False, "Unknown age", "they were missing necessary attributes"
if nzbSearchResult.category:
ignore = False
reason = ""
if nzbSearchResult.category.ignoreResults == "always":
reason = "always"
ignore = True
elif nzbSearchResult.category.ignoreResults == "internal" and searchRequest.internal:
reason = "for internal searches"
ignore = True
elif nzbSearchResult.category.ignoreResults == "external" and not searchRequest.internal:
reason = "for API searches"
ignore = True
elif self.settings.categories and nzbSearchResult.category.name not in self.settings.categories:
reason = "by this indexer"
ignore = True
if ignore:
return False, "Results from category %s are configured to be ignored %s" % (nzbSearchResult.category.pretty, reason), "their category is to be ignored"
return True, None, ""
def process_query_result(self, result, searchRequest, maxResults=None):
return []
def check_auth(self, body):
# check the response body to see if request was authenticated. If yes, do nothing, if no, raise exception
return []
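    # Escalating disable durations in minutes: handle_indexer_failure moves one
    # level up per failure (capped at the last entry) and handle_indexer_success
    # moves one level back down per success.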
disable_periods = [0, 15, 30, 60, 3 * 60, 6 * 60, 12 * 60, 24 * 60]
def handle_indexer_success(self, doSaveIndexerStatus=True):
# Deescalate level by 1 (or stay at 0) and reset reason and disable-time
try:
indexer_status = self.indexer.status.get()
except IndexerStatus.DoesNotExist:
indexer_status = IndexerStatus(indexer=self.indexer)
if indexer_status.level > 0:
indexer_status.level -= 1
indexer_status.reason = None
indexer_status.disabled_permanently = False
indexer_status.disabled_until = arrow.get(0) # Because I'm too dumb to set it to None/null
if doSaveIndexerStatus:
self.saveIndexerStatus(indexer_status)
return indexer_status
@retry((InterfaceError, OperationalError), delay=1, tries=5, logger=logger)
def saveIndexerStatus(self, indexer_status):
with databaseLock:
indexer_status.save()
def handle_indexer_failure(self, reason=None, disable_permanently=False, saveIndexerStatus=True):
# Escalate level by 1. Set disabled-time according to level so that with increased level the time is further in the future
try:
indexer_status = self.indexer.status.get()
except IndexerStatus.DoesNotExist:
indexer_status = IndexerStatus(indexer=self.indexer)
if indexer_status.level == 0:
indexer_status.first_failure = arrow.utcnow()
indexer_status.latest_failure = arrow.utcnow()
indexer_status.reason = reason # Overwrite the last reason if one is set, should've been logged anyway
if disable_permanently:
indexer_status.disabled_permanently = True
self.info("Disabling indexer permanently until reenabled by user because the authentication failed")
else:
indexer_status.level = min(len(self.disable_periods) - 1, indexer_status.level + 1)
indexer_status.disabled_until = arrow.utcnow().replace(minutes=+self.disable_periods[indexer_status.level])
self.info("Disabling indexer temporarily due to access problems. Will be reenabled %s" % indexer_status.disabled_until.humanize())
if saveIndexerStatus:
self.saveIndexerStatus(indexer_status)
return indexer_status
def get(self, url, timeout=None, cookies=None):
# overwrite for special handling, e.g. cookies
headers = {'User-Agent': "NZBHydra"}
if hasattr(self.settings, "userAgent") and self.settings.userAgent:
headers['User-Agent'] = self.settings.userAgent
elif config.settings.searching.userAgent:
headers['User-Agent'] = config.settings.searching.userAgent
if timeout is None:
timeout = self.settings.timeout
if timeout is None:
timeout = config.settings.searching.timeout
if hasattr(self.settings, "username") and self.settings.username and self.settings.password:
auth = HTTPBasicAuth(self.settings.username, self.settings.password)
self.debug("Using HTTP auth")
else:
auth = None
self.debug("Requesting %s with timeout %d" % (url, timeout))
return webaccess.get(url, timeout=timeout, cookies=cookies, headers=headers, auth=auth)
def get_url_with_papi_access(self, url, type, cookies=None, timeout=None, saveToDb=True):
papiaccess = IndexerApiAccess(indexer=self.indexer, type=type, url=url, time=arrow.utcnow().datetime)
try:
papiaccess.username = request.authorization.username if request.authorization is not None else None
except RuntimeError:
# Is thrown when we're searching which is run in a thread. When downloading NFOs or whatever this will work
pass
indexerStatus = None
try:
time_before = arrow.utcnow()
response = self.get(url, cookies=cookies, timeout=timeout)
response.raise_for_status()
time_after = arrow.utcnow()
papiaccess.response_time = (time_after - time_before).seconds * 1000 + ((time_after - time_before).microseconds / 1000)
papiaccess.response_successful = True
self.debug("HTTP request to indexer completed in %dms" % papiaccess.response_time)
indexerStatus = self.handle_indexer_success(doSaveIndexerStatus=saveToDb)
except RequestException as e:
self.error("Error while connecting to URL %s: %s" % (url, str(e)))
papiaccess.error = "Connection failed: %s" % removeSensitiveData(str(e))
response = None
indexerStatus = self.handle_indexer_failure("Connection failed: %s" % removeSensitiveData(str(e)), saveIndexerStatus=saveToDb)
finally:
if saveToDb:
self.saveIndexerStatus(papiaccess)
return response, papiaccess, indexerStatus
def get_nfo(self, guid):
return None
def get_nzb_link(self, guid, title):
return None
def get_search_ids_from_indexer(self):
return []
def cleanUpTitle(self, title):
try:
if title is None or title == "":
return title
if config.settings.searching.removeTrailing:
for word in config.settings.searching.removeTrailing.split(","):
word = word.lower().strip()
if title.lower().strip().endswith(word):
self.debug("Removing trailing %s from title %s" % (word, title))
return title[:-len(word)].strip()
return title
except:
return title
def execute_queries(self, queries, searchRequest):
if len(queries) == 0:
return QueriesExecutionResult(didsearch=False, results=[], indexerSearchEntry=None, indexerApiAccessEntry=None, indexerStatus=None, total=0, loaded_results=0, total_known=True, has_more=False, rejected=self.getRejectedCountDict())
results = []
executed_queries = set()
psearch = IndexerSearch(indexer=self.indexer)
papiaccess = IndexerApiAccess()
indexerStatus = None
total_results = 0
total_known = False
has_more = False
rejected = self.getRejectedCountDict()
while len(queries) > 0:
query = queries.pop()
if query in executed_queries:
# To make sure that in case an offset is reported wrong or we have a bug we don't get stuck in an endless loop
continue
try:
request, papiaccess, indexerStatus = self.get_url_with_papi_access(query, "search", saveToDb=False)
papiaccess.indexer_search = psearch
executed_queries.add(query)
if request is not None:
if request.text == "":
raise IndexerResultParsingException("Indexer returned an empty page", self)
self.check_auth(request.text)
self.debug("Successfully loaded URL %s" % request.url)
try:
parsed_results = self.process_query_result(request.text, searchRequest)
results.extend(parsed_results.entries) # Retrieve the processed results
queries.extend(parsed_results.queries) # Add queries that were added as a result of the parsing, e.g. when the next result page should also be loaded
total_results += parsed_results.total
total_known = parsed_results.total_known
has_more = parsed_results.has_more
rejected = parsed_results.rejected
papiaccess.response_successful = True
indexerStatus = self.handle_indexer_success(False)
except Exception:
self.exception("Error while processing search results from indexer %s" % self)
raise IndexerResultParsingException("Error while parsing the results from indexer", self)
except IndexerAuthException as e:
papiaccess.error = "Authorization error :%s" % e.message
self.error(papiaccess.error)
indexerStatus = self.handle_indexer_failure(reason="Authentication failed", disable_permanently=True)
papiaccess.response_successful = False
except IndexerAccessException as e:
papiaccess.error = "Access error: %s" % e.message
self.error(papiaccess.error)
indexerStatus = self.handle_indexer_failure(reason="Access failed")
papiaccess.response_successful = False
except IndexerResultParsingException as e:
papiaccess.error = "Access error: %s" % e.message
self.error(papiaccess.error)
indexerStatus = self.handle_indexer_failure(reason="Parsing results failed")
papiaccess.response_successful = False
except Exception as e:
self.exception("An error error occurred while searching: %s", e)
if papiaccess is not None:
papiaccess.error = "Unknown error :%s" % e
papiaccess.response_successful = False
finally:
if papiaccess is not None:
psearch.successful = papiaccess.response_successful
else:
self.error("Unable to save API response to database")
psearch.resultsCount = total_results
return QueriesExecutionResult(didsearch=True, results=results, indexerSearchEntry=psearch, indexerApiAccessEntry=papiaccess, indexerStatus=indexerStatus, total=total_results, loaded_results=len(results), total_known=total_known, has_more=has_more, rejected=rejected)
def debug(self, msg, *args, **kwargs):
self.logger.debug("%s: %s" % (self.name, msg), *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.logger.info("%s: %s" % (self.name, msg), *args, **kwargs)
def warn(self, msg, *args, **kwargs):
        self.logger.warning("%s: %s" % (self.name, msg), *args, **kwargs)
def error(self, msg, *args, **kwargs):
self.logger.error("%s: %s" % (self.name, msg), *args, **kwargs)
def exception(self, msg, *args, **kwargs):
self.logger.exception("%s: %s" % (self.name, msg), *args, **kwargs)
def isNumber(self, string):
if string is None:
return False
try:
int(string)
return True
except (TypeError, ValueError):
return False
def getDates(self, entry, usenetdate, preciseDate=True):
entry.epoch = usenetdate.timestamp
entry.age = usenetdate.humanize()
entry.pubdate_utc = str(usenetdate)
age = (arrow.utcnow() - usenetdate)
if age.days == 0 and preciseDate:
if age.seconds < 3600:
entry.age = "%dm" % ((arrow.utcnow() - usenetdate).seconds / 60)
else:
entry.age = "%dh" % ((arrow.utcnow() - usenetdate).seconds / 3600)
else:
entry.age = str(age.days) + "d"
entry.age_days = age.days
entry.precise_date = preciseDate
entry.pubDate = usenetdate.format("ddd, DD MMM YYYY HH:mm:ss Z")
def get_instance(indexer):
return SearchModule(indexer)
|
#!/usr/bin/env python
"""
NSX-T SDK Sample Code
Copyright 2019 VMware, Inc. All rights reserved
The BSD-2 license (the "License") set forth below applies to all
parts of the NSX-T SDK Sample Code project. You may not use this
file except in compliance with the License.
BSD-2 License
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import time
from util import auth
from util import getargs
from com.vmware.nsx.model_client import ApiError
from com.vmware.vapi.std.errors_client import Error
from com.vmware.vapi.std.errors_client import ServiceUnavailable
from google.api_core.retry import Retry
from google.api_core.retry import if_exception_type
"""
This example shows how to implement backoff and retry when an API
call fails because of API rate-limiting. It makes use of the
Retry helper from the google core APIs.
To run this example, you will need to install google-api-core
with "pip install google-api-core".
"""
# Call the given API, retrying if the API fails with a ServiceUnavailable
# exception (both the 429 Too Many Requests and 503 Service Unavailable
# responses that the NSX-T API may return map to this exception).
# Initially back off for one tenth of a second. By default, the Retry
# will double the backoff interval each time up to a maximum of
# 60 seconds. For more information on Retry, see
# https://googleapis.dev/python/google-api-core/latest/retry.html
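# The backoff can be tuned further via Retry's other keyword arguments, e.g.
# Retry(predicate=if_exception_type(ServiceUnavailable), initial=0.1,
#       maximum=10.0, multiplier=2.0) caps the wait at 10 seconds instead of
# the 60-second default.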
@Retry(predicate=if_exception_type(ServiceUnavailable), initial=0.1)
def call_api_with_retry(api, *args):
    return api(*args)
def main():
args = getargs.getargs()
api_client = auth.create_nsx_policy_api_client(
args.user, args.password, args.nsx_host, args.tcp_port,
auth_type=auth.SESSION_AUTH)
while True:
n = 0
last = time.time()
while time.time() - last < 1.0:
call_api_with_retry(api_client.Infra.get)
n += 1
            # API calls that take arguments:
            #
            # Pass the callable followed by its positional arguments;
            # call_api_with_retry forwards them with api(*args).
            # The usual way to call this API would be:
            #   api_client.infra.Domains.get("default")
            #
call_api_with_retry(api_client.infra.Domains.get, "default")
n += 1
print("%d calls/sec" % n)
if __name__ == "__main__":
main()
|
from tests.system.action.base import BaseActionTestCase
class SpeakerCreateActionTest(BaseActionTestCase):
    def test_create(self) -> None:
self.create_model("meeting/7844", {"name": "name_asdewqasd"})
self.create_model("user/7", {"username": "test_username1"})
self.create_model(
"list_of_speakers/23", {"speaker_ids": [], "meeting_id": 7844}
)
response = self.client.post(
"/",
json=[
{
"action": "speaker.create",
"data": [{"user_id": 7, "list_of_speakers_id": 23}],
}
],
)
self.assert_status_code(response, 200)
speaker = self.get_model("speaker/1")
assert speaker.get("user_id") == 7
assert speaker.get("list_of_speakers_id") == 23
assert speaker.get("weight") == 10000
list_of_speakers = self.get_model("list_of_speakers/23")
assert list_of_speakers.get("speaker_ids") == [1]
user = self.get_model("user/7")
assert user.get("speaker_$7844_ids") == [1]
assert user.get("speaker_$_ids") == ["7844"]
def test_create_empty_data(self) -> None:
response = self.client.post(
"/",
json=[{"action": "speaker.create", "data": [{}]}],
)
self.assert_status_code(response, 400)
self.assertIn(
"data must contain [\\'list_of_speakers_id\\', \\'user_id\\'] properties",
str(response.data),
)
def test_create_wrong_field(self) -> None:
response = self.client.post(
"/",
json=[
{
"action": "speaker.create",
"data": [{"wrong_field": "text_AefohteiF8"}],
}
],
)
self.assert_status_code(response, 400)
self.assertIn(
"data must contain [\\'list_of_speakers_id\\', \\'user_id\\'] properties",
str(response.data),
)
def test_create_already_exist(self) -> None:
self.create_model("meeting/7844", {"name": "name_asdewqasd"})
self.create_model(
"user/7", {"username": "test_username1", "speaker_$7844_ids": [42]}
)
self.create_model(
"list_of_speakers/23", {"speaker_ids": [42], "meeting_id": 7844}
)
self.create_model("speaker/42", {"user_id": 7, "list_of_speakers_id": 23})
response = self.client.post(
"/",
json=[
{
"action": "speaker.create",
"data": [{"user_id": 7, "list_of_speakers_id": 23}],
}
],
)
self.assert_status_code(response, 400)
list_of_speakers = self.get_model("list_of_speakers/23")
assert list_of_speakers.get("speaker_ids") == [42]
|
import sys
sys.path.append('../')
from pycore.tikzeng import *
width = 3.5
arch = [
to_head('..'),
to_cor(),
to_begin(),
to_input('./icra/rgb.png', to="(-10, 0, 0)", width=6.5, height=6.5),
to_ConvReluNewColor(name='cr_a0', s_filer=304, y_filer=224, n_filer=64, offset="(-8, 0, 0)", to="(0,0,0)", width=4, height=32, depth=32),
to_input('./icra/sparse_depth.png', to="(-4, 0, 0)", width=6.5, height=6.5),
to_ConvReluNewColor(name='cr_b0', s_filer=304, y_filer=224, n_filer=64, offset="(-2, 0, 0)", to="(0, 0, 0)", width=4, height=32, depth=32),
to_ConvRelu(name='cr_a00', s_filer=304, n_filer=64, offset="(0, 0, 0)", to="(cr_b0-east)", width=4, height=32, depth=32),
to_skip(of='cr_a0', to="cr_a00", pos=1.4),
# conv1
to_ConvReluNew(
name="cr_{}".format('b1'), offset="(1.5, 0, 0)", to="(0,0, 0)".format('b0'),
s_filer=304, y_filer=224, n_filer=256, width=width*1.2, height=32, depth=32,
),
to_Pool(
name="{}".format('pool_b1'), offset="(0,0,0)", to="(cr_{}-east)".format('b1'),
width=1, height=int(32 * 3 / 4), depth=int(32 * 3 / 4), opacity=0.5
),
# conv2
to_ConvReluNew(
name="cr_{}".format('b2'), offset="(1.5, 0, 0)", to="({}-east)".format('pool_b1'),
s_filer=152, y_filer=112, n_filer=512, width=width*1.5, height=25, depth=25,
),
to_Pool(
name="{}".format('pool_b2'), offset="(0,0,0)", to="(cr_{}-east)".format('b2'),
width=1, height=int(25 * 3 / 4), depth=int(25 * 3 / 4), opacity=0.5
),
# conv3
to_ConvReluNew(
name="cr_{}".format('b3'), offset="(1.5, 0, 0)", to="({}-east)".format('pool_b2'),
s_filer=76, y_filer=56, n_filer=1024, width=width*1.8, height=20, depth=20,
),
to_Pool(
name="{}".format('pool_b3'), offset="(0,0,0)", to="(cr_{}-east)".format('b3'),
width=1, height=int(20 * 3 / 4), depth=int(20 * 3 / 4), opacity=0.5
),
# conv4
to_ConvReluNew(
name="cr_{}".format('b4'), offset="(1.5, 0, 0)", to="({}-east)".format('pool_b3'),
s_filer=38, y_filer=28, n_filer=2048, width=width*2, height=16, depth=16,
),
to_Pool(
name="{}".format('pool_b4'), offset="(0,0,0)", to="(cr_{}-east)".format('b4'),
width=1, height=int(16 * 3 / 4), depth=int(16 * 3 / 4), opacity=0.5
),
# to_skipNew(of='cr_c0', to="b0", pos=1.3),
# to_skipNew1(of='cr_c0', to="b0", pos=1.3),
# to_connection("{}".format('cr_a0'), "b00"),
to_connection("{}".format('cr_a00'), "cr_{}".format('b1')),
to_connection("{}".format('pool_b1'), "cr_{}".format('b2')),
to_connection("{}".format('pool_b2'), "cr_{}".format('b3')),
to_connection("{}".format('pool_b3'), "cr_{}".format('b4')),
# Bottleneck
# block-005
to_ConvReluNewColor(name='cr_b5', s_filer=19, y_filer=14, n_filer=512, offset="(1.5,0,0)", to="(pool_b4-east)", width=width*2.2, height=8, depth=8, caption=""),
to_connection("pool_b4", "cr_b5"),
# Decoder
# convt2
to_UnPoolNew(name='unpool_{}'.format('b6'), offset="(2.0,0,0)", to="({}-east)".format('cr_b5'),
y_filer=28, width=1, height=16, depth=16, opacity=0.5),
to_ConvResSimple(name='cr_res_{}'.format('b6'), offset="(0,0,0)", to="(unpool_{}-east)".format('b6'),
n_filer=2048, width=width*2, height=16, depth=16,
opacity=0.5),
to_ConvReluSimple(name='cr_{}'.format('b6'), offset="(0,0,0)", to="(cr_res_{}-east)".format('b6'),
s_filer=64, n_filer=2048, width=width*2, height=16, depth=16),
# convt3
to_UnPoolNew(name='unpool_{}'.format('b7'), offset="(1.2, 0, 0)", to="({}-east)".format('cr_b6'),
y_filer=56, width=1, height=20, depth=20, opacity=0.5),
to_ConvResSimple(name='cr_res_{}'.format('b7'), offset="(0, 0, 0)", to="(unpool_{}-east)".format('b7'),
n_filer=1024, width=width*1.8, height=20, depth=20, opacity=0.5),
to_ConvReluSimple(name='cr_{}'.format('b7'), offset="(0,0,0)", to="(cr_res_{}-east)".format('b7'),
s_filer=128, n_filer=1024, width=width*1.8, height=20, depth=20),
# convt4
to_UnPoolNew(name='unpool_{}'.format('b8'), offset="(1.5, 0, 0)", to="({}-east)".format('cr_b7'),
y_filer=112, width=1, height=25, depth=25, opacity=0.5),
to_ConvResSimple(name='cr_res_{}'.format('b8'), offset="(0, 0, 0)", to="(unpool_{}-east)".format('b8'),
n_filer=512, width=width*1.5, height=25, depth=25, opacity=0.5),
to_ConvReluSimple(name='cr_{}'.format('b8'), offset="(0,0,0)", to="(cr_res_{}-east)".format('b8'),
s_filer=256, n_filer=512, width=width*1.5, height=25, depth=25),
# convt5
to_UnPoolNew(name='unpool_{}'.format('b9'), offset="(2, 0, 0)", to="({}-east)".format('cr_b8'),
y_filer=224, width=1, height=32, depth=32, opacity=0.5),
to_ConvResSimple(name='cr_res_{}'.format('b9'), offset="(0, 0, 0)", to="(unpool_{}-east)".format('b9'),
n_filer=256, width=width*1.2, height=32, depth=32, opacity=0.5),
to_ConvReluSimple(name='cr_{}'.format('b9'), offset="(0,0,0)", to="(cr_res_{}-east)".format('b9'),
s_filer=256, n_filer=256, width=width*1.2, height=32, depth=32),
# convt6
to_UnPoolNew(name='unpool_{}'.format('b10'), offset="(2, 0, 0)", to="({}-east)".format('cr_b9'),
y_filer=224, width=1, height=32, depth=32, opacity=0.5),
to_ConvResSimple(name='cr_res_{}'.format('b10'), offset="(0, 0, 0)", to="(unpool_{}-east)".format('b10'),
n_filer=64, width=width, height=32, depth=32, opacity=0.5),
to_ConvReluSimple(name='cr_{}'.format('b10'), offset="(0,0,0)", to="(cr_res_{}-east)".format('b10'),
s_filer=256, n_filer=64, width=width, height=32, depth=32),
to_ConvReluNew(name="last", s_filer=304, y_filer=224, n_filer=1, offset="(2.5,0,0)", to="(cr_b10-east)",
width=4, height=32, depth=32),
to_SoftMaxNew(name="d2n", s_filer=304, y_filer=224, n_filer=3, offset="(2.5, 0, 0)", to="(last-east)",
width=4, height=32, depth=32, opacity=0.5),
to_skip(of='cr_b4', to='cr_b6', pos=1.25),
to_skip(of='cr_b3', to='cr_b7', pos=1.25),
to_skip(of='cr_b2', to='cr_b8', pos=1.25),
to_skip(of='cr_b1', to='cr_b9', pos=1.25),
to_skip(of='cr_a00', to='cr_b10', pos=1.4),
to_connection("cr_{}".format('b5'), "unpool_{}".format('b6')),
to_connection("cr_{}".format('b6'), "unpool_{}".format('b7')),
to_connection("cr_{}".format('b7'), "unpool_{}".format('b8')),
to_connection("cr_{}".format('b8'), "unpool_{}".format('b9')),
to_connection("cr_{}".format('b9'), "unpool_{}".format('b10')),
to_connection("cr_b10", "last"),
to_input('./icra/estimated_depth.png', to="(last-east)", width=6.5, height=6.5),
to_connection("last", "d2n"),
to_input('./icra/estimated_normal.png', to="(d2n-east)", width=6.5, height=6.5),
to_UnPool(name='legend_unpool', offset="(-12, 10, 0)", to="(unpool_{}-east)".format('b9'),
width=1, height=16, depth=16, opacity=0.5, caption="Unpooling"),
to_ConvReluNewColorLegend(name='legend_conv', offset="(3, 0,0)", to="(legend_unpool-east)",
width=4, height=16, depth=16, caption="Convolution"),
    to_ConvResSimpleSimple(name='legend_deconv', offset="(3, 0, 0)", to="(legend_conv-east)",
                           width=width, height=16, depth=16, opacity=0.5, caption="Deconvolution"),
    to_ConvSimple(name='legend_resnet', offset="(3, 0, 0)", to="(legend_deconv-east)",
                  width=width, height=16, depth=16, caption="ResNet Block"),
to_Pool(name='legend_pool', offset="(3, 0, 0)", to="(legend_resnet-east)", width=1, height=16, depth=16, opacity=0.5, caption="Pooling"),
to_relu(name='legend_relu', offset="(3, 0, 0)", to="(legend_pool-east)", width=1, height=16, depth=16, opacity=0.5, caption="ReLU"),
to_SoftMaxSimple(name="d2n", offset="(2.5, 0, 0)", to="(legend_relu-east)",
width=4, height=16, depth=16, opacity=0.5, caption="d2n"),
to_end()
]
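# Typical PlotNeuralNet workflow (a sketch; the exact helper depends on your
# checkout): run this script to emit the .tex file, then compile it with
# pdflatex (or the repo's tikzmake.sh wrapper).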
if __name__ == '__main__':
namefile = str(sys.argv[0]).split('.')[0]
to_generate(arch, namefile + '.tex')
|
import os
def next_path(path_pattern):
"""
    Finds the next free path in a sequentially named list of files
e.g. path_pattern = 'file-%s.txt':
file-1.txt
file-2.txt
file-3.txt
Runs in log(n) time where n is the number of existing files in sequence
"""
i = 1
# First do an exponential search
while os.path.exists(path_pattern % i):
i = i * 2
# Result lies somewhere in the interval (i/2..i]
# We call this interval (a..b] and narrow it down until a + 1 = b
a, b = (i // 2, i)
while a + 1 < b:
c = (a + b) // 2 # interval midpoint
a, b = (c, b) if os.path.exists(path_pattern % c) else (a, c)
return path_pattern % b
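# Example with hypothetical filenames: if file-1.txt and file-2.txt already
# exist, next_path('file-%s.txt') returns 'file-3.txt'; with no matching files
# at all it returns 'file-1.txt'.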
|
import torch
import copy
from .linear_solver import linear_solver
from torch.autograd import Variable
import sys
sys.path.append('../')
from utils.manip import clip_image_values
from .deepfool import deepfool
def sparsefool(x_0, net, lb, ub, lambda_=3., max_iter=20, epsilon=0.02, device='cuda', activity_mask=None):
pred_label = torch.argmax(net.forward(Variable(x_0, requires_grad=True)).data).item()
x_i = copy.deepcopy(x_0)
fool_im = copy.deepcopy(x_i)
fool_label = pred_label
loops = 0
while fool_label == pred_label and loops < max_iter:
normal, x_adv = deepfool(x_i, net, lambda_, device=device)
        x_i = linear_solver(x_i, normal, x_adv, lb, ub, activity_mask=activity_mask)
fool_im = x_0 + (1 + epsilon) * (x_i - x_0)
fool_im = clip_image_values(fool_im, lb, ub)
fool_label = torch.argmax(net.forward(fool_im)).item()
loops += 1
r = fool_im - x_0
return fool_im, r, pred_label, fool_label, loops
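# Minimal usage sketch (assumes `net` is a trained classifier and x_0, lb, ub
# are tensors of identical shape giving the input and its valid pixel bounds):
#     fool_im, r, pred_label, fool_label, loops = sparsefool(x_0, net, lb, ub)
#     success = fool_label != pred_label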
|
import re
from dataclasses import dataclass
from typing import Iterator, List, Optional
from ...logging import get_logger
@dataclass(frozen=True)
class ParseResult:
name: str
namespaces: List[str]
@dataclass
class OutputPatterns:
failed_test: str
namespace_separator: Optional[str] = None
ansi: bool = False
failed_name_prefix: Optional[str] = None
_BASE_PATTERNS = {
"python#pytest": OutputPatterns(
failed_test=r"^(FAILED|ERROR) .+?::(?P<namespaces>.+::)?(?P<name>[^[\s]*)(.+])?( |$)",
namespace_separator="::",
),
"python#pyunit": OutputPatterns(
failed_test=r"^FAIL: (?P<name>.*) \(.*?(?P<namespaces>\..+)\)",
namespace_separator=r"\.",
),
"go#gotest": OutputPatterns(failed_test=r"^.*--- FAIL: (?P<name>.+?) "),
"go#richgo": OutputPatterns(
failed_test=r"^FAIL\s\|\s(?P<name>.+?) \(.*\)",
ansi=True,
failed_name_prefix="Test",
),
"javascript#jest": OutputPatterns(
failed_test=r"^\s*● (?P<namespaces>.* › )?(?P<name>.*)$",
ansi=True,
namespace_separator=" › ",
),
"elixir#exunit": OutputPatterns(failed_test=r"\s*\d\) test (?P<name>.*) \(.*\)$"),
"php#phpunit": OutputPatterns(
failed_test=r"\s*\d\)(?P<namespace>.*)::(?P<name>.*)",
)
}
# https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
_ANSI_ESCAPE = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
logger = get_logger()
class OutputParser:
def __init__(self, disable_patterns: List[str]) -> None:
self._patterns = {
runner: patterns
for runner, patterns in _BASE_PATTERNS.items()
if runner not in disable_patterns
}
def can_parse(self, runner: str) -> bool:
return runner in self._patterns
def parse_failed(self, runner: str, output: List[str]) -> Iterator[ParseResult]:
pattern = self._patterns[runner]
fail_pattern = re.compile(pattern.failed_test)
for line in output:
match = fail_pattern.match(
_ANSI_ESCAPE.sub("", line) if pattern.ansi else line
)
if match:
logger.finfo(
"Found failed test in output {match['name']} in namespaces {match['namespaces']} of runner {runner}"
)
namespaces = (
[
namespace
for namespace in re.split(
pattern.namespace_separator, match["namespaces"]
)
if namespace
]
if pattern.namespace_separator and match["namespaces"]
else []
)
name = (
f"{pattern.failed_name_prefix}{match['name']}"
if pattern.failed_name_prefix
else match["name"]
)
yield ParseResult(name=name, namespaces=namespaces)
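# Usage sketch with a hypothetical go test output line:
#     parser = OutputParser(disable_patterns=[])
#     list(parser.parse_failed("go#gotest", ["--- FAIL: TestFoo (0.00s)"]))
#     # -> [ParseResult(name="TestFoo", namespaces=[])]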
|
"""Test the IPython.kernel public API
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.testing import decorators as dec
from IPython.kernel import launcher, connect
from IPython import kernel
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@dec.parametric
def test_kms():
for base in ("", "Blocking", "Multi"):
KM = base + "KernelManager"
yield nt.assert_true(KM in dir(kernel), KM)
@dec.parametric
def test_launcher():
for name in launcher.__all__:
yield nt.assert_true(name in dir(kernel), name)
@dec.parametric
def test_connect():
for name in connect.__all__:
yield nt.assert_true(name in dir(kernel), name)
|
"""Tests for Unix Timestamp Flask application."""
import pytest
import unixtimestamp
@pytest.fixture
def app():
"""Configure the app for testing."""
the_app = unixtimestamp.create_app()
the_app.testing = True
return the_app
|
# Copyright (c) 2019 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def init_nnabla(ctx_config):
import nnabla as nn
from nnabla.ext_utils import get_extension_context
from comm import CommunicatorWrapper
# set context
ctx = get_extension_context(**ctx_config)
# init communicator
comm = CommunicatorWrapper(ctx)
nn.set_default_context(comm.ctx)
# disable outputs from logger except rank==0
if comm.rank > 0:
from nnabla import logger
import logging
logger.setLevel(logging.ERROR)
return comm
class AttrDict(dict):
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
if key not in self:
raise AttributeError("No such attribute `{}`".format(key))
if isinstance(self[key], dict):
self[key] = AttrDict(self[key])
return self[key]
def dump_to_stdout(self):
print("================================configs================================")
for k, v in self.items():
print("{}: {}".format(k, v))
print("=======================================================================")
def makedirs(dirpath):
if os.path.exists(dirpath):
if os.path.isdir(dirpath):
return
else:
raise ValueError(
"{} already exists as a file not a directory.".format(dirpath))
os.makedirs(dirpath)
def get_current_time():
from datetime import datetime
return datetime.now().strftime('%m%d_%H%M%S')
|
"""Class for working with GCP connections (e.g. Pub/Sub messages)"""
import datetime
import json
import os
import sys
try:
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from google.cloud import pubsub_v1
from google.cloud import storage
from google.cloud import logging
from google.auth import _default as google_auth
# pylint: disable=no-member
DESCENDING = firestore.Query.DESCENDING
SUCCESSFUL_IMPORTS = True
except ImportError:
SUCCESSFUL_IMPORTS = False
from grpc import StatusCode
import logger
import configurator
LOGGER = logger.get_logger('gcp')
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
DEFAULT_LIMIT = 100
def get_timestamp():
""""Get a JSON-compatible formatted timestamp"""
return to_timestamp(datetime.datetime.now(datetime.timezone.utc))
def to_timestamp(timestamp):
""""Get a JSON-compatible formatted timestamp"""
return timestamp.strftime(TIMESTAMP_FORMAT)[:-3] + 'Z'
def parse_timestamp(timestamp_str):
"""Parses a timestamp generated from get_timestamp"""
return datetime.datetime.strptime(timestamp_str, TIMESTAMP_FORMAT + 'Z')
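# Round-trip note: parse_timestamp(get_timestamp()) works because strptime's %f
# accepts the millisecond precision left after to_timestamp truncates
# microseconds to three digits before appending 'Z'.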
class GcpManager:
"""Manager class for working with GCP"""
REPORT_BUCKET_FORMAT = '%s.appspot.com'
def __init__(self, config, callback_handler):
self.config = config
self._callback_handler = callback_handler
cred_file = self.config.get('gcp_cred')
if not cred_file:
LOGGER.info('No gcp_cred file specified in config, disabling gcp use.')
self._pubber = None
self._storage = None
self._firestore = None
self._client_name = None
return
assert SUCCESSFUL_IMPORTS, "Missing google cloud python dependencies."
LOGGER.info('Loading gcp credentials from %s', cred_file)
# Normal execution assumes default credentials.
(self._credentials, self._project) = google_auth.load_credentials_from_file(cred_file)
self._client_name = self._parse_creds(cred_file)
self._site_name = self._get_site_name()
self._pubber = pubsub_v1.PublisherClient(credentials=self._credentials)
LOGGER.info('Initialized gcp pub/sub %s:%s:%s', self._project,
self._client_name, self._site_name)
self._firestore = self._initialize_firestore(cred_file)
self._report_bucket_name = self.REPORT_BUCKET_FORMAT % self._project
self._storage = storage.Client(project=self._project, credentials=self._credentials)
self._bucket = self._ensure_report_bucket()
self._config_callbacks = {}
self._logging = logging.Client(credentials=self._credentials, project=self._project)
LOGGER.info('Connection initialized at %s', get_timestamp())
def get_logging_client(self):
"""Gets the stackdriver client"""
return (self._client_name, self._logging) if self._client_name else None
def _initialize_firestore(self, cred_file):
cred = credentials.Certificate(cred_file)
firebase_admin.initialize_app(cred)
LOGGER.info('Initialized gcp firestore %s:%s', self._project, self._client_name)
dashboard_url = 'https://%s.firebaseapp.com/?origin=%s' % (self._project, self._client_name)
LOGGER.info('Dashboard at %s', dashboard_url)
return firestore.client()
def _on_snapshot(self, callback, doc_snapshot, immediate):
def handler():
for doc in doc_snapshot:
doc_data = doc.to_dict()
timestamp = doc_data['timestamp']
if immediate or doc_data['saved'] != timestamp:
callback(doc_data['config'])
doc.reference.update({
'saved': timestamp
})
self._callback_handler(handler)
def register_config(self, path, config, callback=None, immediate=False):
"""Register a config blob with callback"""
if not self._firestore:
return
assert path, 'empty config path'
full_path = 'origin/%s/%s/config/definition' % (self._client_name, path)
if full_path in self._config_callbacks:
LOGGER.info('Unsubscribe callback %s', path)
self._config_callbacks[full_path]['future'].unsubscribe()
del self._config_callbacks[full_path]
config_doc = self._firestore.document(full_path)
if config is not None:
timestamp = get_timestamp()
LOGGER.info('Registering %s', full_path)
config_doc.set({
'config': config,
'saved': timestamp,
'timestamp': timestamp
})
else:
LOGGER.info('Releasing %s', full_path)
config_doc.delete()
if callback:
assert config is not None, 'callback defined when deleting config??!?!'
on_snapshot = lambda doc_snapshot, changed, read_time:\
self._on_snapshot(callback, doc_snapshot, immediate)
self._register_callback(config_doc, full_path, on_snapshot)
def _register_callback(self, config_doc, full_path, on_snapshot):
snapshot_future = config_doc.on_snapshot(on_snapshot)
self._config_callbacks[full_path] = {
'future': snapshot_future,
'config_doc': config_doc,
'on_snapshot': on_snapshot
}
self._apply_callback_hack(full_path, snapshot_future)
def _wrap_callback(self, callbacks, reason):
for callback in callbacks:
try:
callback(reason)
except Exception as e:
LOGGER.error('Capturing RPC error: %s', str(e))
def _hack_recv(self, rpc, path):
# pylint: disable=protected-access
try:
return rpc._recoverable(rpc._recv) # Erp.
except Exception as e:
LOGGER.error('Error intercepted at %s, %s for %s', get_timestamp(),
rpc.call._state.code, path)
if rpc.call._state.code == StatusCode.INTERNAL:
self._restart_callback(path)
raise e
def _restart_callback(self, path):
LOGGER.warning('Restarting callback %s', path)
callback = self._config_callbacks[path]
self._register_callback(callback['config_doc'], path, callback['on_snapshot'])
def _apply_callback_hack(self, path, snapshot_future):
# pylint: disable=protected-access
rpc = snapshot_future._rpc
rpc.recv = lambda: self._hack_recv(rpc, path)
callbacks = rpc._callbacks
LOGGER.info('Patching recv callback for %s with %s', path, len(callbacks))
wrapped_handler = lambda reason: self._wrap_callback(callbacks, reason)
rpc._callbacks = [wrapped_handler]
def release_config(self, path):
"""Release a config blob and remove it from the live data system"""
self.register_config(path, None)
def _get_site_name(self):
site_path = self.config['site_path']
cloud_config = os.path.join(site_path, 'cloud_iot_config.json')
if not os.path.isfile(cloud_config):
LOGGER.warning('Site cloud config file %s not found, using %s instead',
cloud_config, self._client_name)
return self._client_name
with open(cloud_config) as config_file:
return json.load(config_file)['site_name']
def _parse_creds(self, cred_file):
"""Parse JSON credential file"""
with open(cred_file) as data_file:
cred = json.load(data_file)
project = cred['project_id']
assert project == self._project, 'inconsistent credential projects'
client_email = cred['client_email']
            (client, dummy_other) = client_email.split('@', 1)
return client
def publish_message(self, topic, message_type, message):
"""Publish a message to pub/sub topic"""
if not self._pubber:
LOGGER.debug('Ignoring message publish: not configured')
return
envelope = {
'type': message_type,
'timestamp': get_timestamp(),
'payload': message
}
message_str = json.dumps(envelope)
LOGGER.debug('Sending to topic_path %s/%s: %s', self._project, topic, message_str)
# pylint: disable=no-member
topic_path = self._pubber.topic_path(self._project, topic)
future = self._pubber.publish(topic_path, message_str.encode('utf-8'),
projectId=self._project, origin=self._client_name,
site_name=self._site_name)
LOGGER.debug('Publish future result %s', future.result())
def _ensure_report_bucket(self):
bucket_name = self._report_bucket_name
if self._storage.lookup_bucket(bucket_name):
LOGGER.info('Storage bucket %s already exists', bucket_name)
else:
LOGGER.info('Creating storage bucket %s', bucket_name)
self._storage.create_bucket(bucket_name)
return self._storage.get_bucket(bucket_name)
def upload_file(self, file_name, destination_file_name=None):
"""Uploads a report to a storage bucket."""
if not self._storage:
            LOGGER.debug('Ignoring %s upload: not configured', file_name)
return None
destination_file_name = os.path.join('origin', self._client_name or "other",
destination_file_name or file_name)
blob = self._bucket.blob(destination_file_name)
blob.upload_from_filename(file_name)
        LOGGER.info('Uploaded %s', destination_file_name)
return destination_file_name
def register_offenders(self):
"""Register any offenders: people who are not enabled to use the system"""
if not self._firestore:
LOGGER.error('Firestore not initialized.')
return
LOGGER.info('Registering offenders...')
users = self._firestore.collection(u'users').stream()
for user in users:
permissions = self._firestore.collection(u'permissions').document(user.id).get()
user_email = user.to_dict().get('email')
enabled = permissions.to_dict() and permissions.to_dict().get('enabled')
if enabled:
LOGGER.info('Access already enabled for %s', user_email)
elif self._query_user('Enable access for %s? (N/y) ' % user_email):
LOGGER.info('Enabling access for %s', user_email)
self._firestore.collection(u'permissions').document(user.id).set({
'enabled': True
})
else:
LOGGER.info('Ignoring user %s', user_email)
def _get_json_report(self, runid):
doc = runid.reference.collection('test').document('terminate').get().to_dict()
report_blob = doc.get('report_path.json') if doc else None
if not report_blob:
return None
LOGGER.info('Downloading report %s', report_blob)
blob = self._bucket.blob(report_blob)
return json.loads(str(blob.download_as_string(), 'utf-8'))
# pylint: disable=too-many-arguments
def get_reports(self, device: str, start=None, end=None, count=None, daq_run_id=None):
"""Get filtered list of reports"""
if not self._firestore:
LOGGER.error('Firestore not initialized.')
return
LOGGER.info('Looking for reports from GCP...')
limit_count = count if count else DEFAULT_LIMIT
origin = self._firestore.collection(u'origin').document(self._client_name).get()
query = origin.reference.collection('runid').where('deviceId', '==', device)
if start:
LOGGER.info('Limiting to start time %s', to_timestamp(start))
query = query.where('updated', '>=', to_timestamp(start))
if end:
LOGGER.info('Limiting to end time %s', to_timestamp(end))
query = query.where('updated', '<=', to_timestamp(end))
if daq_run_id:
LOGGER.info('Limiting to DAQ run id %s', daq_run_id)
query = query.where('daq_run_id', '==', daq_run_id)
runids = query.order_by(u'updated', direction=DESCENDING).limit(limit_count).stream()
for runid in runids:
json_report = self._get_json_report(runid)
if json_report:
yield json_report
def _query_user(self, message):
reply = input(message)
options = set(('y', 'Y', 'yes', 'YES', 'Yes', 'sure'))
if reply in options:
return True
return False
if __name__ == '__main__':
logger.set_config(fmt='%(levelname)s:%(message)s', level="INFO")
CONFIGURATOR = configurator.Configurator()
CONFIG = CONFIGURATOR.parse_args(sys.argv)
GCP = GcpManager(CONFIG, None)
if CONFIG.get('register_offenders'):
GCP.register_offenders()
else:
print('Unknown command mode for gcp module.')
|
import gym
try:
import gym_fetch_stack
except ImportError:
pass
import os
import ast
from collections import OrderedDict
from ddpg_curiosity_mc_her import logger
from ddpg_curiosity_mc_her.ddpg.ddpg import DDPG
from mpi4py import MPI
DEFAULT_ENV_PARAMS = {
'FetchReach-v1': {
'n_cycles': 10,
},
'boxpush-v0': {
'n_cycles': 10,
},
}
DEFAULT_PARAMS = {
'env_id': 'FetchReach-v1', # Try HalfCheetah-v2 for plain DDPG, FetchReach-v1 for HER
'do_evaluation': True,
'render_eval': False,
'render_training': False,
'seed': 42,
'train_policy_fn': 'epsilon_greedy_noisy_explore',
'eval_policy_fn': 'greedy_exploit',
    'agent_roles': 'exploit, explore',  # choices are 'exploit, explore', 'exploit', and 'explore'
'memory_type': 'replay_buffer', # choices are 'replay_buffer' or 'ring_buffer'. 'ring_buffer' can't be used with HER.
'heatmaps': False, # generate heatmaps if using a gym-boxpush or FetchStack environment
    'boxpush_heatmaps': False,  # old argument, doesn't do anything, remaining to not break old scripts
# networks
'exploit_Q_lr': 0.001, # critic learning rate
'exploit_pi_lr': 0.001, # actor learning rate
'explore_Q_lr': 0.001, # critic learning rate
'explore_pi_lr': 0.001, # actor learning rate
'dynamics_lr': 0.007, # dynamics module learning rate
'exploit_polyak_tau': 0.001, # polyak averaging coefficient (target_net = (1 - tau) * target_net + tau * main_net)
'explore_polyak_tau': 0.05, # polyak averaging coefficient (target_net = (1 - tau) * target_net + tau * main_net)
'exploit_gamma': 'auto', # 'auto' or floating point number. If auto, gamma is 1 - 1/episode_time_horizon
'explore_gamma': 'auto', # 'auto' or floating point number. If auto, gamma is 1 - 1/episode_time_horizon
'episode_time_horizon': 'auto', # 'auto' or int. If 'auto' T is inferred from env._max_episode_steps
# training
'buffer_size': int(1E6), # for experience replay
'n_epochs': 25,
'n_cycles': 50, # per epoch
'n_batches': 40, # training batches per cycle
'batch_size': 1024, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.
'rollout_batches_per_cycle': 8,
    'rollout_batch_size': 1,  # number of rollouts per mpi thread
'n_test_rollouts': 50, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
'noise_eps' : 0.2,
'random_eps' : 0.3,
# HER
'use_her': True,
'replay_strategy': 'future', # supported modes: future, none
'replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future
'sub_goal_divisions': 'none',
# Save and Restore
'save_at_score': .98, # success rate for HER, mean reward per episode for DDPG
'stop_at_score': 'none', # success rate for HER, mean reward per episode for DDPG
'save_checkpoints_at': 'none',
'restore_from_ckpt': 'none',
'do_demo_only': False,
'demo_video_recording_name': 'none',
# GPU Usage Overrides
'cuda' : False,
'num_gpu' : 'none',
}
CACHED_ENVS = {}
def cached_make_env(make_env):
"""
Only creates a new environment from the provided function if one has not yet already been
created. This is useful here because we need to infer certain properties of the env, e.g.
its observation and action spaces, without any intend of actually using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env]
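# Usage sketch (illustrative, not from the original source): the factory
# callable itself is the cache key, so passing the same function twice
# returns the same environment instance.
#
#   env_a = cached_make_env(make_env)
#   env_b = cached_make_env(make_env)
#   assert env_a is env_b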
def prepare_params(kwargs):
env_id = kwargs['env_id']
def make_env():
return gym.make(env_id)
kwargs['make_env'] = make_env
tmp_env = cached_make_env(kwargs['make_env'])
kwargs['T'] = kwargs['episode_time_horizon']
del kwargs['episode_time_horizon']
if kwargs['T'] == 'auto':
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
else:
kwargs['T'] = int(kwargs['T'])
tmp_env.reset()
if kwargs['use_her'] is False:
# If HER is disabled, disable other HER related params.
kwargs['replay_strategy'] = 'none'
kwargs['replay_k'] = 0
if 'BoxPush' not in kwargs['env_id'] and 'FetchStack' not in kwargs['env_id']:
kwargs['heatmaps'] = False
for gamma_key in ['exploit_gamma', 'explore_gamma']:
kwargs[gamma_key] = 1. - 1. / kwargs['T'] if kwargs[gamma_key] == 'auto' else float(kwargs[gamma_key])
# if kwargs['map_dynamics_loss'] and 'BoxPush' in kwargs['env_id'] and 'explore' in kwargs['agent_roles']:
# kwargs['dynamics_loss_mapper'] = DynamicsLossMapper(
# working_dir=os.path.join(logger.get_dir(), 'dynamics_loss'),
# sample_env=cached_make_env(kwargs['make_env'])
# )
# else:
# kwargs['dynamics_loss_mapper'] = None
# for network in ['exploit', 'explore']:
# # Parse noise_type
# action_noise = None
# param_noise = None
# nb_actions = tmp_env.action_space.shape[-1]
# for current_noise_type in kwargs[network+'_noise_type'].split(','):
# current_noise_type = current_noise_type.strip()
# if current_noise_type == 'none':
# pass
# elif 'adaptive-param' in current_noise_type:
# _, stddev = current_noise_type.split('_')
# param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
# elif 'normal' in current_noise_type:
# _, stddev = current_noise_type.split('_')
# action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
# elif 'ou' in current_noise_type:
# _, stddev = current_noise_type.split('_')
# action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
# sigma=float(stddev) * np.ones(nb_actions))
# else:
# raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
# kwargs[network+'_action_noise'] = action_noise
# kwargs[network+'_param_noise'] = param_noise
# del(kwargs[network+'_noise_type'])
#TODO
kwargs['train_rollout_params'] = {
'compute_Q': False,
'render': kwargs['render_training']
}
kwargs['eval_rollout_params'] = {
'compute_Q': True,
'render': kwargs['render_eval']
}
# if kwargs['mix_extrinsic_intrinsic_objectives_for_explore'] == 'none':
# kwargs['mix_extrinsic_intrinsic_objectives_for_explore'] = None
# else:
# weights_string = kwargs['mix_extrinsic_intrinsic_objectives_for_explore']
# kwargs['mix_extrinsic_intrinsic_objectives_for_explore'] = [float(w) for w in weights_string.split(',')]
# assert len(kwargs['mix_extrinsic_intrinsic_objectives_for_explore']) == 2
if kwargs['restore_from_ckpt'] == 'none':
kwargs['restore_from_ckpt'] = None
if kwargs['stop_at_score'] == 'none':
kwargs['stop_at_score'] = None
else:
kwargs['stop_at_score'] = float(kwargs['stop_at_score'])
if kwargs['sub_goal_divisions'] == 'none':
kwargs['sub_goal_divisions'] = None
else:
sub_goal_string = kwargs['sub_goal_divisions']
sub_goal_divisions = ast.literal_eval(sub_goal_string)
assert type(sub_goal_divisions) == list
for list_elem in sub_goal_divisions:
assert type(list_elem) == list
for index in list_elem:
assert type(index) == int
kwargs['sub_goal_divisions'] = sub_goal_divisions
# if kwargs['split_gpu_usage_among_device_nums'] == 'none':
# kwargs['split_gpu_usage_among_device_nums'] = None
# else:
# gpu_string = kwargs['split_gpu_usage_among_device_nums']
# gpu_nums = ast.literal_eval(gpu_string)
# assert len(gpu_nums) >= 1
# for gpu_num in gpu_nums:
# assert type(gpu_num) == int
# kwargs['split_gpu_usage_among_device_nums'] = gpu_nums
# original_COMM_WORLD_rank = MPI.COMM_WORLD.Get_rank()
# kwargs['explore_comm'] = MPI.COMM_WORLD.Split(color=original_COMM_WORLD_rank % kwargs['num_model_groups'],
# key=original_COMM_WORLD_rank)
if kwargs['save_checkpoints_at'] == 'none':
kwargs['save_checkpoints_at'] = None
else:
save_checkpoints_list = ast.literal_eval(kwargs['save_checkpoints_at'])
assert type(save_checkpoints_list) == list
for i in range(len(save_checkpoints_list)):
save_checkpoints_list[i] = float(save_checkpoints_list[i])
kwargs['save_checkpoints_at'] = save_checkpoints_list
if kwargs["demo_video_recording_name"] == 'none':
kwargs["demo_video_recording_name"] = None
else:
assert type(kwargs["demo_video_recording_name"]) == str
return kwargs
def log_params(params, logger=logger):
for key in sorted(params.keys()):
logger.info('{}: {}'.format(key, params[key]))
def get_convert_arg_to_type_fn(arg_type):
if arg_type == bool:
def fn(value):
if value in ['None', 'none']:
return None
if value in ['True', 'true', 't', '1']:
return True
elif value in ['False', 'false', 'f', '0']:
return False
else:
raise ValueError("Argument must either be the string, \'True\' or \'False\'")
return fn
elif arg_type == int:
def fn(value):
if value in ['None', 'none']:
return None
return int(float(value))
return fn
elif arg_type == str:
return lambda arg: arg
else:
def fn(value):
if value in ['None', 'none']:
return None
return arg_type(value)
return fn
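# A hedged example of the converter factory above (these calls are
# illustrative, not part of the original source): each returned fn maps
# CLI strings to typed values, with 'none'/'None' mapping to None.
#
#   to_bool = get_convert_arg_to_type_fn(bool)
#   to_int = get_convert_arg_to_type_fn(int)
#   assert to_bool('true') is True and to_bool('none') is None
#   assert to_int('1e3') == 1000  # int() is applied after float() parsing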
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
def dims_to_shapes(input_dims):
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
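# Minimal sketch of the two helpers above (illustrative values, not from the
# original source): Bunch gives attribute-style access to a dict, and
# dims_to_shapes turns per-key sizes into shape tuples (0 -> scalar shape).
#
#   b = Bunch({'lr': 0.001})
#   assert b.lr == 0.001
#   dims_to_shapes({'o': 10, 'u': 4, 'info': 0})
#   # -> {'o': (10,), 'u': (4,), 'info': ()}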
def create_agents(input_dims, env, params):
agent_roles = params['agent_roles'].replace(' ', '').split(',')
agents = OrderedDict()
if 'exploit' in agent_roles:
role = 'exploit'
agent = DDPG(role=role, input_dims=input_dims, env=env, params=params, external_critic_fn=None)
agents[role] = agent
# exploit_critic_fn = agent.critic_with_actor_fn
logger.info('Using ' + role + ' agent.')
else:
exploit_critic_fn = None
if 'explore' in agent_roles:
role = 'explore'
agent = DDPG(role=role, input_dims=input_dims, env=env, params=params)
agents[role] = agent
logger.info('Using ' + role + ' agent.')
return agents
|
from iridauploader.parsers.miseq.parser import Parser
|
# Copyright (c) 2017, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The high-level interface that Zoe uses to talk to the configured container backend."""
from typing import Dict
from zoe_lib.config import get_conf
from zoe_lib.state import Service, Execution
from zoe_master.backends.proxy import gen_proxypath, JUPYTER_NOTEBOOK, MONGO_EXPRESS, JUPYTER_PORT, MONGO_PORT
from zoe_master.exceptions import ZoeStartExecutionFatalException
from zoe_master.workspace.filesystem import ZoeFSWorkspace
def gen_environment(execution: Execution, service: Service, env_subst_dict: Dict):
""" Generate a dictionary containing the current cluster status (before the new container is spawned)
This information is used to substitute template strings in the environment variables."""
env_list = []
for env_name, env_value in service.environment:
try:
env_value = env_value.format(**env_subst_dict)
except KeyError:
error_msg = "Unknown variable in environment expression '{}', known variables are: {}".format(env_value, list(env_subst_dict.keys()))
service.set_error(error_msg)
            raise ZoeStartExecutionFatalException("Service {} has wrong environment expression".format(service.name))
env_list.append((env_name, env_value))
# FIXME this code needs to be removed/changed to be generic
#if 'jupyter' in service.image_name:
env_list.append((JUPYTER_NOTEBOOK, gen_proxypath(execution, service) + '/' + JUPYTER_PORT))
#elif 'mongo-express' in service.image_name:
env_list.append((MONGO_EXPRESS, gen_proxypath(execution, service) + '/' + MONGO_PORT))
env_list.append(('EXECUTION_ID', str(execution.id)))
env_list.append(('DEPLOY_NAME', get_conf().deployment_name))
env_list.append(('UID', execution.user_id))
env_list.append(('SERVICE_NAME', service.name))
env_list.append(('PROXY_PATH', get_conf().proxy_path))
fswk = ZoeFSWorkspace()
env_list.append(('ZOE_WORKSPACE', fswk.get_mountpoint()))
return env_list
def gen_volumes(service_: Service, execution: Execution):
"""Return the list of default volumes to be added to all containers."""
vol_list = []
fswk = ZoeFSWorkspace()
wk_vol = fswk.get(execution.user_id)
vol_list.append(wk_vol)
return vol_list
def gen_labels(service: Service, execution: Execution):
"""Generate container labels, useful for identifying containers in monitoring systems."""
return {
'zoe_execution_name': execution.name,
'zoe_execution_id': str(execution.id),
'zoe_service_name': service.name,
'zoe_service_id': str(service.id),
'zoe_owner': execution.user_id,
'zoe_deployment_name': get_conf().deployment_name,
'zoe_type': 'app_service'
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-09-07 12:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('categoria', models.CharField(max_length=50, unique=True)),
('criado', models.DateField(auto_now_add=True)),
],
options={
'verbose_name': 'Categoria',
'verbose_name_plural': 'Categorias',
},
),
migrations.CreateModel(
name='Produto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('produto', models.CharField(max_length=100, unique=True, verbose_name=b'Produto')),
('importado', models.BooleanField(default=False, verbose_name=b'Importado')),
('preco_venda', models.DecimalField(decimal_places=2, max_digits=7, verbose_name=b'Pre\xc3\xa7o De Venda')),
('preco_compra', models.DecimalField(decimal_places=2, max_digits=7, verbose_name=b'Pre\xc3\xa7o De Compra')),
('ipi', models.DecimalField(blank=True, decimal_places=2, max_digits=3, verbose_name=b'IPI')),
('estoque', models.IntegerField(default=0, verbose_name=b'Estoque atual')),
('estoque_min', models.PositiveIntegerField(default=0, verbose_name=b'Estoque m\xc3\xadnimo')),
('descricao', models.CharField(max_length=300, verbose_name=b'Observa\xc3\xa7\xc3\xa3o')),
('data_de_cadastro', models.DateField(auto_now_add=True)),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='estoque.Categoria', verbose_name=b'categoria')),
],
options={
'verbose_name': 'Produto',
'verbose_name_plural': 'Produtos',
},
),
]
|
# Program that manages a soccer player's performance: read the player's name and how many
# matches they played, then read the number of goals scored in each match. At the end,
# everything is shown in a dictionary, including the total goals scored during the championship.
dados = {'nome': str(input("Enter the player's name: ")).strip().title()}
qtd = int(input(f'How many matches did {dados["nome"]} play? '))
gols = []
for c in range(0, qtd):
    gols.append(int(input(f'  How many goals in match {c}: ')))
dados['gols'] = gols[:]
dados['total'] = sum(gols)
print('=-'*20)
print(dados)
print('=-'*20)
for v, n in dados.items():
    print(f'Field {v} has value {n}')
print('=-'*20)
print(f'Player {dados["nome"]} played {len(dados["gols"])} matches: ')
for i, v in enumerate(dados["gols"]):
    print(f'  => in match {i} scored {v} goals.')
print(f'That was a total of {dados["total"]} goals')
|
import json
from eth_typing import (
BLSPubkey,
BLSSignature,
)
from py_ecc.bls import G2ProofOfPossession as bls
from eth2deposit.utils.ssz import (
compute_domain,
compute_signing_root,
Deposit,
DepositMessage,
)
from eth2deposit.utils.constants import (
DOMAIN_DEPOSIT,
MAX_DEPOSIT_AMOUNT,
MIN_DEPOSIT_AMOUNT,
)
def verify_deposit_data_json(filefolder: str) -> bool:
    with open(filefolder, 'r') as f:
        deposit_json = json.load(f)
    return all(verify_deposit(deposit) for deposit in deposit_json)
def verify_deposit(deposit_data_dict: dict) -> bool:
'''
Checks whether a deposit is valid based on the eth2 rules.
https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md#deposits
'''
pubkey = BLSPubkey(bytes.fromhex(deposit_data_dict['pubkey']))
withdrawal_credentials = bytes.fromhex(deposit_data_dict['withdrawal_credentials'])
amount = deposit_data_dict['amount']
signature = BLSSignature(bytes.fromhex(deposit_data_dict['signature']))
deposit_data_root = bytes.fromhex(deposit_data_dict['signed_deposit_data_root'])
# Verify deposit amount
if not MIN_DEPOSIT_AMOUNT < amount <= MAX_DEPOSIT_AMOUNT:
return False
# Verify deposit signature && pubkey
deposit_message = DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
domain = compute_domain(domain_type=DOMAIN_DEPOSIT)
signing_root = compute_signing_root(deposit_message, domain)
if not bls.Verify(pubkey, signing_root, signature):
return False
# Verify Deposit Root
deposit = Deposit(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount, signature=signature)
return deposit.hash_tree_root == deposit_data_root
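# A minimal usage sketch, assuming a deposit data file produced by the eth2
# deposit tooling exists at the (hypothetical) path below:
#
#   if verify_deposit_data_json('validator_keys/deposit_data.json'):
#       print('all deposits valid')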
|
"""
Same as Server.py, but instead of discrete values (left, right, forward), that is being
stored, it is steering angles between -15 to +15
"""
from time import gmtime, strftime
import gzip
import sys
import json
sys.path.insert(0, '/home/pi/SDC_Project/RaspberryPi/Hardware')
from _thread import *
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import numpy as np
import os
import socket
from TwoMotorDriver import TwoMotorDriver
from Motor import Motor
from GpioMode import GpioMode
from SteerMotor import ServoSteerMotor
from BackMotor import BackMotor
from Commands import Commands
from Framelet import Framelet
import queue
import pickle
import datetime as dt
import argparse
from enum import Enum
class CollectingDataType(Enum):
DiscreteTurns = 0,
SteerAngles = 1
class CollectingTrainingData:
def __init__(self, steerMotorCenterAngle, backMotor, steerMotor, dataFilepath):
# self.collectingDataType = collectingDataType.value;
self.tripQueue = queue.Queue() #FIFO (first in, first out)
self.isMovingForward = False
self.speed = 35; #TODO: Added because not using Driver class
self.isPaused = False;
self.pickleSaveCount = 1;
# self.pickle_filename = "/RoboticFiles/training_directory/data_{}.pickle"
self.dataFilepath = dataFilepath;
self.pickle_filename = os.path.join(self.dataFilepath, 'training_data/data_{}')
# self.pickle_filename = "/home/pi/PiRoboticFiles/SteeringAngleData/training_data/data_{}"
# self.discardPickle_filename = "/RoboticFiles/discarded_directory/data_{}.pickle"
self.discardPickle_filename = os.path.join(self.dataFilepath, 'discarded_data/discarded_{}')
# self.discardPickle_filename = "/home/pi/PiRoboticFiles/SteeringAngleData/discarded_data/discarded_{}"
self.serverRunning = False;
self.frameCount = 1
self.startClicks = 0;
self.steerMotorCenterAngle = steerMotorCenterAngle #the angle of servo where the steering wheels are centered (usually around 125) (range of 0 - 180)
# Range of -10 to +10, This is stored in training data, and input param as steerMotor.setDegree()
self.steerDegree = 0 #holds the current steer degree
self.backMotor = backMotor
self.steerMotor = steerMotor
# self.InitMotors(steerMotorCenterAngle)
self.InitServer()
def getTimeStamp(self):
return dt.datetime.fromtimestamp(time.time()).strftime('%Y.%m.%d %H.%M.%S')
def InitServer(self):
self.HOST = '' # Symbolic name meaning all available interfaces
self.PORT = 5000 # Arbitrary non-privileged port #TODO: try change port from 5000 to diff. - flask uses port 5000
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow socket reuse
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self.sock.bind((self.HOST, self.PORT))
        except socket.error as msg:  # OSError in Python 3; use the errno/strerror attributes
            print('Bind failed. Error Code : ' + str(msg.errno) + ' Message ' + str(msg.strerror))
            sys.exit()
self.sock.listen(10)
# def InitMotors(self, steerMotorCenterAngle):
# #NOTE: These are the old ones, before changing the wires of BM in Motor Driver to the Steer Motor inputs (input 1 and 2)
# # BM_ForwardPin = 13
# # BM_ReversePin = 15
# # BM_pwmPin = 12
# BM_ForwardPin = 11
# BM_ReversePin = 7
# BM_frequency = 60
# BM_pwmPin = 16
#
# SM_pwmPin = 3
# SM_frequency = 50
#
# self.backMotor = BackMotor(Motor(BM_ForwardPin, BM_ReversePin, BM_pwmPin, BM_frequency))
# self.steerMotor = ServoSteerMotor(SM_pwmPin, GpioMode.BOARD, steerMotorCenterAngle, SM_frequency)
# self.steerMotor.setDegree(0) #center it
def StartServer(self):
self.serverRunning = True;
try:
while self.serverRunning:
# initialize socket server
self.sock.listen(10)
print("waiting")
conn, addr = self.sock.accept()
print('Connected with ' + addr[0] + ':' + str(addr[1]))
self.listen(conn,self.tripQueue)
except KeyboardInterrupt:
pass
finally:
print("exiting")
try:
self._stopAllMotors()
self.backMotor.cleanup()
self.steerMotor.cleanup()
except:
pass
self.sock.close()
time.sleep(0.1) # wait a little for threads to finish
#TODO: Stop recording and save current data
#TODO: Add way to wait for thread to finish then close program
def record(self):
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(320, 240))
t0 = time.time()
try:
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# raw NumPy array representing the image
image = frame.array
if self.isPaused == False: #only add framelet when not paused training
if(self.isMovingForward): #only save the data that where car is moving
self.tripQueue = self.AssembleWithSteerDegree(self.tripQueue, image, self.steerDegree,self.frameCount)
#check if should save data
if(self.frameCount % 125 == 0): #300frames is 10 seconds #125 is 1/4 the track
print("self.tripQueue size = {}".format(self.tripQueue.qsize()))
t1 = time.time()
# tempQ = queue.Queue() #make copy of queue to save
# for i in self.tripQueue.queue: #transfer elements to new queue
# tempQ.put(i)
tripList = [item for item in self.qdumper(self.tripQueue)]
start_new_thread(self.SaveToPickle,(tripList,self.pickle_filename.format(self.getTimeStamp()),));
# start_new_thread(self.SaveCompressedToPickle, (tempQ, self.pickle_filename.format(self.getTimeStamp()),));
self.pickleSaveCount+=1
with self.tripQueue.mutex: #thread safe for clearing
self.tripQueue.queue.clear()
print("\t{0:.2f} Seconds for 300 Frames".format(t1-t0))
# show image on screen
# cv2.imshow('i', image)
if (self.isMovingForward):
self.frameCount += 1
# clear the stream in preparation for the next frame -- IMPORTANT
rawCapture.truncate(0)
# if cv2.waitKey(1) & 0xFF == ord("q"):
# break
# except Exception as e:
# print("---EXCEPTION OCCURED-----\n{}".format(e))
finally:
cv2.destroyAllWindows()
camera.close()
self.sock.close()
print("closing recording")
def qdumper(self,q):
for i in q.queue:
yield i
## DONT COMPRESS --> TO SLOW...
def SaveCompressedToPickle(self, tripQueue, filename):
print("-----STARTING TO PICKLE COMPRESSED-----------")
t0 = time.time()
file = gzip.GzipFile(filename, 'wb')
remaining = [item for item in self.qdumper(tripQueue)] # convert to list, not queue, b/c pickle can't save a queue
pickle.dump(remaining, file, 4) # 4 - protocol that is the latest protocol
file.close()
t1 = time.time()
print('---------FINISHED PICKLING COMPRESSED-----------\n{0:.2f}seconds'.format(t1 - t0))
def SaveToPickle(self, tripList, filename):
print("-----STARTING TO PICKLE-----------")
t0 = time.time()
pickle_out = open(filename + '.pickle', "wb")
# file = gzip.GzipFile(filename,'wb')
#framelet_list file to upload = remaining
# remaining = [item for item in self.qdumper(tripQueue)] #convert to list, not queue, b/c pickle can't save a queue
print("'remaining' array size: {}".format(len(tripList)))
pickle.dump(tripList,pickle_out, 4)
# pickle.dump(remaining,file,-1) #-1 protocol gets the latest protocol
pickle_out.close()
#filename + '-frameCount-{}'.format(len(remaining)) +'_ready.pickle'
newFileName = "{0}-frameCount-{1:03}_ready.pickle".format(filename, len(tripList)) #newFileNameFormat = data_2017.11.16 21.55.25-frameCount-125_ready.pickle
os.rename(filename + '.pickle',newFileName)
# file.close()
t1 = time.time()
print('---------FINISHED PICKLING-----------\n{0:.2f}seconds'.format(t1-t0))
    '''Throws a ValueError: setting an array element with a sequence'''
def SaveToNpz(self, tripQueue, directory):
print("--------saving to npz--------")
t0 = time.time()
frameList = []
cmdList = []
frameNameList = []
print("tripQueue.size = {}".format(tripQueue.qsize()))
for i in tripQueue.queue:
frameNameList.append(i.frameName)
frameList.append(i.frame)
cmdList.append(i.cmd)
print("frameList length = {}".format(len(frameList)))
#save data
file_name = strftime("%m.%d.%Y_%H.%M", gmtime())
if not os.path.exists(directory):
os.makedirs(directory)
try:
np.savez(directory + '/' + file_name + '.npz', frameName=frameNameList, frame=frameList,
cmd=cmdList);
t1 = time.time()
print("------Saved data to npz file!------- {0:.2f}ms".format((t1-t0)*1000))
except IOError as e:
print(e)
def _stopAllMotors(self):
self.backMotor.Stop()
self.steerDegree = 0
self.steerMotor.turn(self.steerDegree*10) # reset to center
self.steerMotor.offMotor() # off motor
def _bytesToString(self, input):
input = input.decode("utf-8")
return input
def AssembleWithCmd(self, tripQueue, image, cmd, frameCount):
name = "frame_{}".format(frameCount)
tripQueue.put(Framelet(name, image, cmd))
return tripQueue
#TODO: Bug: cmds is a list of cmds sometimes (when the SS is sent fast), so i shoudld be adding all of them, but can only add the last cmd in the list. If want to add the whole thing
#TODO: then i must make Listen() a totally different thread altogether and should be called once and the UpdateFramelet() should be called from there
def AssembleWithSteerDegree(self, tripQueue, image, steerDegree, frameCount):
name = "frame_{}".format(frameCount)
tripQueue.put(Framelet(name, image, steerDegree=steerDegree))
return tripQueue
def ExecuteCommand(self, data, tripQueue):
'''
executes the commands from data recieved in socket server, also
changes boolean values representing the movement of car
:param data: data recieved from socket server
'''
# cmds = []
if data is None:
return
for i in range(len(data) - 1):
if data[i] == ',': # STOP ALL MOTORS
# self.driver.StopAll()
self._stopAllMotors()
print("stop all")
# cmds.append(Commands.STOP_ALL_MOTORS.value)
self.isMovingForward = False
elif data[i][0:2] == 'SS':
self.speed = int(data[i][2:]) # get second half of data with the number for speed
print("Speed: {}".format(self.speed))
if (self.isMovingForward):
self.backMotor.Forward(self.speed)
# elif (self.isMovingBackward):
# self.twoMotorDriver.Reverse(self.speed)
# self.driver.Speed(int(speed))
#todo: add speed command to training data
elif data[i][0:2] == 'SA': #Steering Angle Change
self.steerDegree = int(data[i][2:]) #get second half of data with the number for steer angle
self.steerMotor.turn(self.steerDegree*10)
# self.steerMotor.offMotor()
print("Steer Degree: {}".format(self.steerDegree))
elif data[i] == 'FF':
print("forward")
self.backMotor.Forward(self.speed)
self.isMovingForward = True;
# elif data[i] == 'BB':
# print("Back")
# self.backMotor.Reverse(self.speed)
# self.isMovingForward = False
elif data[i] == '@': #PAUSE Training
self.isPaused = True #Pauses videorecorder
# self.driver.StopAll()
self._stopAllMotors()
print("------PAUSED---")
elif data[i] == '#': #UNPAUSE TRAINING
self.isPaused = False; #unpauses videorecorder
print("------UNPAUSED---")
elif data[i] == '*': #SAVE Training - sent after paused
t0 = time.time()
print("tripQueue size = {}".format(tripQueue.qsize()))
# tempQ = queue.Queue() # make copy of queue to save
# for i in tripQueue.queue: # transfer elements to new queue
# tempQ.put(i)
tripList = [item for item in self.qdumper(self.tripQueue)];
start_new_thread(self.SaveToPickle,(tripList, self.pickle_filename.format(self.getTimeStamp()),));
self.pickleSaveCount += 1
with tripQueue.mutex: # thread safe for clearing
tripQueue.queue.clear()
t1 = time.time()
print("\t{0:.2f} Seconds for 300 Frames".format(t1 - t0))
elif data[i] == '&': #DISCARD Training
t0 = time.time()
print("--------DISCARDING DATA ----\ntripQueue size = {}".format(tripQueue.qsize()))
# tempQ = queue.Queue() # make copy of queue to save
# for i in tripQueue.queue: # transfer elements to new queue
# tempQ.put(i)
#start_new_thread(self.SaveToPickle,(tempQ, self.discardPickle_filename.format(self.getTimeStamp()),));
#DELETE DISCARDED QUEUE
with tripQueue.mutex: # thread safe for clearing
tripQueue.queue.clear()
# self.pickleSaveCount += 1
t1 = time.time()
print("\t{0:.2f} Seconds for 500 Frames".format(t1 - t0))
elif data[i] ==':': #START TRAINING
self.startClicks += 1
# self.isMovingForward = True; #TODO; should i do this?????
if(self.startClicks == 1): #if first time pressing start
start_new_thread(self.record, ()) #start new recording thread
else:
self.isPaused = False; # unpauses videorecorder
print("------UNPAUSED---FROM START CMD")
else:
print("Bad Command")
# return cmds
def listen(self, conn, tripQueue):
while True:
data = conn.recv(1024)
if not data:
print("socket closed")
break;
data = self._bytesToString(data)
#split by delimeter
data = data.split(';\n')
self.ExecuteCommand(data, tripQueue)
def saveRecentSteerMotorCenterAngle(steerAngle, file):
try:
data = {"RecentSteerServoMotorAngle": steerAngle};
with open(file, 'w') as f:
json.dump(data, f, indent=4, separators=(',', ': '));
except Exception as e:
print("Error while saving RecentSteerServoMotorAngle to JSON, could not save")
if __name__ == "__main__":
jsonFilePath = '/home/pi/PiRoboticFiles/CollectingTrainingData/ExtraInfo.json'
#get most recent steer servo motor angle from JSON file
with open(jsonFilePath) as json_file:
data = json.load(json_file)
recentServoMotorAngle = int(data['RecentSteerServoMotorAngle'])
BM_ForwardPin = 11
BM_ReversePin = 7
BM_pwmPin = 16
BM_frequency = 60
SM_pwmPin = 3
SM_frequency = 50
backMotor = BackMotor(Motor(BM_ForwardPin, BM_ReversePin, BM_pwmPin, BM_frequency))
steerMotor = ServoSteerMotor(SM_pwmPin, GpioMode.BOARD, recentServoMotorAngle, SM_frequency)
# steerMotor.setDegree(0) # center it
newSteerMotorCenterAngle = recentServoMotorAngle;
while True:
msg = "Test the Steer Angle for Center. Enter a steer Angle: (most recent: {})".format(recentServoMotorAngle)
inputSteerAngle = int(input(msg))
steerMotor.setCenterAngle(inputSteerAngle)
steerMotor.turn(0)
time.sleep(1.5)
        goodBad = input("Is that good or bad? (Good=g or Bad=b) ")  # input g or b
if(goodBad == 'g'):
newSteerMotorCenterAngle = inputSteerAngle;
saveRecentSteerMotorCenterAngle(newSteerMotorCenterAngle, jsonFilePath)
break;
elif(goodBad !='b'): #not valid input
print("Bad input, Type 'g' or 'b' for good/bad")
ctd = CollectingTrainingData(newSteerMotorCenterAngle, backMotor, steerMotor, "/home/pi/PiRoboticFiles/SteeringAngleData/")
ctd.StartServer()
|
import unittest
import repackage
repackage.up()
import four.bot.helpers
class TestBotHelpers(unittest.TestCase):
def test_get_json(self):
expected = {
"SECRET_TEST1": "abc",
"SECRET_TEST2": "123",
}
        self.assertEqual(four.bot.helpers.get_json("test_json.json"), expected)
|
import os
import zipfile
from django.conf import settings
import datetime
from django import http
from django.core.files import File
from django.http import HttpResponse
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
import uuid
import StringIO
from django.template.loader import get_template
from django.template import Context
from operations.models import Document
def zip_generated_files(file_list=None):
    file_list = file_list or []  # avoid a mutable default argument
    if not file_list:
# create pdfs for testing
x = 1
while x < 3:
filename = '%s_sample_%s.pdf' % (x,str(uuid.uuid4())[:5])
file_list.append(filename)
write_save_pdf('pdf_sample.html',{}, filename)
x +=1
zip_location = '/tmp/Python_%s.zip' % str(uuid.uuid4())[:5]
zip = zipfile.ZipFile(zip_location, 'a')
for file_name in file_list:
location = "/tmp/%s"%file_name
if os.path.exists(location):
zip.write(location, os.path.basename(location))
os.remove(location)
zip.close()
    upload_file = UploadFile.objects.create(model_type="ZIP",
                                            description="Uploaded zip file",
                                            created_at=datetime.datetime.now())
    upload_file.file_name = File(open(zip_location, 'rb'))
upload_file.save()
os.remove(zip_location)
def file_download(file, document=None, content_type='application/force-download'):
document = document or file.document.name
    response = HttpResponse(open(settings.MEDIA_ROOT + "/" + document),
                            content_type=content_type)
response['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(document)
return response
def save_file(user, name, file_path, file_type="Download", description=None):
upload_file = Document.objects.create(file_type=file_type,
description=description,
created_at=datetime.datetime.now(),
created_by=user)
upload_file.document_name = name
upload_file.document.name = 'uploads/documents/%s' % name
upload_file.save()
return upload_file
|
import copy
import json
import os
import pdb
import re
from typing import Dict, List, TypeVar
import torch
from elvis.modeling.models import build_net
from elvis.modeling.models.layers import FC
from torch.nn import functional as F
from .base import MetaArch
from .build import ARCH_REGISTRY
Tensor = TypeVar('torch.tensor')
__all__ = ['MetaRetrieval',
'build_meta_retrieval']
class MetaRetrieval(MetaArch):
def __init__(self,
model,
max_patches,
max_tokens):
        super(MetaRetrieval, self).__init__()
self.model = model
self.max_patches = max_patches
self.max_tokens = max_tokens
self.itm_fc = FC(in_features=self.model.embed_dim, out_features=2)
def forward(self, vis_in, txt_in, vis_mask, txt_mask, **kwargs):
out = self.model(vis_in=vis_in, vis_mask=vis_mask, txt_in=txt_in, txt_mask=txt_mask)
t_pool = out[:, 0]
#v_pool = out[:, self.max_tokens]
logits = self.itm_fc(t_pool)
return {'retrieval_logits': logits}
def compute_loss(self, vqa_logits, gt_answers, **kwargs) -> Dict:
pass #todo
"""
vqa_loss = F.binary_cross_entropy_with_logits(vqa_logits, gt_answers, reduction='none')
vqa_loss = vqa_loss.sum(dim=-1).mean()
return {'loss': vqa_loss}
"""
def save_on_disk(self, path):
#save vocab only once
vocab_ckp = os.path.join(path, 'VQA.vocab')
if not os.path.exists(vocab_ckp):
with open(vocab_ckp, 'w') as fp:
json.dump(self.ans2id, fp)
#use deepcopy to avoid problems with DistributedDataParallel
state_dict = copy.deepcopy(self).cpu().state_dict()
ckp_file = os.path.join(path, 'state_dict.pt')
torch.save(state_dict, ckp_file)
def predict(self, vis_in, txt_in, vis_mask, txt_mask, **kwargs):
out = self.forward(vis_in, txt_in, vis_mask, txt_mask, **kwargs)
probs = F.softmax(out['retrieval_logits'], dim=-1).squeeze(0)
#return score of item being the true one
if probs.dim() > 1:
scores = [p[1].item() for p in probs]
return scores
score = probs[1].item()
return score
@ARCH_REGISTRY.register()
def build_meta_retrieval(cfg, **kwargs):
model, data_interface = build_net(cfg.MODEL, get_interface='retrieval')
vqa = MetaRetrieval(model,
max_patches=cfg.MODEL.MAX_VIS_PATCHES,
max_tokens=cfg.MODEL.MAX_N_TOKENS)
return vqa, data_interface
|
import pygame, sys, time, random
#Sarah Maurice found this online.
#Modified by nholtschulte
def getRandX():
return random.randint(0, x_increments) * block_size
def getRandY():
return random.randint(0, y_increments) * block_size
block_size = 40
width = 1000
height = 600
x_increments = width // block_size - 1
y_increments = height // block_size - 1
fps = pygame.time.Clock()
direction = "RIGHT" # Initial direction
snake_position = [block_size, block_size] # Initial snake position
snake_body = [[100, 50], [90, 50], [100, 50]]
# It places the food randomly, excluding the border
food_position = [getRandX(), getRandY()]
food_spawn = True
# Game surface
pygame.init()
player_screen = pygame.display.set_mode((width, height))
# Will define the colors
red = pygame.Color("red")
green = pygame.Color("green")
black = pygame.Color("black")
orange = pygame.Color("orange")
white = pygame.Color("white")
done = False
while not done:
for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
elif event.type == pygame.KEYDOWN:
# Choose direction by user input, block opposite directions
key_right = event.key == pygame.K_RIGHT or event.key == ord("d")
key_left = event.key == pygame.K_LEFT or event.key == ord("a")
key_down = event.key == pygame.K_DOWN or event.key == ord("s")
key_up = event.key == pygame.K_UP or event.key == ord("w")
if key_right and direction != "LEFT":
direction = "RIGHT"
elif key_left and direction != "RIGHT":
direction = "LEFT"
elif key_down and direction != "UP":
direction = "DOWN"
elif key_up and direction != "DOWN":
direction = "UP"
elif event.key == pygame.K_ESCAPE:
done = True # It will quit when esc is pressed
# Simulates the snake movement(together with snake_body_pop)
if direction == "RIGHT":
snake_position[0] += block_size
elif direction == "LEFT":
snake_position[0] -= block_size
elif direction == "DOWN":
snake_position[1] += block_size
elif direction == "UP":
snake_position[1] -= block_size
# Body mechanics
snake_body.insert(0, list(snake_position))
if snake_position == food_position:
food_spawn = False # It removes the food from the board
else:
# If the food is taken it will not remove the last body piece(raising snakes size)
snake_body.pop()
if food_spawn is False: # When a food is taken it will respawn randomly
food_position = [getRandX(), getRandY()]
food_spawn = True # It will set the food to True again, to keep the cycle
# Drawing
player_screen.fill(white) # Set the background to white
for position in snake_body: # Snake representation on the screen
pygame.draw.rect(player_screen, green, pygame.Rect(position[0], position[1], block_size, block_size))
# Food representation on the screen
pygame.draw.rect(player_screen, orange, pygame.Rect(food_position[0], food_position[1], block_size, block_size))
if snake_position[0]<0 or snake_position[0]>width or snake_position[1]<0 or snake_position[1]>height:
done = True # Game over when the Snake hit a wall
for block in snake_body[1:]:
if snake_position == block:
done = True # Game over when the Snake hits itself
pygame.display.flip() # It constantly updates the screen
fps.tick(20) # It sets the speed to a playable value
pygame.quit()
sys.exit()
|
import functools
import logging
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import StringProperty, ObjectProperty, NumericProperty, BooleanProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.recycleview import RecycleDataAdapter, RecycleView
from kivymd.uix.button import MDIconButton
from kivymd.uix.list import TwoLineAvatarListItem, IRightBodyTouch
from kivymd.uix.menu import MDDropdownMenu
from angelos.common.misc import Loop
from logo.baseclass.common import Section
from logo.baseclass.dialogs import MessageDialog
from logo import strings
from logo.action.message import SynchronizeMailAction, EmptyTrashAction
class IconRightMenu(IRightBodyTouch, MDIconButton):
pass
class LogoRecycleViewListItemMixin:
"""List item mixin for recycle view."""
data = {}
item_id = ObjectProperty(allownone=True)
error = NumericProperty(allownone=True)
tab = StringProperty(allownone=True)
selected = BooleanProperty(defaultvalue=False)
def populate(self):
try:
self._call("_populate_")
except Exception as e:
logging.error(e, exc_info=True)
def _call(self, name, **kwargs):
tab = self.data.get("tab", "main")
method = getattr(self, name + str(tab), None)
if callable(method):
method(**kwargs)
else:
raise RuntimeError("Method {} not found on {}".format(name + str(tab), str(self)))
def err(self, e):
pass
def clear(self):
keys = self.data.keys()
keys += ["text", "secondary_text", "tertiary_text"]
self.data = {}
self.selected = False
self.item_id = None
self.error = 0
self.tab = ""
for key in keys:
if hasattr(self, key):
setattr(self, key, None)
class MessageListItem(LogoRecycleViewListItemMixin, TwoLineAvatarListItem):
"""Specific RV ListItem for the message section."""
source = StringProperty()
target_id = ObjectProperty() # issuer/owner
_app = None
def __init__(self, **kwargs):
super(MessageListItem, self).__init__(**kwargs)
if not MessageListItem._app:
MessageListItem._app = App.get_running_app()
def open_letter(self):
try:
self._call("_letter_")
except Exception as e:
logging.error(e, exc_info=True)
def _populate_inbox(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_inbox(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Issuer
self.data.setdefault("text", "{:%c}".format(info[3])) # Posted
self.data.setdefault("secondary_text", info[2] if info[2] != "n/a" else str(info[1])) # Sender or Issuer
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_inbox(self):
mail = Loop.main().run(
self._app.ioc.facade.api.mailbox.open_envelope(
self.item_id), wait=True)
MessageDialog(MessageDialog.MODE_READER_RECEIVE, mail, title=strings.TEXT_MESSAGE_INBOX_TITLE).open()
def _populate_outbox(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_outbox(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Owner
self.data.setdefault("text", "{:%c}".format(info[3])) # Posted
self.data.setdefault("secondary_text", info[2] if info[2] != "n/a" else str(info[1])) # Sender or Owner
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_outbox(self):
pass
def _populate_drafts(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_draft(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Owner
self.data.setdefault("text", info[2] if info[2] else "") # Subject
self.data.setdefault("secondary_text", info[3] if info[3] else "") # Receiver
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_drafts(self):
mail = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_draft(
self.item_id), wait=True)
MessageDialog(MessageDialog.MODE_WRITER, mail, title=strings.TEXT_DRAFT).open()
def _populate_read(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_read(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Issuer
self.data.setdefault("text", info[2] if info[2] != "n/a" else str(info[1])) # Subject or Issuer
self.data.setdefault("secondary_text", info[3] + " - " + "{:%c}".format(info[4])) # Sender and Posted
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_read(self):
mail = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_read(
self.item_id), wait=True)
MessageDialog(MessageDialog.MODE_READER_RECEIVE, mail, title=strings.TEXT_MESSAGE).open()
def _populate_trash(self):
info = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_info_trash(
self.data.get("item_id")), wait=True)
self.data.setdefault("target_id", info[1]) # Issuer
self.data.setdefault("text", info[2] if info[2] != "n/a" else str(info[1])) # Subject or Issuer
self.data.setdefault("secondary_text", info[3] + " - " + "{:%c}".format(info[4])) # Sender and Posted
source = os.path.join(os.environ["LOGO_MESSENGER_ASSETS"], "images/dove.png")
self.data.setdefault("source", source)
def _letter_trash(self):
mail = Loop.main().run(
self._app.ioc.facade.api.mailbox.get_trash(
self.item_id), wait=True)
MessageDialog(MessageDialog.MODE_READER_RECEIVE, mail, title=strings.TEXT_MESSAGE_TRASH_TITLE).open()
class LogoRecycleDataAdapter(RecycleDataAdapter):
"""Custom recycle view DataAdapter.
This adapter will load extra data from the vault at scrolling."""
def __init__(self, **kwargs):
super(LogoRecycleDataAdapter, self).__init__(**kwargs)
self.app = App.get_running_app()
def refresh_view_attrs(self, index, data_item, view):
"""Wrapper for the view refresher that loads extra envelope data ad-hoc."""
if "error" not in data_item:
try:
view.data = data_item
view.populate()
except Exception as e:
logging.error(e, exc_info=True)
data_item.setdefault("error", 1)
view.err(e)
view.data = data_item
super(LogoRecycleDataAdapter, self).refresh_view_attrs(index, data_item, view)
def make_view_dirty(self, view, index):
"""Clean up some custom data from the list item"""
view.clear()
        super(LogoRecycleDataAdapter, self).make_view_dirty(view, index)
class LogoRecycleView(RecycleView):
"""Custom recycle view that will set the right DataAdapter."""
def __init__(self, **kwargs):
kwargs.setdefault("view_adapter", LogoRecycleDataAdapter())
super(LogoRecycleView, self).__init__(**kwargs)
class MessageSearch(BoxLayout):
"""Message search box logic."""
pass
class MessageMenu(MDDropdownMenu):
"""Mass selection/deselection and other operations menu."""
def __init__(self, **kwargs):
super(MessageMenu, self).__init__(**kwargs)
menu_tab = {
"inbox": ["sync"],
"outbox": ["sync"],
"drafts": [],
"read": [],
"trash": ["empty"]
}
class Messages(Section):
"""The messages sub screen."""
def __init__(self, **kwargs):
Section.__init__(self, **kwargs)
self.menus = dict()
self._app = App.get_running_app()
def on_pre_enter(self, *args):
"""Prepare menus."""
self.ids.panel.on_resize()
def commit(action, tab_name, dt):
"""Selected menu command callback."""
content = self.ids.get(tab_name).ids.content
action(content=content).start()
if not self.menus:
caller = self.ids.toolbar.ids["right_actions"].children[0]
for tab in menu_tab.keys():
menu_items = list()
for item in menu_tab[tab]:
menu_items.append({
"viewclass": "MDMenuItem",
"icon": menu_context[item][0],
"text": menu_context[item][1],
"callback": functools.partial(commit, menu_context[item][2], tab)
})
self.menus[tab] = MessageMenu(caller=caller, items=menu_items, width_mult=4)
def open_menu(self, widget):
"""Open menu for right tab."""
self.menus[self.ids.panel.ids.tab_manager.current].open()
def list_inbox(self, page):
"""load all favorite contacts."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_inbox(),
page.children[0].ids.content,
tab=page.name
)
def list_outbox(self, page):
"""Load all friend contacts."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_outbox(),
page.children[0].ids.content,
tab=page.name
)
def list_drafts(self, page):
"""Load all known contacts."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_drafts(),
page.children[0].ids.content,
tab=page.name
)
def list_read(self, page):
"""Load all known contacts from a church network."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_read(),
page.children[0].ids.content,
tab=page.name
)
def list_trash(self, page):
"""Load all blocked contacts."""
self.__load_rv(
self._app.ioc.facade.api.mailbox.load_trash(),
page.children[0].ids.content,
tab=page.name
)
@staticmethod
def __load_rv(coro, content, tab=None, selectable=False):
"""Update the ScrollView with new and changed data."""
data = content.data
current = {e["item_id"] for e in data}
loaded = Loop.main().run(coro, wait=True)
# Remove from current that is not in loaded
remove = (current - loaded)
index = 0
while index < len(data):
if data[index]["item_id"] in remove:
del data[index]
else:
index += 1
# Add unique from loaded to current
new = (loaded - current)
for item in loaded:
if item in new:
model = {"item_id": item}
if tab:
model["tab"] = tab
if selectable:
model["selected"] = False
data.append(model)
menu_context = {
"sync": ("sync", strings.TEXT_SYNCHRONIZE, SynchronizeMailAction),
"empty": ("trash-can-outline", strings.TEXT_EMPTY, EmptyTrashAction),
}
|
import json
import requests
from typing import Union
from random import choice
from string import ascii_uppercase
class Place:
    place: Union[int, str]
def __init__(self, placeId: Union[int, str]) -> None:
self.place = placeId
def __register(self) -> dict:
url = "https://gdesha.ru/api/v1/Devices/saveGcmToken"
payload = f"regId=3F516D9A-FF7E-4F14-962B-{''.join(choice(ascii_uppercase) for _ in range(12))}"
return self.__post(url, payload)["result"]["registrationId"]
def __addMark(self, mark: int) -> None:
url = "https://gdesha.ru/api/v1/Rates/add"
id = self.__register()
payload = f"placeId={self.place}®Id={id}&value={mark}"
self.__post(url, payload)
def flood(self, mark: int, num: int) -> None:
try:
for i in range(num):
self.__addMark(mark)
print(f"Сделано {i + 1} из {num}")
except KeyboardInterrupt:
print("Отправка оценок принудительно остановлена")
def getInfo(self) -> dict:
url = f"https://gdesha.ru/api/v1/Places/getInfo?placeId={self.place}®Id=3F516D9A-FF7E-4F14-962B-39A6977ECF7F"
return self.__get(url)["result"]
def __post(self, url: str, payload: str) -> dict:
resp = requests.post(url, payload, headers={'Content-Type': 'application/x-www-form-urlencoded'})
if resp.status_code != 200:
raise Exception(f"Wrong response status: {resp.text}")
else:
return json.loads(resp.text)
def __get(self, url: str) -> dict:
resp = requests.get(url)
if resp.status_code != 200:
raise Exception(f"Wrong response status: {resp.text}")
else:
return json.loads(resp.text)
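# Usage sketch (illustrative only; the placeId 123 and the mark/num values
# are assumptions, not taken from the original script):
#
#   place = Place(123)
#   info = place.getInfo()        # fetch current place info as a dict
#   place.flood(mark=5, num=10)   # submit ten ratings of value 5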
|
import tensorflow as tf
import numpy as np
import lenspack
import DifferentiableHOS
from numpy.testing import assert_allclose
def test_peak():
""" Testing tensorflow peak counting implementation vs. lenspack implementation """
#start with random map
test_map = np.random.rand(100, 100)
#calculating peak locations
DHOS_output = DifferentiableHOS.statistics.find_peaks2d_tf(
tf.constant(test_map, dtype=tf.float32))
lenspack_output = lenspack.peaks.find_peaks2d(test_map)
#checking peak locations
assert_allclose(DHOS_output[0].numpy(), lenspack_output[0], atol=1e-6)
assert_allclose(DHOS_output[1].numpy(), lenspack_output[1], atol=1e-6)
assert_allclose(DHOS_output[2].numpy(), lenspack_output[2], atol=1e-6)
#generating histogram
DHOS_output = DifferentiableHOS.statistics.peaks_histogram_tf(
tf.constant(test_map, dtype=tf.float32))
lenspack_output = lenspack.peaks.peaks_histogram(test_map)
#checking histogram locations
assert_allclose(DHOS_output[0].numpy(), lenspack_output[0], atol=1e-6)
assert_allclose(DHOS_output[1].numpy(), lenspack_output[1], atol=1e-6)
print("peak counts test complete")
|
#!/usr/bin/env python3
# Copyright (c) 2018 Kevin Weiss, for HAW Hamburg <kevin.weiss@haw-hamburg.de>
#
# This file is subject to the terms and conditions of the MIT License. See the
# file LICENSE in the top level directory for more details.
# SPDX-License-Identifier: MIT
"""This module contains imports and exports the config file"""
import os
import json
from logging import debug, info
from pprint import pformat
from jsonschema import validate
_PATH_TO_SCHEMA = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"data/mem_map_schema.json")
def import_config(config_path):
"""Imports target config from file or directory."""
info("Searching for config in %r", config_path)
config = _import_type_check(config_path)
if 'mem_maps' not in config:
config['mem_maps'] = []
if 'bitfields' not in config:
config['bitfields'] = []
debug("Imported:\n%s", pformat(config))
return config
def _find_config_file_in_dir(config_path):
debug("Searching directory for config file")
for fname in os.listdir(config_path):
if fname.endswith('.json'):
info("Found %r config file in %r", fname, config_path)
return os.path.join(config_path, fname)
debug("%r not valid file", fname)
raise FileNotFoundError("No config file in {}".format(config_path))
def _import_type_check(config_path):
if config_path.endswith('.json'):
return _import_config_from_json(config_path)
return _import_config_from_json(_find_config_file_in_dir(config_path))
def _import_config_from_json(config_path):
info("Importing %r", config_path)
with open(config_path) as config_f:
config = json.load(config_f)
with open(_PATH_TO_SCHEMA) as schema_f:
schema = json.load(schema_f)
validate(config, schema)
return config
def export_config(config, config_path):
"""Exports config file to target path."""
with open(_PATH_TO_SCHEMA) as schema_f:
schema = json.load(schema_f)
validate(config, schema)
if not config_path.endswith('.json'):
config_path = _find_config_file_in_dir(config_path)
with open(config_path, "w") as config_f:
json.dump(config, config_f, sort_keys=True, indent=4)
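# Round-trip sketch for the two helpers above (the path and edit are
# hypothetical, not from the original module):
#
#   config = import_config('boards/my_board')   # directory or .json file path
#   config['board_name'] = 'my_board_v2'        # hypothetical edit
#   export_config(config, 'boards/my_board')    # re-validated against the schema on write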
|
import spotipy
import sys
from spotipy.oauth2 import SpotifyClientCredentials
import os
import json
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.ml import PipelineModel
os.environ['SPOTIPY_CLIENT_ID'] = 'SECRET'
os.environ['SPOTIPY_CLIENT_SECRET'] = 'SECRET'
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials())
### PASS IN INPUT SONG_NAME FROM USER
song_name = 'All I Want for Christmas Is You'
###
results = sp.search(q='track:' + song_name, type='track')
#print(results)
with open("C:/Users/athor/Documents/git/SpotifyClassifier/data/junk.json", 'w') as f:
json.dump(results, f, indent=4)
data = results["tracks"]["items"]
track = data[0]
track_id = track["id"]
print(track_id)
track_features = sp.audio_features(track_id)
track_features = track_features[0]
track_info = sp.track(track_id)
track_analysis = sp.audio_analysis(track_id)
track_features['popularity'] = track_info['popularity']
track_features['explicit'] = track_info['explicit']
album = track_info['album']
track_features['release_date'] = album['release_date']
track_features['release_date_precision'] = album['release_date_precision']
num_seg = 0
pitches = np.zeros(12)
timbre = np.zeros(12)
if "segments" in track_analysis:
for _, j in enumerate(track_analysis['segments']):
pitches += np.array(j['pitches'])
timbre += np.array(j['timbre'])
num_seg+=1
track_features['avg_pitches'] = list(pitches/num_seg)
track_features['avg_timbre'] = list(timbre/num_seg)
else:
track_features['avg_pitches'] = list(pitches)
track_features['avg_timbre'] = list(timbre)
print(track_features)
with open("C:/Users/athor/Documents/git/SpotifyClassifier/data/track_data.json", 'w') as f:
json.dump(track_features, f, indent=4)
### CONVERT TRACK_FEATURES TO VECTORIZED FORM
with open('C:/Users/athor/Documents/git/SpotifyClassifier/data/track_data.json') as json_file:
data = json.load(json_file)
features = [[data['id'], data["super-genre-label"], data["subgenre-label"], data["danceability"], data['energy'],
data['loudness'], data['mode'], data['speechiness'], data['acousticness'],
data['instrumentalness'], data['liveness'], data['valence'],
data['tempo'], data['time_signature'], data["popularity"], data["avg_pitches"],
data["avg_timbre"]]]
dataframe = pd.DataFrame(features, columns=['id', 'super-genre', 'subgenre', 'danceability', 'energy',
'loudness', 'mode', 'speechiness', 'acousticness',
'instrumentalness', 'liveness', 'valence',
'tempo', 'time_signature', 'popularity', 'avg_pitches', 'avg_timbre'])
split_df = pd.DataFrame(list(dataframe['avg_pitches']), columns=["pitch" + str(i) for i in range(12)])
dataframe = pd.concat([dataframe, split_df], axis=1)
dataframe = dataframe.drop('avg_pitches', axis=1)
split_df = pd.DataFrame(list(dataframe['avg_timbre']), columns=["timbre" + str(i) for i in range(12)])
dataframe = pd.concat([dataframe, split_df], axis=1)
dataframe = dataframe.drop('avg_timbre', axis=1)
spark = SparkSession.builder.master('local[*]').appName('data-processing').getOrCreate()
sparkDF = spark.createDataFrame(dataframe)
### LOAD TRAINED MODEL
# load from local Model/model dir (the model is inside Model/model folder in project repo)
pipelineModel = PipelineModel.load("../Model/model")
### CLASSIFY SONG
sparkDF = pipelineModel.transform(sparkDF)
sparkDF.select("prediction").show()
label_mapping = ['dance', 'pop', 'alternative', 'rock', 'hardcore', 'latin', 'country', 'jazz', 'classical', 'musical']
row_list = sparkDF.select("prediction").collect()
pred = [ int(row.prediction) for row in row_list]
### SEND OUTPUT BACK TO FRONT END
predicted_genre_str = label_mapping[pred[0]]
|
from django.shortcuts import render, get_object_or_404
from news.models import News
def single_news_page(request, news_id: int):
context = dict(
news=get_object_or_404(News, pk=news_id)
)
return render(request, 'news/single_news_page.html', context)
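# A minimal urls.py sketch wiring the view above (the module path and URL
# name are assumptions, not taken from the original project):
#
#   from django.urls import path
#   from news.views import single_news_page
#
#   urlpatterns = [
#       path('news/<int:news_id>/', single_news_page, name='single_news_page'),
#   ]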
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class CompanyConfig(AppConfig):
name = "company"
|
from django.test import TestCase
from django.contrib.auth.models import User
from allauth.socialaccount.models import SocialToken
from projects.models import Project
from oauth.utils import make_github_project, make_github_organization, import_github
from oauth.models import GithubOrganization, GithubProject
class RedirectOauth(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.user = User.objects.get(pk=1)
self.project = Project.objects.get(slug='pip')
self.org = GithubOrganization()
self.privacy = self.project.version_privacy_level
def test_make_github_project_pass(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": False,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsInstance(github_project, GithubProject)
def test_make_github_project_fail(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": True,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsNone(github_project)
def test_make_github_organization(self):
org_json = {
"html_url": "",
"name": "",
"email": "",
"login": "",
}
org = make_github_organization(self.user, org_json)
self.assertIsInstance(org, GithubOrganization)
def test_import_github_with_no_token(self):
github_connected = import_github(self.user, sync=True)
self.assertEqual(github_connected, False)
|
from typing import List


class Solution:
def nextPermutation(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
length = len(nums) - 1
i = length
        # find the longest non-increasing suffix, e.g.
        # [1, 2, 5, 4, 3] => suffix [5, 4, 3]
while i > 0:
if nums[i - 1] < nums[i]:
break
i -= 1
if i > 0:
            # find the smallest value in nums[i:] that is
            # larger than the element before the suffix (nums[i - 1])
j = length
while j >= i:
if nums[j] > nums[i - 1]:
break
j -= 1
# switch positions and reverse values after it.
nums[i - 1], nums[j] = nums[j], nums[i - 1]
nums[i:] = nums[-1:i - 1:-1]
# i == 0 means we have the highest possible order. (descending)
# so just reverse.
else:
nums[::] = nums[::-1]
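# In-place examples (illustrative, not from the original source):
#
#   nums = [1, 2, 3]
#   Solution().nextPermutation(nums)
#   assert nums == [1, 3, 2]
#   nums = [3, 2, 1]
#   Solution().nextPermutation(nums)
#   assert nums == [1, 2, 3]   # highest order wraps back to lowest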
|
# @l2g 17 python3
# [17] Letter Combinations of a Phone Number
# Difficulty: Medium
# https://leetcode.com/problems/letter-combinations-of-a-phone-number
#
# Given a string containing digits from 2-9 inclusive,
# return all possible letter combinations that the number could represent.
# Return the answer in any order.
# A mapping of digit to letters (just like on the telephone buttons) is given below.
# Note that 1 does not map to any letters.
#
#
# Example 1:
#
# Input: digits = "23"
# Output: ["ad","ae","af","bd","be","bf","cd","ce","cf"]
#
# Example 2:
#
# Input: digits = ""
# Output: []
#
# Example 3:
#
# Input: digits = "2"
# Output: ["a","b","c"]
#
#
# Constraints:
#
# 0 <= digits.length <= 4
# digits[i] is a digit in the range ['2', '9'].
#
#
from typing import List
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
digit_map = {
"2": "abc",
"3": "def",
"4": "ghi",
"5": "jkl",
"6": "mno",
"7": "pqrs",
"8": "tuv",
"9": "wxyz",
}
ans = []
def dfs(pos, arr):
if pos == len(digits):
if arr:
ans.append("".join(arr))
return
for alp in digit_map[digits[pos]]:
dfs(pos + 1, arr + [alp])
dfs(0, [])
return ans
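# Example (illustrative): "23" expands over {a,b,c} x {d,e,f}.
#
#   Solution().letterCombinations("23")
#   # -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']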
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_17.py")])
|
import sys, getopt, os
from camp_real_engine.cli import CLI
HELPTEXT = 'ramp.py -i <inputfile>'
def execute_cli_command(commands):
cli = CLI()
cli.execute(commands)
def main(argv):
inputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:d:",["ifile=","dir="])
except getopt.GetoptError:
        print(HELPTEXT)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print(HELPTEXT)
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
if not os.path.isfile(inputfile):
        print('file does not exist: ' + inputfile)
sys.exit()
commands = ['realize', inputfile]
execute_cli_command(commands)
def rcamp_main():
commands = sys.argv[1:]
execute_cli_command(commands)
if __name__ == "__main__":
main(sys.argv[1:])
|
import numpy as np
np.greater(x1, x2)  # element-wise x1 > x2; x1 and x2 assumed defined upstream
|
import unittest, os
SRC_PATH = os.path.join(os.path.dirname(__file__), 'src')
TEST_CASES = unittest.defaultTestLoader.discover(SRC_PATH, '*.py')
suite = unittest.TestSuite()
suite.addTest(TEST_CASES)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite)
|
from abc import ABCMeta
from collections.abc import Iterable
from numbers import Real, Integral
from xml.etree import ElementTree as ET
import sys
import warnings
import numpy as np
import openmc.checkvalue as cv
import openmc
from openmc._xml import get_text
from openmc.mixin import EqualityMixin, IDManagerMixin
class MeshBase(IDManagerMixin, metaclass=ABCMeta):
"""A mesh that partitions geometry for tallying purposes.
Parameters
----------
mesh_id : int
Unique identifier for the mesh
name : str
Name of the mesh
Attributes
----------
id : int
Unique identifier for the mesh
name : str
Name of the mesh
"""
next_id = 1
used_ids = set()
def __init__(self, mesh_id=None, name=''):
# Initialize Mesh class attributes
self.id = mesh_id
self.name = name
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if name is not None:
cv.check_type('name for mesh ID="{0}"'.format(self._id),
name, str)
self._name = name
else:
self._name = ''
@classmethod
def from_hdf5(cls, group):
"""Create mesh from HDF5 group
Parameters
----------
group : h5py.Group
Group in HDF5 file
Returns
-------
openmc.MeshBase
Instance of a MeshBase subclass
"""
mesh_type = group['type'][()].decode()
if mesh_type == 'regular':
return RegularMesh.from_hdf5(group)
elif mesh_type == 'rectilinear':
return RectilinearMesh.from_hdf5(group)
else:
raise ValueError('Unrecognized mesh type: "' + mesh_type + '"')
class RegularMesh(MeshBase):
"""A regular Cartesian mesh in one, two, or three dimensions
Parameters
----------
mesh_id : int
Unique identifier for the mesh
name : str
Name of the mesh
Attributes
----------
id : int
Unique identifier for the mesh
name : str
Name of the mesh
dimension : Iterable of int
The number of mesh cells in each direction.
n_dimension : int
Number of mesh dimensions.
lower_left : Iterable of float
        The lower-left corner of the structured mesh. If only two coordinates
        are given, it is assumed that the mesh is an x-y mesh.
    upper_right : Iterable of float
        The upper-right corner of the structured mesh. If only two coordinates
        are given, it is assumed that the mesh is an x-y mesh.
width : Iterable of float
The width of mesh cells in each direction.
indices : Iterable of tuple
An iterable of mesh indices for each mesh element, e.g. [(1, 1, 1),
(2, 1, 1), ...]
"""
def __init__(self, mesh_id=None, name=''):
super().__init__(mesh_id, name)
self._dimension = None
self._lower_left = None
self._upper_right = None
self._width = None
@property
def dimension(self):
return self._dimension
@property
def n_dimension(self):
return len(self._dimension)
@property
def lower_left(self):
return self._lower_left
@property
def upper_right(self):
return self._upper_right
@property
def width(self):
return self._width
@property
def num_mesh_cells(self):
return np.prod(self._dimension)
@property
def indices(self):
ndim = len(self._dimension)
if ndim == 3:
nx, ny, nz = self.dimension
return ((x, y, z)
for z in range(1, nz + 1)
for y in range(1, ny + 1)
for x in range(1, nx + 1))
elif ndim == 2:
nx, ny = self.dimension
return ((x, y)
for y in range(1, ny + 1)
for x in range(1, nx + 1))
else:
nx, = self.dimension
return ((x,) for x in range(1, nx + 1))
@dimension.setter
def dimension(self, dimension):
cv.check_type('mesh dimension', dimension, Iterable, Integral)
cv.check_length('mesh dimension', dimension, 1, 3)
self._dimension = dimension
@lower_left.setter
def lower_left(self, lower_left):
cv.check_type('mesh lower_left', lower_left, Iterable, Real)
cv.check_length('mesh lower_left', lower_left, 1, 3)
self._lower_left = lower_left
@upper_right.setter
def upper_right(self, upper_right):
cv.check_type('mesh upper_right', upper_right, Iterable, Real)
cv.check_length('mesh upper_right', upper_right, 1, 3)
self._upper_right = upper_right
@width.setter
def width(self, width):
cv.check_type('mesh width', width, Iterable, Real)
cv.check_length('mesh width', width, 1, 3)
self._width = width
    def __repr__(self):
        string = 'RegularMesh\n'
        string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self._id)
        string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self._name)
        string += '{0: <16}{1}{2}\n'.format('\tDimension', '=\t', self._dimension)
        string += '{0: <16}{1}{2}\n'.format('\tLower left', '=\t', self._lower_left)
        string += '{0: <16}{1}{2}\n'.format('\tUpper right', '=\t', self._upper_right)
        string += '{0: <16}{1}{2}\n'.format('\tWidth', '=\t', self._width)
        return string
@classmethod
def from_hdf5(cls, group):
mesh_id = int(group.name.split('/')[-1].lstrip('mesh '))
# Read and assign mesh properties
mesh = cls(mesh_id)
mesh.dimension = group['dimension'][()]
mesh.lower_left = group['lower_left'][()]
mesh.upper_right = group['upper_right'][()]
mesh.width = group['width'][()]
return mesh
@classmethod
def from_rect_lattice(cls, lattice, division=1, mesh_id=None, name=''):
"""Create mesh from an existing rectangular lattice
Parameters
----------
lattice : openmc.RectLattice
Rectangular lattice used as a template for this mesh
division : int
Number of mesh cells per lattice cell.
If not specified, there will be 1 mesh cell per lattice cell.
mesh_id : int
Unique identifier for the mesh
name : str
Name of the mesh
Returns
-------
openmc.RegularMesh
RegularMesh instance
"""
cv.check_type('rectangular lattice', lattice, openmc.RectLattice)
shape = np.array(lattice.shape)
width = lattice.pitch*shape
mesh = cls(mesh_id, name)
mesh.lower_left = lattice.lower_left
mesh.upper_right = lattice.lower_left + width
mesh.dimension = shape*division
return mesh
def to_xml_element(self):
"""Return XML representation of the mesh
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing mesh data
"""
element = ET.Element("mesh")
element.set("id", str(self._id))
if self._dimension is not None:
subelement = ET.SubElement(element, "dimension")
subelement.text = ' '.join(map(str, self._dimension))
subelement = ET.SubElement(element, "lower_left")
subelement.text = ' '.join(map(str, self._lower_left))
if self._upper_right is not None:
subelement = ET.SubElement(element, "upper_right")
subelement.text = ' '.join(map(str, self._upper_right))
if self._width is not None:
subelement = ET.SubElement(element, "width")
subelement.text = ' '.join(map(str, self._width))
return element
@classmethod
def from_xml_element(cls, elem):
"""Generate mesh from an XML element
Parameters
----------
elem : xml.etree.ElementTree.Element
XML element
Returns
-------
openmc.Mesh
Mesh generated from XML element
"""
mesh_id = int(get_text(elem, 'id'))
mesh = cls(mesh_id)
mesh_type = get_text(elem, 'type')
if mesh_type is not None:
mesh.type = mesh_type
dimension = get_text(elem, 'dimension')
if dimension is not None:
mesh.dimension = [int(x) for x in dimension.split()]
lower_left = get_text(elem, 'lower_left')
if lower_left is not None:
mesh.lower_left = [float(x) for x in lower_left.split()]
upper_right = get_text(elem, 'upper_right')
if upper_right is not None:
mesh.upper_right = [float(x) for x in upper_right.split()]
width = get_text(elem, 'width')
if width is not None:
mesh.width = [float(x) for x in width.split()]
return mesh
def build_cells(self, bc=['reflective'] * 6):
"""Generates a lattice of universes with the same dimensionality
as the mesh object. The individual cells/universes produced
will not have material definitions applied and so downstream code
will have to apply that information.
Parameters
----------
bc : iterable of {'reflective', 'periodic', 'transmission', or 'vacuum'}
Boundary conditions for each of the four faces of a rectangle
            (if applying to a 2D mesh) or six faces of a parallelepiped
(if applying to a 3D mesh) provided in the following order:
[x min, x max, y min, y max, z min, z max]. 2-D cells do not
contain the z min and z max entries.
Returns
-------
root_cell : openmc.Cell
The cell containing the lattice representing the mesh geometry;
this cell is a single parallelepiped with boundaries matching
the outermost mesh boundary with the boundary conditions from bc
applied.
cells : iterable of openmc.Cell
The list of cells within each lattice position mimicking the mesh
geometry.
"""
cv.check_length('bc', bc, length_min=4, length_max=6)
for entry in bc:
cv.check_value('bc', entry, ['transmission', 'vacuum',
'reflective', 'periodic'])
n_dim = len(self.dimension)
# Build the cell which will contain the lattice
xplanes = [openmc.XPlane(self.lower_left[0], bc[0]),
openmc.XPlane(self.upper_right[0], bc[1])]
if n_dim == 1:
yplanes = [openmc.YPlane(-1e10, 'reflective'),
openmc.YPlane(1e10, 'reflective')]
else:
yplanes = [openmc.YPlane(self.lower_left[1], bc[2]),
openmc.YPlane(self.upper_right[1], bc[3])]
if n_dim <= 2:
            # Values of +/-1e10 are used for the unbounded z planes: the max
            # supported float apparently differs between what numpy uses and
            # what Fortran expects for a real(8), so this safe and sane
            # default avoids code complication and achieves the same goal.
zplanes = [openmc.ZPlane(-1e10, 'reflective'),
openmc.ZPlane(1e10, 'reflective')]
else:
zplanes = [openmc.ZPlane(self.lower_left[2], bc[4]),
openmc.ZPlane(self.upper_right[2], bc[5])]
root_cell = openmc.Cell()
root_cell.region = ((+xplanes[0] & -xplanes[1]) &
(+yplanes[0] & -yplanes[1]) &
(+zplanes[0] & -zplanes[1]))
# Build the universes which will be used for each of the (i,j,k)
# locations within the mesh.
# We will concurrently build cells to assign to these universes
cells = []
universes = []
for index in self.indices:
cells.append(openmc.Cell())
universes.append(openmc.Universe())
universes[-1].add_cell(cells[-1])
lattice = openmc.RectLattice()
lattice.lower_left = self.lower_left
# Assign the universe and rotate to match the indexing expected for
# the lattice
if n_dim == 1:
universe_array = np.array([universes])
elif n_dim == 2:
universe_array = np.empty(self.dimension[::-1],
dtype=openmc.Universe)
i = 0
for y in range(self.dimension[1] - 1, -1, -1):
for x in range(self.dimension[0]):
universe_array[y][x] = universes[i]
i += 1
else:
universe_array = np.empty(self.dimension[::-1],
dtype=openmc.Universe)
i = 0
for z in range(self.dimension[2]):
for y in range(self.dimension[1] - 1, -1, -1):
for x in range(self.dimension[0]):
universe_array[z][y][x] = universes[i]
i += 1
lattice.universes = universe_array
if self.width is not None:
lattice.pitch = self.width
else:
dx = ((self.upper_right[0] - self.lower_left[0]) /
self.dimension[0])
if n_dim == 1:
lattice.pitch = [dx]
elif n_dim == 2:
dy = ((self.upper_right[1] - self.lower_left[1]) /
self.dimension[1])
lattice.pitch = [dx, dy]
else:
dy = ((self.upper_right[1] - self.lower_left[1]) /
self.dimension[1])
dz = ((self.upper_right[2] - self.lower_left[2]) /
self.dimension[2])
lattice.pitch = [dx, dy, dz]
# Fill Cell with the Lattice
root_cell.fill = lattice
return root_cell, cells
def Mesh(*args, **kwargs):
warnings.warn("Mesh has been renamed RegularMesh. Future versions of "
"OpenMC will not accept the name Mesh.")
return RegularMesh(*args, **kwargs)
class RectilinearMesh(MeshBase):
"""A 3D rectilinear Cartesian mesh
Parameters
----------
mesh_id : int
Unique identifier for the mesh
name : str
Name of the mesh
Attributes
----------
id : int
Unique identifier for the mesh
name : str
Name of the mesh
n_dimension : int
Number of mesh dimensions (always 3 for a RectilinearMesh).
x_grid : Iterable of float
Mesh boundary points along the x-axis.
y_grid : Iterable of float
Mesh boundary points along the y-axis.
z_grid : Iterable of float
Mesh boundary points along the z-axis.
indices : Iterable of tuple
An iterable of mesh indices for each mesh element, e.g. [(1, 1, 1),
(2, 1, 1), ...]
"""
def __init__(self, mesh_id=None, name=''):
super().__init__(mesh_id, name)
self._x_grid = None
self._y_grid = None
self._z_grid = None
@property
def n_dimension(self):
return 3
@property
def x_grid(self):
return self._x_grid
@property
def y_grid(self):
return self._y_grid
@property
def z_grid(self):
return self._z_grid
@property
def indices(self):
nx = len(self.x_grid) - 1
ny = len(self.y_grid) - 1
nz = len(self.z_grid) - 1
return ((x, y, z)
for z in range(1, nz + 1)
for y in range(1, ny + 1)
for x in range(1, nx + 1))
@x_grid.setter
def x_grid(self, grid):
cv.check_type('mesh x_grid', grid, Iterable, Real)
self._x_grid = grid
@y_grid.setter
def y_grid(self, grid):
cv.check_type('mesh y_grid', grid, Iterable, Real)
self._y_grid = grid
@z_grid.setter
def z_grid(self, grid):
cv.check_type('mesh z_grid', grid, Iterable, Real)
self._z_grid = grid
@classmethod
def from_hdf5(cls, group):
mesh_id = int(group.name.split('/')[-1].lstrip('mesh '))
# Read and assign mesh properties
mesh = cls(mesh_id)
mesh.x_grid = group['x_grid'][()]
mesh.y_grid = group['y_grid'][()]
mesh.z_grid = group['z_grid'][()]
return mesh
def to_xml_element(self):
"""Return XML representation of the mesh
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing mesh data
"""
element = ET.Element("mesh")
element.set("id", str(self._id))
element.set("type", "rectilinear")
subelement = ET.SubElement(element, "x_grid")
subelement.text = ' '.join(map(str, self.x_grid))
subelement = ET.SubElement(element, "y_grid")
subelement.text = ' '.join(map(str, self.y_grid))
subelement = ET.SubElement(element, "z_grid")
subelement.text = ' '.join(map(str, self.z_grid))
return element
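# A minimal usage sketch (not from the original module; assumes openmc's
# IDManagerMixin auto-assigns an id when mesh_id is None, as upstream does):
# build a small regular mesh and serialize it to XML.
if __name__ == '__main__':
    demo_mesh = RegularMesh(name='demo mesh')
    demo_mesh.dimension = [2, 2, 1]
    demo_mesh.lower_left = [0., 0., 0.]
    demo_mesh.upper_right = [10., 10., 1.]
    print(ET.tostring(demo_mesh.to_xml_element()).decode())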
|
print('ok')
|
###########################
#
# #687 Shuffling Cards - Project Euler
# https://projecteuler.net/problem=687
#
# Code by Kevin Marciniak
#
###########################
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from Store.models import ShippingAddres
from Store import *
from .models import Profile
class LoginForm(forms.Form):
    email = forms.EmailField(label="email", widget=forms.EmailInput)
    password = forms.CharField(label="Parola", widget=forms.PasswordInput)
class RegisterForm(forms.Form):
    username = forms.CharField(max_length=200, label="Kullanıcı adı")
    email = forms.EmailField(label="Email", widget=forms.EmailInput)
    password = forms.CharField(max_length=30, label="Parola", widget=forms.PasswordInput)
    def clean(self):
        username = self.cleaned_data.get("username")
        email = self.cleaned_data.get("email")
        password = self.cleaned_data.get("password")
        usernameError = User.objects.filter(username=username).exists()
        emailError = User.objects.filter(email=email).exists()
        values = {
            "username": username,
            "password": password,
            "email": email,
            'emailError': emailError,
            'usernameError': usernameError,
        }
        return values
class UserUpdateForm(UserChangeForm):
    class Meta:
        model = User
        fields = ['username', 'email']
class ProfileUpdateForm(forms.ModelForm):
    class Meta:
        model = Profile
        fields = ['birthday', 'phone', 'gender']
class ShippingForm(forms.ModelForm):
    class Meta:
        model = ShippingAddres
        fields = '__all__'
        exclude = ['customer', 'order']
        widgets = {
            'name': forms.TextInput(attrs={'placeholder': 'Ad'}),
            'surname': forms.TextInput(attrs={'placeholder': 'Soyad'}),
            'phone_number': forms.TextInput(attrs={'placeholder': '+90'}),
            'city': forms.TextInput(attrs={'placeholder': 'Şehir'}),
            'state': forms.TextInput(attrs={'placeholder': 'İlçe'}),
            'neighborhood': forms.TextInput(attrs={'placeholder': 'Mahalle'}),
            'address': forms.Textarea(attrs={'placeholder': 'Mahalle, Sokak, cadde ve diger bilgilerinizi girin'}),
            'title': forms.TextInput(attrs={'placeholder': 'Örnek : Evim, İş yerim vb.'}),
        }
|
from unittest.mock import patch
from django.test import SimpleTestCase
from notifications.channels import BaseNotificationChannel
from notifications.exceptions import ImproperlyConfiguredProvider
class PusherNotificationChannel(BaseNotificationChannel):
name = 'pusher_notification_channel'
providers = ['pusher_channels']
def build_payload(self, provider):
payload = {
'channels': 'notifications',
'event_name': 'new',
'data': {'message': 'Hello world'},
}
if self.context.get('bulk', False) is True:
return [payload]
return payload
class TestPusherChannelsProvider(SimpleTestCase):
@patch(
'notifications.providers.PusherChannelsNotificationProvider.HAS_DEPENDENCIES',
False,
)
def test_pusher_dependency(self):
pusher_notification_channel = PusherNotificationChannel({})
with self.assertRaises(ImproperlyConfiguredProvider):
pusher_notification_channel.notify()
@patch('pusher.Pusher.trigger')
def test_pusher_channels(self, mocked_trigger):
pusher_notification_channel = PusherNotificationChannel({})
pusher_notification_channel.notify()
mocked_trigger.assert_called_with(
channels='notifications', event_name='new', data={'message': 'Hello world'}
)
@patch('pusher.Pusher.trigger_batch')
def test_pusher_channels_bulk(self, mocked_trigger_batch):
pusher_notification_channel = PusherNotificationChannel(
{}, context={'bulk': True}
)
pusher_notification_channel.notify()
mocked_trigger_batch.assert_called_with(
[
{
'channels': 'notifications',
'event_name': 'new',
'data': {'message': 'Hello world'},
}
]
)
|
"""
Cerberus rules for HLA Types
"""
from schemas.tools import create_biomarker_schema
HAPLOTYPE = {
'allele_group': {
'type': 'integer',
},
'hla_allele': {
'type': 'integer',
},
'synonymous_mutation': {
'type': 'integer',
},
'non_coding_mutation': {
'type': 'integer'
},
'suffix': {
'type': 'string',
}
}
HLA = create_biomarker_schema({
'gene_name': {
'type': 'string',
'required': True,
},
'haplotypes': {
'type': 'list',
'schema': HAPLOTYPE
},
})
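# Illustrative check (not part of the original module; assumes the cerberus
# package is installed): a haplotype document can be validated directly.
if __name__ == '__main__':
    from cerberus import Validator
    v = Validator(HAPLOTYPE)
    print(v.validate({'allele_group': 1, 'hla_allele': 101, 'suffix': 'N'}))  # True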
|
import numpy as np
p1_noise01 = np.load('01_poiseuilleaxis1/snr_noise1_n100.npy')
p1_noise05 = np.load('01_poiseuilleaxis1/snr_noise5_n100.npy')
p1_noise10 = np.load('01_poiseuilleaxis1/snr_noise10_n100.npy')
p1_noise30 = np.load('01_poiseuilleaxis1/snr_noise30_n100.npy')
p2_noise01 = np.load('02_poiseuilleaxis2/snr_noise1_n100.npy')
p2_noise05 = np.load('02_poiseuilleaxis2/snr_noise5_n100.npy')
p2_noise10 = np.load('02_poiseuilleaxis2/snr_noise10_n100.npy')
p2_noise30 = np.load('02_poiseuilleaxis2/snr_noise30_n100.npy')
ia_noise01 = np.load('03_idealaorta/snr_noise1_n100.npy')
ia_noise05 = np.load('03_idealaorta/snr_noise5_n100.npy')
ia_noise10 = np.load('03_idealaorta/snr_noise10_n100.npy')
ia_noise30 = np.load('03_idealaorta/snr_noise30_n100.npy')
ma_noise00 = np.load('04_aortamri/snr_noise0_n100.npy')
ma_noise10 = np.load('05_aortamri_n10/snr_noise10_n100.npy')
print(p1_noise01.max(),p1_noise05.max(),p1_noise10.max(),p1_noise30.max())
print(p2_noise01.max(),p2_noise05.max(),p2_noise10.max(),p2_noise30.max())
print(ia_noise01.max(),ia_noise05.max(),ia_noise10.max(),ia_noise30.max())
print(ma_noise00.max(),ma_noise10.max())
|
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from tqdm.notebook import tqdm
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
'''
The utility functions in this file are used to create the TensorFlow input
queue consumed by the function read_images_from_disk().
This allows the codebase to be used with the TGS Salt dataset from the Kaggle
challenge. Since most of the segmentations done on these salt images are
supervised, these utility functions allow unsupervised segmentations to be
done once the input queue has been prepared.
The functions can be used and extended to other datasets as well. Because the
code base is executed automatically via bash scripts, some changes might be
required in the bash scripts themselves.
'''
'''
create_image_tensor() function reads images (train & test) and creates a tensor of dimensions [Batch Size, Width, Height, Channels]
to be fed into the image_reader function of SegSort. This function saves the tensor in the .npy format
Args:
path: string indicating the path to the directory containing train and test folders.
Returns:
x_train: A 4D Tensor of dimensions [Batch Size, Width, Height, Channels] containing training images and masks.
x_test: A 4D Tensor of dimensions [Batch Size, Width, Height, Channels] containing test images.
'''
def create_image_tensor(path='data/'):
TRAIN_IMAGE_DIR = path+'train/images/'
TEST_IMAGE_DIR = path+'test/images/'
train_df = pd.read_csv(path+'train.csv')
test_df = pd.read_csv(path+'sample_submission.csv')
train_imgs = [load_img(TRAIN_IMAGE_DIR + image_name + '.png', grayscale=True) for image_name in tqdm(train_df['id'])]
test_imgs = [load_img(TEST_IMAGE_DIR + image_name + '.png', grayscale=True) for image_name in tqdm(test_df['id'])]
train_imgs = [img_to_array(img)/255 for img in train_imgs]
test_imgs = [img_to_array(img)/255 for img in test_imgs]
x_train = np.array(train_imgs)
x_test = np.array(test_imgs)
np.save("x_train_images.npy", x_train)
np.save("x_test_images.npy", x_test)
return x_train, x_test
'''
create_data_list() function iterates through the image and mask directory and prepares the tensorflow input queue.
Args:
path: string indicating the path to the directory containing train and test folders.
    train: name of the folder containing training data.
test: name of the folder containing test data.
Returns:
void: it returns nothing but creates the train.txt and test.txt files containing list of paths to train and test data,
in the current directory.
'''
# NOTE: the 'train'/'test' default folder names below are assumptions.
def create_data_list(path="data/", train="train", test="test"):
TRAIN_IMAGE_DIR = path+'/'+train+'/images/'
TEST_IMAGE_DIR = path+'/'+test+'/images/'
MASK_DIR = path+'/'+train+'/masks/'
train_df = pd.read_csv(path+'train.csv')
test_df = pd.read_csv(path+'sample_submission.csv')
#mask_df = pd.read_csv(path+'sample_submission.csv')
image_name_list = train_df['id'].tolist()
t_image_name_list = test_df['id'].tolist()
f1 = "train.txt"
f2 = "test.txt"
    with open(f1, "a") as img:
        for i in image_name_list:
            s = TRAIN_IMAGE_DIR+i+".png"+" "+MASK_DIR+i+".png"+"\n"
            img.write(s)
    with open(f2, "a") as t_img:
        for i in t_image_name_list:
            s = TEST_IMAGE_DIR+i+".png"+"\n"
            t_img.write(s)
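# A minimal sketch of how these helpers might be driven (the 'data/' layout
# and 'train'/'test' folder names are assumptions, matching the defaults):
if __name__ == '__main__':
    x_train, x_test = create_image_tensor('data/')
    create_data_list('data/', 'train', 'test')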
|
"""Dummy for packaging"""
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 04 09:32:46 2017
@author: Zlatko K. Minev, pyEPR team
"""
from __future__ import division, print_function, absolute_import # Python 2.7 and 3 compatibility
import platform # Which OS we run
import numpy as np
import pandas as pd
import warnings
# Constants
from collections import OrderedDict
from scipy.constants import Planck, elementary_charge, epsilon_0, pi
# Convenience
π = pi
ħ = hbar = Planck/(2*pi)  # Reduced Planck constant
# Reduced flux quantum (3.29105976e-16 Wb)
ϕ0 = fluxQ = ħ / (2*elementary_charge)
# Magnitude of the electric charge carried by a single electron
e_el = elementary_charge
# ==============================================================================
# Utility functions
# ==============================================================================
def combinekw(kw1, kw2):
''' Copy kw1, update with kw2, return result '''
kw = kw1.copy()
kw.update(kw2)
return kw
def isint(value):
try:
int(value)
return True
except ValueError:
return False
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def floor_10(x):
    ''' Round down to the nearest power of 10 '''
return 10.**(np.floor(np.log10(x)))
def fact(n):
''' Factorial '''
if n <= 1:
return 1
return n * fact(n-1)
def nck(n, k):
''' choose '''
return fact(n)/(fact(k)*fact(n-k))
def get_above_diagonal(M):
''' extract the values that are above the diagonal.
Assumes square matrix
'''
return M[np.triu_indices(M.shape[0], k=1)]
def sort_df_col(df):
''' sort by numerical int order '''
col_names = df.columns
if np.all(col_names.map(isint)):
return df[col_names.astype(int).sort_values().astype(str)]
else:
return df
def sort_Series_idx(sr):
''' sort by numerical int order '''
idx_names = sr.index
if np.all(idx_names.map(isint)):
return sr[idx_names.astype(int).sort_values().astype(str)]
else:
return sr
def get_instance_vars(obj, Forbidden=[]):
VARS = {}
for v in dir(obj):
if not ((v.startswith('__')) or (v.startswith('_'))):
if not callable(getattr(obj, v)):
if not (v in Forbidden):
VARS[v] = getattr(obj, v)
return VARS
def deprecated(func):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used. See StackExchange"""
def newFunc(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn("Call to deprecated function {}.".format(
func.__name__), category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
def info_str_platform():
return '''
System platform information:
system : %s
node : %s
release : %s
machine : %s
processor: %s
summary : %s
version : %s
Python platform information:
version : %s (implem: %s)
compiler : %s
''' % (
platform.system(),
platform.node(),
platform.release(),
platform.machine(),
platform.processor(),
platform.platform(),
platform.version(),
platform.python_version(), platform.python_implementation(),
platform.python_compiler())
# ==============================================================================
# Matrix
# ==============================================================================
def print_matrix(M, frmt="{:7.2f}", append_row=""):
M = np.mat(M)
for row in np.array(M.tolist()):
print(' ', end='')
for chi in row:
print(frmt.format(chi), end='')
print(append_row+"\n", end='')
def divide_diagonal_by_2(CHI0, div_fact=2.):
CHI = CHI0.copy()
CHI[np.diag_indices_from(CHI)] /= div_fact
return CHI
def print_NoNewLine(text):
print((text), end='')
def print_color(text, style=0, fg=24, bg=43, newline=True):
'''For newer, see pc (or Print_colors)
style 0..8; fg 30..38; bg 40..48
'''
format = ';'.join([str(style), str(fg), str(bg)])
s = '\x1b[%sm %s \x1b[0m' % (format, text)
if newline:
print(s)
else:
print(s, end='')
class Print_colors:
    '''Colors class: reset all colors with colors.reset; two
    sub classes, fg for foreground and bg for background; use as
    colors.subclass.colorname, i.e. colors.fg.red or colors.bg.green.
    Also, the generic bold, disable, underline, reverse, strikethrough,
    and invisible work with the main class, i.e. colors.bold
https://www.geeksforgeeks.org/print-colors-python-terminal/
Example use:
    .. code-block:: python
print(colors.bg.green, "adgd", colors.fg.red, "dsgdsg")
print(colors.bg.lightgrey, "dsgsd", colors.fg.red, "sdgsd")
'''
reset = '\033[0m'
bold = '\033[01m'
disable = '\033[02m'
underline = '\033[04m'
reverse = '\033[07m'
strikethrough = '\033[09m'
invisible = '\033[08m'
class fg:
black = '\033[30m'
red = '\033[31m'
green = '\033[32m'
orange = '\033[33m'
blue = '\033[34m'
purple = '\033[35m'
cyan = '\033[36m'
lightgrey = '\033[37m'
darkgrey = '\033[90m'
lightred = '\033[91m'
lightgreen = '\033[92m'
yellow = '\033[93m'
lightblue = '\033[94m'
pink = '\033[95m'
lightcyan = '\033[96m'
class bg:
black = '\033[40m'
red = '\033[41m'
green = '\033[42m'
orange = '\033[43m'
blue = '\033[44m'
purple = '\033[45m'
cyan = '\033[46m'
lightgrey = '\033[47m'
pc = Print_colors
# ==============================================================================
# %% Dataframe
# ==============================================================================
def DataFrame_col_diff(PS, indx=0):
    ''' check whether the columns of a dataframe are equal;
    returns a T/F series over the row index that specifies which rows are different
USE:
PS[DataFrame_col_diff(PS)]
'''
R = []
for i in range(PS.shape[1]-1):
R += [PS.iloc[:, i] == PS.iloc[:, i+1]]
if len(R) == 1:
return np.logical_not(R[0])
else:
return np.logical_not(np.logical_and.reduce(R))
def DataFrame_display_side_by_side(*args, do_display=True):
'''
from pyEPR.toolbox import display_dfs
https://stackoverflow.com/questions/38783027/jupyter-notebook-display-two-pandas-tables-side-by-side
'''
from IPython.display import display_html
html_str = ''
for df in args:
html_str += df.to_html()
text = html_str.replace('table', 'table style="display:inline"')
if do_display:
display_html(text, raw=True)
return text
display_dfs = DataFrame_display_side_by_side
def xarray_unravel_levels(arr, names, my_convert=lambda x: x):
''' Takes in nested dict of dict of dataframes
    names : names of lists; you don't have to include the last two dataframe columns & rows, but you can, to override them
requires xarray
'''
import xarray
if type(arr) == pd.DataFrame:
return xarray.DataArray(arr, dims=None if len(names) == 0 else names)
elif type(arr) in [OrderedDict, dict]:
return xarray.concat([xarray_unravel_levels(item, names[1:]) for k, item in arr.items()], pd.Index(arr.keys(), name=names[0]))
elif type(arr) == xarray.DataArray:
return arr
else:
return my_convert(arr)
def robust_percentile(calc_data, ROBUST_PERCENTILE=2.):
'''
analysis helper function
'''
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
return vmin, vmax
__all__ = ['hbar', 'e_el', 'epsilon_0', 'pi', 'fluxQ',
'fact', 'nck', 'combinekw',
'divide_diagonal_by_2',
'sort_df_col', 'sort_Series_idx',
'print_matrix', 'print_NoNewLine',
'DataFrame_col_diff', 'xarray_unravel_levels', 'robust_percentile']
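# Quick illustrations of a few helpers above (not in the original module):
if __name__ == '__main__':
    print(nck(5, 2))                      # 10.0 -- binomial coefficient
    print(combinekw({'a': 1}, {'b': 2}))  # {'a': 1, 'b': 2}
    M = np.arange(9).reshape(3, 3)
    print(get_above_diagonal(M))          # [1 2 5]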
|
"""
Command Line Interface (CLI) for registerit.
"""
import click
from .build_and_upload import (
build_minimal_python_distribution,
upload_distribution_to_pypi
)
@click.command()
@click.argument('package_name', required=True, type=str)
@click.option('--username', '-u', required=True, type=str)
@click.option('--password', '-p', required=True, type=str)
@click.option('--author', required=False, type=str, default='me')
@click.option('--email', required=False, type=str, default='me@me.com')
@click.option('--url', required=False, type=str, default='http://www.github.com')
def cli(
package_name: str,
username: str,
password: str,
author: str,
email: str,
url: str
) -> None:
"""Register package names on PyPI."""
try:
minimal_dist = build_minimal_python_distribution(
package_name,
author,
email,
url
)
upload_distribution_to_pypi(minimal_dist, username, password)
print(f'{package_name} successfully registered on PyPI.')
except RuntimeError as e:
print(f'ERROR: {e}')
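# Example invocation (illustrative; assumes the package exposes ``cli`` as a
# ``registerit`` console script):
#
#   registerit my-package-name -u pypi_user -p pypi_password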
|
#!/usr/bin/env python3
import psutil
# import shutil
import socket
import emails
sender = "automation@example.com"
recipient = "student-02-24cd4aa4d7ab@example.com"
subject = ""
body = "Please check your system and resolve the issue as soon as possible."
attachment_path = None
MB_conversion = 1024 * 1024
def hostname_resolves(hostname):
try:
socket.gethostbyname(hostname)
return 1
except socket.error:
return 0
def monitor():
    # cpu_percent() already returns a percentage in [0, 100]; sample over 1s
    if psutil.cpu_percent(interval=1) > 80:
subject = "Error - CPU usage is over 80%"
message = emails.generate_email(sender, recipient, subject, body, attachment_path)
emails.send_email(message)
elif psutil.disk_usage("/").percent > 80:
subject = "Error - Available disk space is less than 20%"
message = emails.generate_email(sender, recipient, subject, body, attachment_path)
emails.send_email(message)
elif (psutil.virtual_memory().available / MB_conversion) < 500:
subject = "Error - Available memory is less than 500MB"
message = emails.generate_email(sender, recipient, subject, body, attachment_path)
emails.send_email(message)
elif hostname_resolves("localhost") == 0:
subject = "Error - localhost cannot be resolved to 127.0.0.1"
message = emails.generate_email(sender, recipient, subject, body, attachment_path)
emails.send_email(message)
def main():
monitor()
if __name__ == "__main__":
main()
|
from __future__ import absolute_import, print_function
from collections import OrderedDict
import types as pytypes
import inspect
from llvmlite import ir as llvmir
from numba import types
from numba.targets.registry import cpu_target
from numba import njit
from numba.typing import templates
from numba.datamodel import default_manager, models
from numba.targets import imputils
from numba import cgutils, utils
from numba.config import PYVERSION
from numba.six import exec_
if PYVERSION >= (3, 3):
from collections.abc import Sequence
else:
from collections import Sequence
from . import _box
##############################################################################
# Data model
class InstanceModel(models.StructModel):
def __init__(self, dmm, fe_typ):
cls_data_ty = types.ClassDataType(fe_typ)
# MemInfoPointer uses the `dtype` attribute to traverse for nested
# NRT MemInfo. Since we handle nested NRT MemInfo ourselves,
        # we provide MemInfoPointer with an opaque type
        # so that it does not raise an exception for nested meminfo.
dtype = types.Opaque('Opaque.' + str(cls_data_ty))
members = [
('meminfo', types.MemInfoPointer(dtype)),
('data', types.CPointer(cls_data_ty)),
]
super(InstanceModel, self).__init__(dmm, fe_typ, members)
class InstanceDataModel(models.StructModel):
def __init__(self, dmm, fe_typ):
clsty = fe_typ.class_type
members = [(_mangle_attr(k), v) for k, v in clsty.struct.items()]
super(InstanceDataModel, self).__init__(dmm, fe_typ, members)
default_manager.register(types.ClassInstanceType, InstanceModel)
default_manager.register(types.ClassDataType, InstanceDataModel)
default_manager.register(types.ClassType, models.OpaqueModel)
def _mangle_attr(name):
"""
Mangle attributes.
    The resulting name does not start with an underscore '_'.
"""
return 'm_' + name
##############################################################################
# Class object
_ctor_template = """
def ctor({args}):
return __numba_cls_({args})
"""
def _getargs(fn):
"""
Returns list of positional and keyword argument names in order.
"""
sig = utils.pysignature(fn)
params = sig.parameters
args = [k for k, v in params.items()
if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD]
return args
class JitClassType(type):
"""
The type of any jitclass.
"""
def __new__(cls, name, bases, dct):
if len(bases) != 1:
raise TypeError("must have exactly one base class")
[base] = bases
if isinstance(base, JitClassType):
raise TypeError("cannot subclass from a jitclass")
assert 'class_type' in dct, 'missing "class_type" attr'
outcls = type.__new__(cls, name, bases, dct)
outcls._set_init()
return outcls
def _set_init(cls):
"""
Generate a wrapper for calling the constructor from pure Python.
Note the wrapper will only accept positional arguments.
"""
init = cls.class_type.instance_type.methods['__init__']
        # get positional and keyword arguments
# offset by one to exclude the `self` arg
args = _getargs(init)[1:]
ctor_source = _ctor_template.format(args=', '.join(args))
glbls = {"__numba_cls_": cls}
exec_(ctor_source, glbls)
ctor = glbls['ctor']
cls._ctor = njit(ctor)
def __instancecheck__(cls, instance):
if isinstance(instance, _box.Box):
return instance._numba_type_.class_type is cls.class_type
return False
def __call__(cls, *args, **kwargs):
return cls._ctor(*args, **kwargs)
##############################################################################
# Registration utils
def _validate_spec(spec):
for k, v in spec.items():
if not isinstance(k, str):
raise TypeError("spec keys should be strings, got %r" % (k,))
if not isinstance(v, types.Type):
raise TypeError("spec values should be Numba type instances, got %r"
% (v,))
def _fix_up_private_attr(clsname, spec):
"""
Apply the same changes to dunder names as CPython would.
"""
out = OrderedDict()
for k, v in spec.items():
if k.startswith('__') and not k.endswith('__'):
k = '_' + clsname + k
out[k] = v
return out
def _add_linking_libs(context, call):
"""
Add the required libs for the callable to allow inlining.
"""
libs = getattr(call, "libs", ())
if libs:
context.add_linking_libs(libs)
def register_class_type(cls, spec, class_ctor, builder):
"""
Internal function to create a jitclass.
Args
----
cls: the original class object (used as the prototype)
spec: the structural specification contains the field types.
class_ctor: the numba type to represent the jitclass
builder: the internal jitclass builder
"""
# Normalize spec
if isinstance(spec, Sequence):
spec = OrderedDict(spec)
_validate_spec(spec)
# Fix up private attribute names
spec = _fix_up_private_attr(cls.__name__, spec)
# Copy methods from base classes
clsdct = {}
for basecls in reversed(inspect.getmro(cls)):
clsdct.update(basecls.__dict__)
methods = dict((k, v) for k, v in clsdct.items()
if isinstance(v, pytypes.FunctionType))
props = dict((k, v) for k, v in clsdct.items()
if isinstance(v, property))
others = dict((k, v) for k, v in clsdct.items()
if k not in methods and k not in props)
# Check for name shadowing
shadowed = (set(methods) | set(props)) & set(spec)
if shadowed:
raise NameError("name shadowing: {0}".format(', '.join(shadowed)))
docstring = others.pop('__doc__', "")
_drop_ignored_attrs(others)
if others:
msg = "class members are not yet supported: {0}"
members = ', '.join(others.keys())
raise TypeError(msg.format(members))
for k, v in props.items():
if v.fdel is not None:
raise TypeError("deleter is not supported: {0}".format(k))
jitmethods = {}
for k, v in methods.items():
jitmethods[k] = njit(v)
jitprops = {}
for k, v in props.items():
dct = {}
if v.fget:
dct['get'] = njit(v.fget)
if v.fset:
dct['set'] = njit(v.fset)
jitprops[k] = dct
# Instantiate class type
class_type = class_ctor(cls, ConstructorTemplate, spec, jitmethods,
jitprops)
cls = JitClassType(cls.__name__, (cls,), dict(class_type=class_type,
__doc__=docstring))
# Register resolution of the class object
typingctx = cpu_target.typing_context
typingctx.insert_global(cls, class_type)
# Register class
targetctx = cpu_target.target_context
builder(class_type, methods, typingctx, targetctx).register()
return cls
class ConstructorTemplate(templates.AbstractTemplate):
"""
Base class for jitclass constructor templates.
"""
def generic(self, args, kws):
# Redirect resolution to __init__
instance_type = self.key.instance_type
ctor = instance_type.jitmethods['__init__']
boundargs = (instance_type.get_reference_type(),) + args
disp_type = types.Dispatcher(ctor)
sig = disp_type.get_call_type(self.context, boundargs, kws)
# Actual constructor returns an instance value (not None)
out = templates.signature(instance_type, *sig.args[1:])
return out
def _drop_ignored_attrs(dct):
# ignore anything defined by object
drop = set(['__weakref__',
'__module__',
'__dict__'])
for k, v in dct.items():
if isinstance(v, (pytypes.BuiltinFunctionType,
pytypes.BuiltinMethodType)):
drop.add(k)
elif getattr(v, '__objclass__', None) is object:
drop.add(k)
for k in drop:
del dct[k]
class ClassBuilder(object):
"""
A jitclass builder for a mutable jitclass. This will register
typing and implementation hooks to the given typing and target contexts.
"""
class_impl_registry = imputils.Registry()
implemented_methods = set()
def __init__(self, class_type, methods, typingctx, targetctx):
self.class_type = class_type
self.methods = methods
self.typingctx = typingctx
self.targetctx = targetctx
def register(self):
"""
Register to the frontend and backend.
"""
# Register generic implementations for all jitclasses
self._register_methods(self.class_impl_registry,
self.class_type.instance_type)
# NOTE other registrations are done at the top-level
# (see ctor_impl and attr_impl below)
self.targetctx.install_registry(self.class_impl_registry)
def _register_methods(self, registry, instance_type):
"""
Register method implementations for the given instance type.
"""
for meth in instance_type.jitmethods:
            # There's no way to retrieve the particular method name
# inside the implementation function, so we have to register a
# specific closure for each different name
if meth not in self.implemented_methods:
self._implement_method(registry, meth)
self.implemented_methods.add(meth)
def _implement_method(self, registry, attr):
@registry.lower((types.ClassInstanceType, attr),
types.ClassInstanceType, types.VarArg(types.Any))
def imp(context, builder, sig, args):
instance_type = sig.args[0]
method = instance_type.jitmethods[attr]
disp_type = types.Dispatcher(method)
call = context.get_function(disp_type, sig)
out = call(builder, args)
_add_linking_libs(context, call)
return imputils.impl_ret_new_ref(context, builder,
sig.return_type, out)
@templates.infer_getattr
class ClassAttribute(templates.AttributeTemplate):
key = types.ClassInstanceType
def generic_resolve(self, instance, attr):
if attr in instance.struct:
# It's a struct field => the type is well-known
return instance.struct[attr]
elif attr in instance.jitmethods:
# It's a jitted method => typeinfer it
meth = instance.jitmethods[attr]
disp_type = types.Dispatcher(meth)
class MethodTemplate(templates.AbstractTemplate):
key = (self.key, attr)
def generic(self, args, kws):
args = (instance,) + tuple(args)
sig = disp_type.get_call_type(self.context, args, kws)
return sig.as_method()
return types.BoundFunction(MethodTemplate, instance)
elif attr in instance.jitprops:
# It's a jitted property => typeinfer its getter
impdct = instance.jitprops[attr]
getter = impdct['get']
disp_type = types.Dispatcher(getter)
sig = disp_type.get_call_type(self.context, (instance,), {})
return sig.return_type
@ClassBuilder.class_impl_registry.lower_getattr_generic(types.ClassInstanceType)
def attr_impl(context, builder, typ, value, attr):
"""
Generic getattr() for @jitclass instances.
"""
if attr in typ.struct:
# It's a struct field
inst = context.make_helper(builder, typ, value=value)
data_pointer = inst.data
data = context.make_data_helper(builder, typ.get_data_type(),
ref=data_pointer)
return imputils.impl_ret_borrowed(context, builder,
typ.struct[attr],
getattr(data, _mangle_attr(attr)))
elif attr in typ.jitprops:
# It's a jitted property
getter = typ.jitprops[attr]['get']
sig = templates.signature(None, typ)
dispatcher = types.Dispatcher(getter)
sig = dispatcher.get_call_type(context.typing_context, [typ], {})
call = context.get_function(dispatcher, sig)
out = call(builder, [value])
_add_linking_libs(context, call)
return imputils.impl_ret_new_ref(context, builder, sig.return_type, out)
raise NotImplementedError('attribute {0!r} not implemented'.format(attr))
@ClassBuilder.class_impl_registry.lower_setattr_generic(types.ClassInstanceType)
def attr_impl(context, builder, sig, args, attr):
"""
Generic setattr() for @jitclass instances.
"""
typ, valty = sig.args
target, val = args
if attr in typ.struct:
# It's a struct member
inst = context.make_helper(builder, typ, value=target)
data_ptr = inst.data
data = context.make_data_helper(builder, typ.get_data_type(),
ref=data_ptr)
# Get old value
attr_type = typ.struct[attr]
oldvalue = getattr(data, _mangle_attr(attr))
        # Store new value
setattr(data, _mangle_attr(attr), val)
context.nrt.incref(builder, attr_type, val)
# Delete old value
context.nrt.decref(builder, attr_type, oldvalue)
elif attr in typ.jitprops:
# It's a jitted property
setter = typ.jitprops[attr]['set']
disp_type = types.Dispatcher(setter)
sig = disp_type.get_call_type(context.typing_context,
(typ, valty), {})
call = context.get_function(disp_type, sig)
call(builder, (target, val))
_add_linking_libs(context, call)
else:
raise NotImplementedError('attribute {0!r} not implemented'.format(attr))
def imp_dtor(context, module, instance_type):
llvoidptr = context.get_value_type(types.voidptr)
llsize = context.get_value_type(types.uintp)
dtor_ftype = llvmir.FunctionType(llvmir.VoidType(),
[llvoidptr, llsize, llvoidptr])
fname = "_Dtor.{0}".format(instance_type.name)
dtor_fn = module.get_or_insert_function(dtor_ftype,
name=fname)
if dtor_fn.is_declaration:
# Define
builder = llvmir.IRBuilder(dtor_fn.append_basic_block())
alloc_fe_type = instance_type.get_data_type()
alloc_type = context.get_value_type(alloc_fe_type)
ptr = builder.bitcast(dtor_fn.args[0], alloc_type.as_pointer())
data = context.make_helper(builder, alloc_fe_type, ref=ptr)
context.nrt.decref(builder, alloc_fe_type, data._getvalue())
builder.ret_void()
return dtor_fn
@ClassBuilder.class_impl_registry.lower(types.ClassType, types.VarArg(types.Any))
def ctor_impl(context, builder, sig, args):
"""
Generic constructor (__new__) for jitclasses.
"""
# Allocate the instance
inst_typ = sig.return_type
alloc_type = context.get_data_type(inst_typ.get_data_type())
alloc_size = context.get_abi_sizeof(alloc_type)
meminfo = context.nrt.meminfo_alloc_dtor(
builder,
context.get_constant(types.uintp, alloc_size),
imp_dtor(context, builder.module, inst_typ),
)
data_pointer = context.nrt.meminfo_data(builder, meminfo)
data_pointer = builder.bitcast(data_pointer,
alloc_type.as_pointer())
# Nullify all data
builder.store(cgutils.get_null_value(alloc_type),
data_pointer)
inst_struct = context.make_helper(builder, inst_typ)
inst_struct.meminfo = meminfo
inst_struct.data = data_pointer
# Call the jitted __init__
# TODO: extract the following into a common util
init_sig = (sig.return_type,) + sig.args
init = inst_typ.jitmethods['__init__']
disp_type = types.Dispatcher(init)
call = context.get_function(disp_type, types.void(*init_sig))
_add_linking_libs(context, call)
realargs = [inst_struct._getvalue()] + list(args)
call(builder, realargs)
# Prepare return value
ret = inst_struct._getvalue()
return imputils.impl_ret_new_ref(context, builder, inst_typ, ret)
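# A minimal usage sketch (not part of this module; assumes the era-appropriate
# ``numba.jitclass`` decorator, which routes through register_class_type):
#
#   from numba import jitclass, int32
#
#   @jitclass([('count', int32)])
#   class Counter(object):
#       def __init__(self):
#           self.count = 0
#
#       def increment(self):
#           self.count += 1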
|
import argparse
import json
from time import sleep
import requests
from requests.exceptions import Timeout
from requests.exceptions import ConnectionError
import unicodedata
import re
import glob
import os
import datetime
from bs4 import BeautifulSoup
from email import parser
from tqdm import tqdm
from typing import List, Dict
class SyllabusTool:
def __init__(self) -> None:
        self.year = "2022"  # the school year to scrape
self.syllabus_dict_list = list()
self.error = 0
        self.csv_list = [os.path.basename(p) for p in glob.glob("./timetable/" + self.year + "/csv/*.csv", recursive=True)
                         if os.path.isfile(p)]  # collect every csv file
self.csv_list.sort()
    # the input file is assumed to be a single line of comma-separated values
def import_syllabus_number(self, filepath: str) -> List[str]:
with open(filepath, "r") as fp:
            numbers = fp.readline().strip().split(",")  # strip() removes the trailing newline
return numbers
def extract_element(self, html: str) -> Dict[str, str]:
soup = BeautifulSoup(html, "html.parser")
elmes = soup.select(".kougi")
element_buff = ""
for elem in elmes:
element_buff += elem.get_text()
lines = [line.strip() for line in element_buff.splitlines()]
text = ",".join(line for line in lines if line).split(",")
try:
syllabus_dict = dict(
kougi=re.sub(
"\【.+?\】", "", unicodedata.normalize("NFKD", text[0])),
nenji=unicodedata.normalize(
"NFKD", (text[3].replace("年次", ""))),
tani=unicodedata.normalize("NFKD", text[4]),
kikan=unicodedata.normalize("NFKD", text[5]),
tantousya=re.sub(
"\(.+?\)", "", unicodedata.normalize("NFKD", text[6])),
)
        except Exception:
syllabus_dict = dict()
return syllabus_dict
def scraping_syllabus(self, number: str, csv: str):
url = "https://www.portal.oit.ac.jp/CAMJWEB/slbssbdr.do?value(risyunen)=" + self.year + "&value(semekikn)=1&value(kougicd)=" + \
number + "&value(crclumcd)=10201200"
try:
html = requests.get(url, timeout=9.0).text
except (Timeout, ConnectionError):
print("\nTimeout numbering:" + number)
sleep(3)
html = requests.get(url, timeout=9.0).text
        # if this times out again we are stuck; needs rework
syllabus_dict = self.extract_element(html)
key_list = (list(syllabus_dict.keys()))
if key_list != [] and self.duplicate_check(syllabus_dict, number):
syllabus_dict["link"] = url
syllabus_dict["numbering"] = number
syllabus_dict["gakka"] = csv[0]
            self.syllabus_dict_list.append(syllabus_dict)  # append to the list
else:
            # error handling for when the page does not exist; could be cleaner
            self.error += 1  # count the errors
def duplicate_check(self, check_syllabus_dict: list, number: str):
for i in range(len(self.syllabus_dict_list)):
syllabus_dict = self.syllabus_dict_list[i]
if syllabus_dict["kougi"] == check_syllabus_dict["kougi"] and \
syllabus_dict["nenji"] == check_syllabus_dict["nenji"] and \
syllabus_dict["kikan"] == check_syllabus_dict["kikan"] and \
syllabus_dict["tantousya"] == check_syllabus_dict["tantousya"] and \
syllabus_dict["tani"] == check_syllabus_dict["tani"]:
self.syllabus_dict_list[i]["numbering"] += " , " + number
return False
return True
def make_syllabus_dict_list(self):
duplicate_check = list()
        for csv in tqdm(self.csv_list, desc="overall progress"):
numbers = self.import_syllabus_number(
"./timetable/" + self.year + "/csv/" + csv)
            numbers = list(set(numbers) - set(duplicate_check))  # remove duplicates
duplicate_check.extend(numbers)
numbers.sort()
duplicate_check.sort()
for number in tqdm(numbers, desc=csv):
self.scraping_syllabus(number, csv)
return duplicate_check
def main(self, *args):
duplicate_check = self.make_syllabus_dict_list()
with open("../web/src/data/" + self.year + ".json", "w", encoding="utf-8") as fp:
json.dump(self.syllabus_dict_list, fp, ensure_ascii=False, indent=4)
with open("./timetable/" + self.year + "/numbers.csv", "w", encoding="utf-8") as fp:
fp.write(",".join(duplicate_check))
        # rewrite the README
date = datetime.datetime.now(datetime.timezone(
datetime.timedelta(hours=+9))).strftime("%Y/%m/%d")
with open("../README.md", "r", encoding="utf-8") as fp:
            s = re.sub("\d{4}/\d{2}/\d{2}", date, fp.read())  # update the last-updated date
s = re.sub("<!-- エラー数=\d{1,4} -->",
"<!-- エラー数=" + str(self.error) + " -->", s) # エラー数の書き換え
with open("../README.md", "w", encoding="utf-8") as fp:
fp.write(s)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument("--input", type=str, required=True, help="") # 入力ファイル名
# parser.add_argument("--output", type=str, required=True, help="") # 出力ファイル名
args = parser.parse_args()
    SyllabusTool().main(**vars(args))  # reuse the args parsed above instead of parsing twice
|
# Create a program that will play the 'cows and bulls' game with the user. The game works
# like this:
#
# Randomly generate a 4-digit number. Ask the user to guess a 4-digit number. For every
# digit that the user guessed correctly in the correct place, they have a 'cow'. For
# every digit the user guessed correctly in the wrong place is a 'bull.' Every time the
# user makes a guess, tell them how many 'cows' and 'bulls' they have. Once the user
# guesses the correct number, the game is over. Keep track of the number of guesses the
# user makes throughout the game and tell the user at the end.
#
# Say the number generated by the computer is 1038. An example interaction could look like
# this:
#
# Welcome to the Cows and Bulls Game!
# Enter a number:
# >>> 1234
# 2 cows, 0 bulls
# >>> 1256
# 1 cow, 1 bull
# Until the user guesses the number.
import random
number = str(random.randint(1000, 9999))  # random 4-digit number
guesses = 0
print("Welcome to the Cows and Bulls Game!")
while True:
    user_input = input("Enter a four digit number here: ")
    if len(user_input) != 4 or not user_input.isdigit():
        print("Please enter exactly four digits.")
        continue
    guesses += 1
    if user_input == number:
        print("Congratulations, you guessed the number!")
        print("It took you", guesses, "guess(es).")
        break
    # cows: correct digit guessed in the correct place
    cows = sum(1 for i in range(4) if user_input[i] == number[i])
    # bulls: correct digit guessed in the wrong place
    bulls = sum(1 for i in range(4)
                if user_input[i] != number[i] and user_input[i] in number)
    print("You have", cows, "cow(s) and", bulls, "bull(s)")
    print("Try again")
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0098_index_has_alert_word_user_messages"),
]
operations = [
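        # Flag bits below are an assumption matching Zulip's UserMessage.flags
        # layout: 8 == "mentioned", 16 == "wildcard_mentioned", so the partial
        # index covers rows where either mention flag is set.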
migrations.RunSQL(
"""
CREATE INDEX IF NOT EXISTS zerver_usermessage_wildcard_mentioned_message_id
ON zerver_usermessage (user_profile_id, message_id)
WHERE (flags & 8) != 0 OR (flags & 16) != 0;
""",
reverse_sql="DROP INDEX zerver_usermessage_wildcard_mentioned_message_id;",
),
]
|
# -*- coding: utf-8 -*-
"""Test casts"""
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
SOURCE = "SELECT CAST($1::int AS boolean)"
CREATE_FUNC = "CREATE FUNCTION int2_bool(smallint) RETURNS boolean " \
"LANGUAGE sql IMMUTABLE AS $_$%s$_$" % SOURCE
CREATE_DOMAIN = "CREATE DOMAIN d1 AS integer"
CREATE_STMT1 = "CREATE CAST (smallint AS boolean) WITH FUNCTION " \
"int2_bool(smallint)"
CREATE_STMT3 = "CREATE CAST (d1 AS integer) WITH INOUT AS IMPLICIT"
DROP_STMT = "DROP CAST IF EXISTS (smallint AS boolean)"
COMMENT_STMT = "COMMENT ON CAST (smallint AS boolean) IS 'Test cast 1'"
class CastToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing casts"""
def test_map_cast_function(self):
"Map a cast with a function"
dbmap = self.to_map([CREATE_FUNC, CREATE_STMT1], superuser=True)
expmap = {'function': 'int2_bool(smallint)', 'context': 'explicit',
'method': 'function'}
assert dbmap['cast (smallint as boolean)'] == expmap
def test_map_cast_inout(self):
"Map a cast with INOUT"
dbmap = self.to_map([CREATE_DOMAIN, CREATE_STMT3])
expmap = {'context': 'implicit', 'method': 'inout',
'depends_on': ['domain d1']}
assert dbmap['cast (d1 as integer)'] == expmap
def test_map_cast_comment(self):
"Map a cast comment"
dbmap = self.to_map([CREATE_FUNC, CREATE_STMT1, COMMENT_STMT],
superuser=True)
assert dbmap['cast (smallint as boolean)']['description'] == \
'Test cast 1'
class CastToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation from input casts"""
def test_create_cast_function(self):
"Create a cast with a function"
stmts = [DROP_STMT, CREATE_FUNC]
inmap = self.std_map()
inmap.update({'cast (smallint as boolean)': {
'function': 'int2_bool(smallint)', 'context': 'explicit',
'method': 'function'}})
sql = self.to_sql(inmap, stmts)
# NOTE(David Chang): This is a hack to get this test to work. We reordered all drops to happen before any other statements because in theory you shouldn't be depending on a function that used to exist for your cast. If you need it, you need to have it defined in your db.yaml to use it (and thus won't be dropped). However, this test is odd in how it runs and I don't think you can hit this case in real usage
assert sql[0] == "DROP FUNCTION int2_bool(smallint)"
assert fix_indent(sql[1]) == CREATE_STMT1
def test_create_cast_inout(self):
"Create a cast with INOUT"
stmts = [CREATE_DOMAIN, "DROP CAST IF EXISTS (d1 AS integer)"]
inmap = self.std_map()
inmap.update({'cast (d1 as integer)': {
'context': 'implicit', 'method': 'inout'}})
inmap['schema public'].update({'domain d1': {'type': 'integer'}})
sql = self.to_sql(inmap, stmts)
assert fix_indent(sql[0]) == CREATE_STMT3
def test_create_cast_schema(self):
"Create a cast using a type/domain in a non-public schema"
stmts = ["CREATE SCHEMA s1", "CREATE DOMAIN s1.d1 AS integer",
"DROP CAST IF EXISTS (integer AS s1.d1)"]
inmap = self.std_map()
inmap.update({'cast (integer as s1.d1)': {
'context': 'assignment', 'method': 'binary coercible'}})
inmap.update({'schema s1': {'domain d1': {'type': 'integer'}}})
sql = self.to_sql(inmap, stmts)
assert fix_indent(sql[0]) == "CREATE CAST (integer AS s1.d1) " \
"WITHOUT FUNCTION AS ASSIGNMENT"
def test_bad_cast_map(self):
"Error creating a cast with a bad map"
inmap = self.std_map()
inmap.update({'(smallint as boolean)': {
'function': 'int2_bool(smallint)', 'context': 'explicit',
'method': 'function'}})
with pytest.raises(KeyError):
self.to_sql(inmap)
def test_drop_cast(self):
"Drop an existing cast"
stmts = [DROP_STMT, CREATE_FUNC, CREATE_STMT1]
sql = self.to_sql(self.std_map(), stmts, superuser=True)
assert sql[0] == "DROP CAST (smallint AS boolean)"
def test_cast_with_comment(self):
"Create a cast with a comment"
inmap = self.std_map()
inmap.update({'cast (smallint as boolean)': {
'description': 'Test cast 1', 'function': 'int2_bool(smallint)',
'context': 'explicit', 'method': 'function'}})
inmap['schema public'].update({'function int2_bool(smallint)': {
'returns': 'boolean', 'language': 'sql', 'immutable': True,
'source': SOURCE}})
sql = self.to_sql(inmap, [DROP_STMT])
    # sql[0] and sql[1] -> SET, CREATE FUNCTION
assert fix_indent(sql[2]) == CREATE_STMT1
assert sql[3] == COMMENT_STMT
def test_comment_on_cast(self):
"Create a comment for an existing cast"
stmts = [DROP_STMT, CREATE_FUNC, CREATE_STMT1]
inmap = self.std_map()
inmap.update({'cast (smallint as boolean)': {
'description': 'Test cast 1', 'function': 'int2_bool(smallint)',
'context': 'explicit', 'method': 'function'}})
inmap['schema public'].update({'function int2_bool(smallint)': {
'returns': 'boolean', 'language': 'sql', 'immutable': True,
'source': SOURCE}})
sql = self.to_sql(inmap, stmts, superuser=True)
assert sql == [COMMENT_STMT]
def test_drop_cast_comment(self):
"Drop a comment on an existing cast"
stmts = [DROP_STMT, CREATE_FUNC, CREATE_STMT1, COMMENT_STMT]
inmap = self.std_map()
inmap.update({'cast (smallint as boolean)': {
'function': 'int2_bool(smallint)', 'context': 'explicit',
'method': 'function'}})
inmap['schema public'].update({'function int2_bool(smallint)': {
'returns': 'boolean', 'language': 'sql', 'immutable': True,
'source': SOURCE}})
assert self.to_sql(inmap, stmts, superuser=True) == \
["COMMENT ON CAST (smallint AS boolean) IS NULL"]
def test_change_cast_comment(self):
"Change existing comment on a cast"
stmts = [DROP_STMT, CREATE_FUNC, CREATE_STMT1, COMMENT_STMT]
inmap = self.std_map()
inmap.update({'cast (smallint as boolean)': {
'description': 'Changed cast 1', 'function': 'int2_bool(smallint)',
'context': 'explicit', 'method': 'function'}})
inmap['schema public'].update({'function int2_bool(smallint)': {
'returns': 'boolean', 'language': 'sql', 'immutable': True,
'source': SOURCE}})
assert self.to_sql(inmap, stmts, superuser=True) == \
["COMMENT ON CAST (smallint AS boolean) IS 'Changed cast 1'"]
def test_cast_function_view_depends(self):
"Cast that depends on a function that depends on a view. See #86"
stmts = ["CREATE TABLE t1 (id integer)"]
inmap = self.std_map()
inmap.update({'cast (v1 as t1)': {
'context': 'explicit', 'function': 'v1_to_t1(v1)',
'method': 'function'}})
inmap['schema public'].update({
'function v1_to_t1(v1)': {'returns': 't1', 'language': 'plpgsql',
'source': "\nDECLARE o t1;\nBEGIN o:= ROW($1.id)::t1;\n" \
"RETURN o;\nEND"},
'table t1': {'columns': [{'id': {'type': 'integer'}}]},
'view v1': {'definition': " SELECT t1.id\n FROM t1;",
'depends_on': ['table t1']}})
sql = self.to_sql(inmap, stmts)
assert len(sql) == 3
assert fix_indent(sql[0]) == "CREATE VIEW v1 AS SELECT t1.id FROM t1"
assert fix_indent(sql[1]) == "CREATE FUNCTION v1_to_t1(v1) " \
"RETURNS t1 LANGUAGE plpgsql AS $_$\nDECLARE o t1;\nBEGIN " \
"o:= ROW($1.id)::t1;\nRETURN o;\nEND$_$"
assert fix_indent(sql[2]) == "CREATE CAST (v1 AS t1) WITH " \
"FUNCTION v1_to_t1(v1)"
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enums used in contrastive learning code.
They're only here, rather than next to the code that uses them, so that they can
be used as hyperparameter values without pulling in heavy Tensorflow
dependencies to the hyperparameter code.
"""
import enum
@enum.unique
class ModelMode(enum.Enum):
TRAIN = 1
EVAL = 2
INFERENCE = 3
@enum.unique
class AugmentationType(enum.Enum):
"""Valid augmentation types."""
# SimCLR augmentation (Chen et al, https://arxiv.org/abs/2002.05709).
SIMCLR = 's'
# AutoAugment augmentation (Cubuk et al, https://arxiv.org/abs/1805.09501).
AUTOAUGMENT = 'a'
# RandAugment augmentation (Cubuk et al, https://arxiv.org/abs/1909.13719).
RANDAUGMENT = 'r'
# SimCLR combined with RandAugment.
STACKED_RANDAUGMENT = 'sr'
# No augmentation.
IDENTITY = 'i'
@enum.unique
class LossContrastMode(enum.Enum):
ALL_VIEWS = 'a' # All views are contrasted against all other views.
ONE_VIEW = 'o' # Only one view is contrasted against all other views.
@enum.unique
class LossSummationLocation(enum.Enum):
OUTSIDE = 'o' # Summation location is outside of logarithm
INSIDE = 'i' # Summation location is inside of logarithm
@enum.unique
class LossDenominatorMode(enum.Enum):
ALL = 'a' # All negatives and all positives
ONE_POSITIVE = 'o' # All negatives and one positive
ONLY_NEGATIVES = 'n' # Only negatives
@enum.unique
class Optimizer(enum.Enum):
RMSPROP = 'r'
MOMENTUM = 'm'
LARS = 'l'
ADAM = 'a'
NESTEROV = 'n'
@enum.unique
class EncoderArchitecture(enum.Enum):
RESNET_V1 = 'r1'
RESNEXT = 'rx'
@enum.unique
class DecayType(enum.Enum):
COSINE = 'c'
EXPONENTIAL = 'e'
PIECEWISE_LINEAR = 'p'
NO_DECAY = 'n'
@enum.unique
class EvalCropMethod(enum.Enum):
"""Methods of cropping eval images to the target dimensions."""
# Resize so that min image dimension is IMAGE_SIZE + CROP_PADDING, then crop
# the central IMAGE_SIZExIMAGE_SIZE square.
RESIZE_THEN_CROP = 'rc'
# Crop a central square of side length
# natural_image_min_dim * IMAGE_SIZE/(IMAGE_SIZE+CROP_PADDING), then resize to
# IMAGE_SIZExIMAGE_SIZE.
CROP_THEN_RESIZE = 'cr'
# Crop the central IMAGE_SIZE/(IMAGE_SIZE+CROP_PADDING) pixels along each
# dimension, preserving the natural image aspect ratio, then resize to
# IMAGE_SIZExIMAGE_SIZE, which distorts the image.
CROP_THEN_DISTORT = 'cd'
# Do nothing. Requires that the input image is already the desired size.
IDENTITY = 'i'
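# Worked example (assuming IMAGE_SIZE=224 and CROP_PADDING=32): RESIZE_THEN_CROP
# resizes so the shorter image side is 224 + 32 = 256 and then crops the central
# 224x224 square, while CROP_THEN_RESIZE first crops a central square of side
# min_dim * 224/256 and then resizes it to 224x224.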
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a simple "negative compile" test for C++ on linux.
Sometimes a C++ API needs to ensure that various usages cannot compile. To
enable unittesting of these assertions, we use this python script to
invoke gcc on a source file and assert that compilation fails.
For more info, see:
http://dev.chromium.org/developers/testing/no-compile-tests
"""
import StringIO
import ast
import os
import re
import select
import subprocess
import sys
import time
# Matches lines that start with #if and have the substring TEST in the
# conditional. Also extracts the comment. This allows us to search for
# lines like the following:
#
# #ifdef NCTEST_NAME_OF_TEST // [r'expected output']
# #if defined(NCTEST_NAME_OF_TEST) // [r'expected output']
# #if NCTEST_NAME_OF_TEST // [r'expected output']
# #elif NCTEST_NAME_OF_TEST // [r'expected output']
# #elif DISABLED_NCTEST_NAME_OF_TEST // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
# Matches and removes the defined() preprocessor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
# #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
# Used to grab the expectation from comment at the end of an #ifdef. See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a python array of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
// %s
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
"""
# The log message on a test completion.
LOG_TEMPLATE = """
TEST(%s, %s) took %f secs. Started at %f, ended at %f.
"""
# The GUnit test function to output for a successful or disabled test.
GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""
# Timeout constants.
NCTEST_TERMINATE_TIMEOUT_SEC = 60
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
"""Make sure the arguments being passed in are sane."""
assert parallelism >= 1
assert type(sourcefile_path) is str
assert type(cflags) is list
for flag in cflags:
assert(type(flag) is str)
assert type(resultfile_path) is str
def ParseExpectation(expectation_string):
"""Extracts expectation definition from the trailing comment on the ifdef.
See the comment on NCTEST_CONFIG_RE for examples of the format we are parsing.
Args:
expectation_string: A string like "// [r'some_regex']"
Returns:
A list of compiled regular expressions indicating all possible valid
compiler outputs. If the list is empty, all outputs are considered valid.
"""
assert expectation_string is not None
match = EXTRACT_EXPECTATION_RE.match(expectation_string)
assert match
raw_expectation = ast.literal_eval(match.group(1))
assert type(raw_expectation) is list
expectation = []
for regex_str in raw_expectation:
assert type(regex_str) is str
expectation.append(re.compile(regex_str))
return expectation
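# For illustration (a hypothetical trailing comment), ParseExpectation of
# "// [r'invalid conversion']" returns [re.compile(r'invalid conversion')],
# while "// []" yields an empty list, meaning any compiler output is accepted
# as long as the compilation itself fails.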
def ExtractTestConfigs(sourcefile_path, suite_name):
"""Parses the source file for test configurations.
Each no-compile test in the file is separated by an ifdef macro. We scan
the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like
they demark one no-compile test and try to extract the test configuration
from that.
Args:
sourcefile_path: The path to the source file.
suite_name: The name of the test suite.
Returns:
A list of test configurations. Each test configuration is a dictionary of
the form:
{ name: 'NCTEST_NAME'
suite_name: 'SOURCE_FILE_NAME'
expectations: [re.Pattern, re.Pattern] }
The |suite_name| is used to generate a pretty gtest output on successful
completion of the no compile test.
The compiled regexps in |expectations| define the valid outputs of the
compiler. If any one of the listed patterns matches either the stderr or
stdout from the compilation, and the compilation failed, then the test is
  considered to have succeeded. If the list is empty, then we ignore the
compiler output and just check for failed compilation. If |expectations|
is actually None, then this specifies a compiler sanity check test, which
should expect a SUCCESSFUL compilation.
"""
sourcefile = open(sourcefile_path, 'r')
# Start with at least the compiler sanity test. You need to always have one
# sanity test to show that compiler flags and configuration are not just
# wrong. Otherwise, having a misconfigured compiler, or an error in the
# shared portions of the .nc file would cause all tests to erroneously pass.
test_configs = []
for line in sourcefile:
match_result = NCTEST_CONFIG_RE.match(line)
if not match_result:
continue
groups = match_result.groups()
# Grab the name and remove the defined() predicate if there is one.
name = groups[0]
strip_result = STRIP_DEFINED_RE.match(name)
if strip_result:
name = strip_result.group(1)
# Read expectations if there are any.
test_configs.append({'name': name,
'suite_name': suite_name,
'expectations': ParseExpectation(groups[1])})
sourcefile.close()
return test_configs
def StartTest(sourcefile_path, cflags, config):
"""Start one negative compile test.
Args:
sourcefile_path: The path to the source file.
cflags: An array of strings with all the CFLAGS to give to gcc.
config: A dictionary describing the test. See ExtractTestConfigs
for a description of the config format.
Returns:
A dictionary containing all the information about the started test. The
fields in the dictionary are as follows:
{ 'proc': A subprocess object representing the compiler run.
'cmdline': The executed command line.
'name': The name of the test.
'suite_name': The suite name to use when generating the gunit test
result.
'terminate_timeout': The timestamp in seconds since the epoch after
which the test should be terminated.
'kill_timeout': The timestamp in seconds since the epoch after which
the test should be given a hard kill signal.
'started_at': A timestamp in seconds since the epoch for when this test
was started.
'aborted_at': A timestamp in seconds since the epoch for when this test
was aborted. If the test completed successfully,
this value is 0.
'finished_at': A timestamp in seconds since the epoch for when this
test was successfully complete. If the test is aborted,
or running, this value is 0.
'expectations': A dictionary with the test expectations. See
ParseExpectation() for the structure.
}
"""
# TODO(ajwong): Get the compiler from gyp.
cmdline = [os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../third_party/llvm-build/Release+Asserts/bin',
'clang++')]
cmdline.extend(cflags)
name = config['name']
expectations = config['expectations']
if expectations is not None:
cmdline.append('-D%s' % name)
cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++',
sourcefile_path])
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
now = time.time()
return {'proc': process,
'cmdline': ' '.join(cmdline),
'name': name,
'suite_name': config['suite_name'],
'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
'started_at': now,
'aborted_at': 0,
'finished_at': 0,
'expectations': expectations}
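# For illustration (hypothetical file names), a config named NCTEST_FOO in
# foo_unittest.nc is compiled roughly as:
#   clang++ <cflags...> -DNCTEST_FOO -o /dev/null -c -x c++ foo_unittest.nc
# and is expected to fail, while the sanity config (expectations=None) omits
# the -D define and must compile successfully.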
def PassTest(resultfile, resultlog, test):
"""Logs the result of a test started by StartTest(), or a disabled test
configuration.
Args:
resultfile: File object for .cc file that results are written to.
resultlog: File object for the log file.
test: An instance of the dictionary returned by StartTest(), a
configuration from ExtractTestConfigs().
"""
resultfile.write(GUNIT_TEMPLATE % (
test['suite_name'], test['name']))
# The 'started_at' key is only added if a test has been started.
if 'started_at' in test:
resultlog.write(LOG_TEMPLATE % (
test['suite_name'], test['name'],
test['finished_at'] - test['started_at'],
test['started_at'], test['finished_at']))
def FailTest(resultfile, test, error, stdout=None, stderr=None):
"""Logs the result of a test started by StartTest()
Args:
resultfile: File object for .cc file that results are written to.
test: An instance of the dictionary returned by StartTest()
error: The printable reason for the failure.
stdout: The test's output to stdout.
stderr: The test's output to stderr.
"""
resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
if stdout and len(stdout) != 0:
resultfile.write('#error "%s stdout:"\n' % test['name'])
for line in stdout.split('\n'):
resultfile.write('#error " %s:"\n' % line)
if stderr and len(stderr) != 0:
resultfile.write('#error "%s stderr:"\n' % test['name'])
for line in stderr.split('\n'):
resultfile.write('#error " %s"\n' % line)
resultfile.write('\n')
def WriteStats(resultlog, suite_name, timings):
"""Logs the peformance timings for each stage of the script.
Args:
resultlog: File object for the log file.
suite_name: The name of the GUnit suite this test belongs to.
timings: Dictionary with timestamps for each stage of the script run.
"""
stats_template = """
TEST(%s): Started %f, Ended %f, Total %fs, Extract %fs, Compile %fs, Process %fs
"""
total_secs = timings['results_processed'] - timings['started']
extract_secs = timings['extract_done'] - timings['started']
compile_secs = timings['compile_done'] - timings['extract_done']
process_secs = timings['results_processed'] - timings['compile_done']
resultlog.write(stats_template % (
suite_name, timings['started'], timings['results_processed'], total_secs,
extract_secs, compile_secs, process_secs))
def ProcessTestResult(resultfile, resultlog, test):
"""Interprets and logs the result of a test started by StartTest()
Args:
resultfile: File object for .cc file that results are written to.
resultlog: File object for the log file.
test: The dictionary from StartTest() to process.
"""
# Snap a copy of stdout and stderr into the test dictionary immediately
  # because we can only call communicate() once on the Popen object, and lots
# below will want access to it.
proc = test['proc']
(stdout, stderr) = proc.communicate()
if test['aborted_at'] != 0:
FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
(test['started_at'], test['aborted_at']))
return
if proc.poll() == 0:
# Handle failure due to successful compile.
FailTest(resultfile, test,
'Unexpected successful compilation.',
stdout, stderr)
return
else:
# Check the output has the right expectations. If there are no
# expectations, then we just consider the output "matched" by default.
if len(test['expectations']) == 0:
PassTest(resultfile, resultlog, test)
return
# Otherwise test against all expectations.
for regexp in test['expectations']:
if (regexp.search(stdout) is not None or
regexp.search(stderr) is not None):
PassTest(resultfile, resultlog, test)
return
expectation_str = ', '.join(
["r'%s'" % regexp.pattern for regexp in test['expectations']])
FailTest(resultfile, test,
'Expectations [%s] did not match output.' % expectation_str,
stdout, stderr)
return
def CompleteAtLeastOneTest(executing_tests):
"""Blocks until at least one task is removed from executing_tests.
This function removes completed tests from executing_tests, logging failures
and output. If no tests can be removed, it will enter a poll-loop until one
test finishes or times out. On a timeout, this function is responsible for
terminating the process in the appropriate fashion.
Args:
executing_tests: A dict mapping a string containing the test name to the
test dict return from StartTest().
Returns:
A list of tests that have finished.
"""
finished_tests = []
busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
while len(finished_tests) == 0:
# If we don't make progress for too long, assume the code is just dead.
assert busy_loop_timeout > time.time()
# Select on the output pipes.
read_set = []
for test in executing_tests.values():
read_set.extend([test['proc'].stderr, test['proc'].stdout])
select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)
# Now attempt to process results.
now = time.time()
for test in executing_tests.values():
proc = test['proc']
if proc.poll() is not None:
test['finished_at'] = now
finished_tests.append(test)
elif test['terminate_timeout'] < now:
proc.terminate()
test['aborted_at'] = now
elif test['kill_timeout'] < now:
proc.kill()
test['aborted_at'] = now
for test in finished_tests:
del executing_tests[test['name']]
return finished_tests
def main():
if len(sys.argv) < 5 or sys.argv[4] != '--':
print ('Usage: %s <parallelism> <sourcefile> <resultfile> -- <cflags...>' %
sys.argv[0])
sys.exit(1)
# Force us into the "C" locale so the compiler doesn't localize its output.
# In particular, this stops gcc from using smart quotes when in english UTF-8
# locales. This makes the expectation writing much easier.
os.environ['LC_ALL'] = 'C'
parallelism = int(sys.argv[1])
sourcefile_path = sys.argv[2]
resultfile_path = sys.argv[3]
cflags = sys.argv[5:]
timings = {'started': time.time()}
ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)
# Convert filename from underscores to CamelCase.
words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
words = [w.capitalize() for w in words]
suite_name = 'NoCompile' + ''.join(words)
test_configs = ExtractTestConfigs(sourcefile_path, suite_name)
timings['extract_done'] = time.time()
resultfile = StringIO.StringIO()
resultlog = StringIO.StringIO()
resultfile.write(RESULT_FILE_HEADER % sourcefile_path)
# Run the no-compile tests, but ensure we do not run more than |parallelism|
# tests at once.
timings['header_written'] = time.time()
executing_tests = {}
finished_tests = []
cflags.extend(['-MMD', '-MF', resultfile_path + '.d', '-MT', resultfile_path])
test = StartTest(
sourcefile_path,
cflags,
{ 'name': 'NCTEST_SANITY',
'suite_name': suite_name,
'expectations': None,
})
executing_tests[test['name']] = test
for config in test_configs:
# CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
# acts as a semaphore. We cannot use threads + a real semaphore because
# subprocess forks, which can cause all sorts of hilarity with threads.
if len(executing_tests) >= parallelism:
finished_tests.extend(CompleteAtLeastOneTest(executing_tests))
if config['name'].startswith('DISABLED_'):
PassTest(resultfile, resultlog, config)
else:
test = StartTest(sourcefile_path, cflags, config)
assert test['name'] not in executing_tests
executing_tests[test['name']] = test
# If there are no more test to start, we still need to drain the running
# ones.
while len(executing_tests) > 0:
finished_tests.extend(CompleteAtLeastOneTest(executing_tests))
timings['compile_done'] = time.time()
finished_tests = sorted(finished_tests, key=lambda test: test['name'])
for test in finished_tests:
if test['name'] == 'NCTEST_SANITY':
_, stderr = test['proc'].communicate()
return_code = test['proc'].poll()
if return_code != 0:
sys.stderr.write(stderr)
continue
ProcessTestResult(resultfile, resultlog, test)
timings['results_processed'] = time.time()
WriteStats(resultlog, suite_name, timings)
with open(resultfile_path + '.log', 'w') as fd:
fd.write(resultlog.getvalue())
if return_code == 0:
with open(resultfile_path, 'w') as fd:
fd.write(resultfile.getvalue())
resultfile.close()
sys.exit(return_code)
if __name__ == '__main__':
main()
|
'''
Example usage:
python demo.py \
--input_model=YOUR_MODEL_PATH \
--input_video=YOUR_VIDEO_PATH
'''
import cv2
import numpy as np
import os
import sys
import tensorflow as tf
from samples import gesture
from mrcnn import utils
from mrcnn import model as modellib
flags = tf.app.flags
flags.DEFINE_string('input_model', 'mask_rcnn_gesture_0001.h5', 'Input model to test')
flags.DEFINE_string('input_video', 'test.avi', 'Input video to test')
FLAGS = flags.FLAGS
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
GESTURE_MODEL_PATH = os.path.join(ROOT_DIR, FLAGS.input_model)
class InferenceConfig(gesture.GestureConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
model = modellib.MaskRCNN(mode="inference",
model_dir=MODEL_DIR,
config=config
)
model.load_weights(GESTURE_MODEL_PATH,
by_name=True)
class_names = ['BG',
'flip',
'flop'
]
def random_colors(N):
np.random.seed(1)
colors = [tuple(255 * np.random.rand(3)) for _ in range(N)]
return colors
colors = random_colors(len(class_names))
class_dict = {name: color for name, color in zip(class_names, colors)}
def apply_mask(image, mask, color, alpha=0.5):
"""apply mask to image"""
for n, c in enumerate(color):
image[:, :, n] = np.where(
mask == 1,
image[:, :, n] * (1 - alpha) + alpha * c,
image[:, :, n]
)
return image
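# With alpha=0.5, each masked pixel becomes an even blend of the original
# value and the class colour: e.g. a channel value of 100 with colour 200
# blends to 100 * 0.5 + 0.5 * 200 = 150.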
def display_instances(image, boxes, masks, ids, names, scores):
"""
    take the image and results and apply the mask, box, and label
"""
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
y1, x1, y2, x2 = boxes[i]
label = names[ids[i]]
print(label)
color = class_dict[label]
score = scores[i] if scores is not None else None
caption = '{} {:.2f}'.format(label, score) if score else label
mask = masks[:, :, i]
image = apply_mask(image, mask, color)
image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
image = cv2.putText(
image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
)
return image
if __name__ == '__main__':
"""
test everything
"""
capture = cv2.VideoCapture(FLAGS.input_video)
    # These 2 lines can be removed if you don't have a 1080p camera.
# capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
# capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
# Recording Video
fps = 15.0
width = int(capture.get(3))
height = int(capture.get(4))
fcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
out = cv2.VideoWriter("recording_video.avi", fcc, fps, (width, height))
while True:
        ret, frame = capture.read()
        if not ret:
            # Stop when the video ends or a frame cannot be read.
            break
        results = model.detect([frame], verbose=0)
r = results[0]
frame = display_instances(
frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
cv2.imshow('frame', frame)
# Recording Video
out.write(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
|
"""
This module contains a number of other commands that can be run via the cli.
All classes in this submodule which inherit the baseclass `airbox.commands.base.Command` are automatically included in
the possible commands to execute via the commandline. The commands can be called using their `name` property.
"""
from logging import getLogger
from .backup import BackupCommand
from .backup_sync import BackupSyncCommand
from .basic_plot import BasicPlotCommand
from .create_mounts import CreateMountsCommand
from .install import InstallCommand
from .print_fstab import PrintFstabCommand
from .run_schedule import RunScheduleCommand
from .spectronus_subset import SpectronusSubsetCommand
from .subset import SubsetCommand
logger = getLogger(__name__)
# Commands are registered below
_commands = [
BackupCommand(),
BackupSyncCommand(),
BasicPlotCommand(),
CreateMountsCommand(),
InstallCommand(),
PrintFstabCommand(),
RunScheduleCommand(),
SpectronusSubsetCommand(),
SubsetCommand()
]
def find_commands():
"""
Finds all the Commands in this package
    :return: List of Command classes within this package
"""
# TODO: Make this actually do that. For now commands are manually registered
pass
def initialise_commands(parser):
"""
Initialise the parser with the commandline arguments for each parser
:param parser:
:return:
"""
find_commands()
for c in _commands:
p = parser.add_parser(c.name, help=c.help)
c.initialise_parser(p)
def run_command(cmd_name):
"""
Attempts to run a command
    :param cmd_name: Name of the command to run
"""
for c in _commands:
if cmd_name == c.name:
return c.run()
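# Typical wiring (a sketch; assumes argparse subparsers and that each Command
# exposes `name`, `help`, `initialise_parser` and `run` as used above):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   initialise_commands(parser.add_subparsers(dest='command'))
#   args = parser.parse_args()
#   run_command(args.command)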
|
from urllib.parse import urlparse
from typing import List
import torch
import numpy as np
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def log_success(msg: str):
print(f'{bcolors.OKGREEN}SUCCESS:\t{bcolors.ENDC}{msg}')
def log_fail(msg: str):
print(f'{bcolors.FAIL}FAILURE:\t{bcolors.ENDC}{msg}')
def tensor_to_tuple_list(tensor: torch.Tensor):
    """Converts a tensor of shape (2, n), i.e. [[x, ...], [y, ...]],
    into a list of tuples of shape [(x, y), ...].
    """
    assert len(tensor.shape) == 2, \
        "The tensor should be of shape (2, n)"
edges = [(int(edge[0]), int(edge[1])) \
for edge in zip(tensor[0], tensor[1])]
return edges
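# Example: tensor_to_tuple_list(torch.tensor([[0, 1], [1, 2]])) -> [(0, 1), (1, 2)]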
def extract_domain_name(url: str):
"""Returns only the domain part of an url.
"""
url = '{}://{}'.format(urlparse(url).scheme,
urlparse(url).netloc)
i = 1 if url.find('www.') != -1 else 0
url = urlparse(url)
return '.'.join(url.hostname.split('.')[i:])
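# Example: extract_domain_name('https://www.example.com/a/b') -> 'example.com'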
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
def normalize_www_prefix(url: str):
is_https = url.startswith('https://')
url = remove_prefix(url, 'https://')
url = remove_prefix(url, 'http://')
url = remove_prefix(url, 'www.')
return f'http{"s" if is_https else ""}://www.{url}'
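# Example: normalize_www_prefix('https://example.com') -> 'https://www.example.com',
# while normalize_www_prefix('http://www.example.com') stays 'http://www.example.com'.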
def mean_std_error(vals: List):
mean = np.mean(vals)
std = np.std(vals)
std_error = std / np.sqrt(len(vals))
return mean, std_error
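# Example: mean_std_error([1, 2, 3]) -> (2.0, ~0.471), i.e. the mean and the
# standard error of the mean, std / sqrt(n) (population std, ddof=0).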
|
import time
import concurrent.futures
def sleep_func(how_long: int):
print(f'Sleeping for {how_long} seconds')
time.sleep(how_long)
return f'Finished sleeping for {how_long} seconds.'
if __name__ == '__main__':
time_start = time.time()
sleep_seconds = [1, 2, 3, 1, 2, 3]
with concurrent.futures.ProcessPoolExecutor() as ppe:
out = []
for sleep_second in sleep_seconds:
out.append(ppe.submit(sleep_func, sleep_second))
for curr in concurrent.futures.as_completed(out):
print(curr.result())
time_stop = time.time()
print(f'Took {round(time_stop - time_start, 2)} seconds to execute!')
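    # Note: as_completed() yields futures in completion order, so the shorter
    # sleeps above are printed before the longer ones (subject to how many
    # worker processes the pool starts).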
|
import numpy as np
import pandas as pd
import argparse
import os
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
def prepare_data(data_path):
"""Returns dataframe with features."""
# Get data
df = pd.read_csv(data_path)
# Remove NaNs
df = df.dropna()
# Convert date to datetime
df['date'] = pd.to_datetime(df.date)
    # Create an age variable
df['age'] = df.index.astype('int')
# Create a day of week field
df['day'] = df.date.dt.dayofweek
# Create a month of year field
df['month'] = df.date.dt.month
# Create a boolean for US federal holidays
holidays = calendar().holidays(start=df.date.min(), end=df.date.max())
df['holiday'] = df['date'].isin(holidays).apply(int)
# Rearrange columns
df = df[
[
'date',
'count',
'age',
'month',
'day',
'holiday'
]
]
# Create monthly dummies
tmp = pd.get_dummies(df.month)
tmp.columns = ['month' + str(value) for value in tmp.columns]
df = pd.concat([df, tmp], axis=1)
# Create daily dummies
tmp = pd.get_dummies(df.day)
tmp.columns = ['day' + str(value) for value in tmp.columns]
df = pd.concat([df, tmp], axis=1)
# Reset index
df = df.reset_index(drop=True)
# Log transform count data
df['count'] = np.log1p(df['count'])
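    # (np.expm1 is the exact inverse of np.log1p, for mapping model
    # predictions back to the original count scale.)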
# Drop unnecessary columns
df = df.drop(['month', 'day', 'age'], axis=1)
df = df.dropna()
return df
def run():
parser = argparse.ArgumentParser(description='Prepare data')
parser.add_argument('--data_path',
default='raw/github_dau_2011-2018.csv')
parser.add_argument('--output_path',
default='processed/github_dau_2011-2018.pkl')
args = parser.parse_args()
# Get the data
df = prepare_data(args.data_path)
# Store data
path = os.path.dirname(args.output_path)
if not os.path.exists(path):
os.makedirs(path)
df.to_pickle(args.output_path, protocol=4)
if __name__ == '__main__':
run()
|
import urllib.parse
import xmatters.auth
import xmatters.connection
import xmatters.endpoints
import xmatters.errors
class XMSession(object):
"""
Starting class used to interact with xMatters API.
:param str xm_url: Name of xMatters instance, xMatters instance url, or xMatters instance API base url
    :type xm_url: str
:keyword timeout: timeout (in seconds) for requests. Defaults to 5.
:type timeout: int
:keyword max_retries: maximum number of request retries to attempt. Defaults to 3.
:type max_retries: int
"""
_endpoints = {'attachments': xmatters.endpoints.AttachmentsEndpoint,
'audits': xmatters.endpoints.AuditsEndpoint,
'conference-bridges': xmatters.endpoints.ConferenceBridgesEndpoint,
'device-names': xmatters.endpoints.DeviceNamesEndpoint,
'device-types': xmatters.endpoints.DeviceTypesEndpoint,
'devices': xmatters.endpoints.DevicesEndpoint,
'dynamic-teams': xmatters.endpoints.DynamicTeamsEndpoint,
                  'events': xmatters.endpoints.EventsEndpoint,
                  'event-suppressions': xmatters.endpoints.EventSuppressionsEndpoint,
'forms': xmatters.endpoints.FormsEndpoint,
'imports': xmatters.endpoints.ImportsEndpoint,
'groups': xmatters.endpoints.GroupsEndpoint,
                  'incidents': xmatters.endpoints.IncidentsEndpoint,
'on-call': xmatters.endpoints.OnCallEndpoint,
'on-call-summary': xmatters.endpoints.OnCallSummaryEndpoint,
'people': xmatters.endpoints.PeopleEndpoint,
'plans': xmatters.endpoints.PlansEndpoint,
'roles': xmatters.endpoints.RolesEndpoint,
'scenarios': xmatters.endpoints.ScenariosEndpoint,
'services': xmatters.endpoints.ServicesEndpoint,
'sites': xmatters.endpoints.SitesEndpoint,
'subscriptions': xmatters.endpoints.SubscriptionsEndpoint,
'subscription-forms': xmatters.endpoints.SubscriptionFormsEndpoint,
'temporary-absences': xmatters.endpoints.TemporaryAbsencesEndpoint}
def __init__(self, xm_url, **kwargs):
p_url = urllib.parse.urlparse(xm_url)
if '.' not in xm_url:
instance_url = 'https://{}.xmatters.com'.format(p_url.path)
else:
instance_url = 'https://{}'.format(p_url.netloc) if p_url.netloc else 'https://{}'.format(p_url.path)
self._api_base_url = '{}/api/xm/1'.format(instance_url)
self.con = None
self._kwargs = kwargs
def set_authentication(self, username=None, password=None, client_id=None, **kwargs):
"""
| Set the authentication method when interacting with the API.
| OAuth2 authentication is used if a client_id is provided; otherwise basic authentication is used.
:param username: xMatters username
:type username: str
:param password: xMatters password
:type password: str
:param client_id: xMatters instance client id
:type client_id: str
:keyword token: token object
:type token: dict
:keyword refresh_token: refresh token
:type refresh_token: str
:keyword token_storage: Class instance used to store token.
            Any class instance should work as long as it has :meth:`read_token` and :meth:`write_token` methods.
:type token_storage: class
:return: self
:rtype: :class:`~xmatters.session.XMSession`
"""
        # add init kwargs in order to pass them to the Connection init
kwargs.update(self._kwargs)
if client_id:
self.con = xmatters.auth.OAuth2Auth(self._api_base_url, client_id, username, password, **kwargs)
elif None not in (username, password):
self.con = xmatters.auth.BasicAuth(self._api_base_url, username, password, **kwargs)
else:
raise xmatters.errors.AuthorizationError('unable to determine authentication method')
# return self so method can be chained
return self
def get_endpoint(self, endpoint):
"""
Get top-level endpoint.
:param endpoint: top-level endpoint
:type endpoint: str
:return: Endpoint object
:rtype: object
Example:
.. code-block:: python
from xmatters import XMSession
xm = XMSession('my-instance')
xm.set_authentication(username='my-username', password='my-password')
people_endpoint = xm.get_endpoint('people')
"""
endpoint_object = self._endpoints.get(endpoint.strip('/'))
if not endpoint_object:
raise NotImplementedError('{} endpoint is not implemented'.format(endpoint))
return endpoint_object(self)
def attachments_endpoint(self):
"""
Get the '/attachments' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.AttachmentsEndpoint`
"""
return xmatters.endpoints.AttachmentsEndpoint(self)
def audits_endpoint(self):
"""
Get the '/audits' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.AuditsEndpoint`
"""
return xmatters.endpoints.AuditsEndpoint(self)
def conference_bridges_endpoint(self):
"""
Get the '/conference-bridges' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.ConferenceBridgesEndpoint`
"""
return xmatters.endpoints.ConferenceBridgesEndpoint(self)
def device_names_endpoint(self):
"""
Get the '/device-names' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.DeviceNamesEndpoint`
"""
return xmatters.endpoints.DeviceNamesEndpoint(self)
def device_types_endpoint(self):
"""
Get the '/device-types' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.DeviceTypesEndpoint`
"""
return xmatters.endpoints.DeviceTypesEndpoint(self)
def devices_endpoint(self):
"""
        Get the '/devices' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.DevicesEndpoint`
"""
return xmatters.endpoints.DevicesEndpoint(self)
def dynamic_teams_endpoint(self):
"""
Get the '/dynamic-teams' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.DynamicTeamsEndpoint`
"""
return xmatters.endpoints.DynamicTeamsEndpoint(self)
def events_endpoint(self):
"""
Get the '/events' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.EventsEndpoint`
"""
return xmatters.endpoints.EventsEndpoint(self)
def event_suppressions_endpoint(self):
"""
Get the '/event-suppressions' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.EventSuppressionsEndpoint`
"""
return xmatters.endpoints.EventSuppressionsEndpoint(self)
def forms_endpoint(self):
"""
Get the '/forms' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.FormsEndpoint`
"""
return xmatters.endpoints.FormsEndpoint(self)
def imports_endpoint(self):
"""
Get the '/imports' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.ImportsEndpoint`
"""
return xmatters.endpoints.ImportsEndpoint(self)
def groups_endpoint(self):
"""
Get the '/groups' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.GroupsEndpoint`
"""
return xmatters.endpoints.GroupsEndpoint(self)
def incidents_endpoint(self):
"""
Get the '/incidents' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.IncidentsEndpoint`
"""
return xmatters.endpoints.IncidentsEndpoint(self)
def oncall_endpoint(self):
"""
Get the '/on-call' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.OnCallEndpoint`
"""
return xmatters.endpoints.OnCallEndpoint(self)
def oncall_summary_endpoint(self):
"""
Get the '/on-call-summary' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.OnCallSummaryEndpoint`
"""
return xmatters.endpoints.OnCallSummaryEndpoint(self)
def people_endpoint(self):
"""
Get the '/people' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.PeopleEndpoint`
"""
return xmatters.endpoints.PeopleEndpoint(self)
def plans_endpoint(self):
"""
Get the '/plans' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.PlansEndpoint`
"""
return xmatters.endpoints.PlansEndpoint(self)
def roles_endpoint(self):
"""
Get the '/roles' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.RolesEndpoint`
"""
return xmatters.endpoints.RolesEndpoint(self)
def scenarios_endpoint(self):
"""
Get the '/scenarios' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.ScenariosEndpoint`
"""
return xmatters.endpoints.ScenariosEndpoint(self)
def services_endpoint(self):
"""
Get the '/services' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.ServicesEndpoint`
"""
return xmatters.endpoints.ServicesEndpoint(self)
def sites_endpoint(self):
"""
Get the '/sites' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.SitesEndpoint`
"""
return xmatters.endpoints.SitesEndpoint(self)
def subscriptions_endpoint(self):
"""
Get the '/subscriptions' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.SubscriptionsEndpoint`
"""
return xmatters.endpoints.SubscriptionsEndpoint(self)
def subscription_forms_endpoint(self):
"""
Get the '/subscription-forms' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.SubscriptionFormsEndpoint`
"""
return xmatters.endpoints.SubscriptionFormsEndpoint(self)
def temporary_absences_endpoint(self):
"""
Get the '/temporary-absences' top-level endpoint.
:return: Endpoint
:rtype: :class:`~xmatters.endpoints.TemporaryAbsencesEndpoint`
"""
return xmatters.endpoints.TemporaryAbsencesEndpoint(self)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def __str__(self):
return self.__repr__()
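# Minimal usage sketch (hypothetical instance name and credentials):
#
#   xm = XMSession('my-instance').set_authentication(username='u', password='p')
#   people = xm.people_endpoint()        # or: xm.get_endpoint('people')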
|
from typeguard.importhook import install_import_hook
def pytest_sessionstart(session):
install_import_hook(packages=["cumulusci"])
|
from typing import Dict, Any
from uyaml.loader import Yaml
from badoo.connections.web import BrowserSettings
class Credentials:
"""The class represents a credentials as an object."""
def __init__(self, username: str, password: str) -> None:
self._username: str = username
self._password: str = password
def __str__(self):
        return f'{self.__class__.__name__}' \
               f'[user = "{self.username}"; pass = "{self.password}"]'
@property
def username(self) -> str:
"""Return a user name."""
return self._username
@property
def password(self) -> str:
"""Return a password."""
return self._password
class _Browser(BrowserSettings):
"""Represents browser settings defined in setup file."""
def __init__(self, data: Dict[str, Any]) -> None:
self._data: Dict[str, Any] = data
def grid_url(self) -> str:
return self._data["grid-url"]
def proxy(self) -> str:
return self._data["proxy"]
class _Badoo:
"""Represents badoo settings defined in setup file."""
def __init__(self, data: Dict[str, Any]) -> None:
self._data: Dict[str, Any] = data
def credentials(self) -> Credentials:
return Credentials(
self._data["credentials"]["login"],
self._data["credentials"]["password"],
)
def likes(self) -> int:
return self._data["likes"]
def intro_message(self) -> str:
return self._data["intro-massage"]
class Setup:
def __init__(self, yaml: Yaml) -> None:
self._data: Dict[str, Any] = yaml.section(name="setup")
def browser(self) -> BrowserSettings:
return _Browser(self._data["browser"])
def badoo(self) -> _Badoo:
return _Badoo(self._data["badoo"])
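# Minimal usage sketch (assumes a uyaml.loader.Yaml instance backed by a file
# with a top-level "setup" section containing "browser" and "badoo" keys):
#
#   setup = Setup(yaml)
#   creds = setup.badoo().credentials()
#   grid = setup.browser().grid_url()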
|
#!/usr/bin/env python
r"""
1. filter CTOI and save CTOI list as .txt file
2. create a batch file to run mirai (see `make_batch_mirai.sh`)
3. execute batch file using parallel
WATCH OUT: CTOI column headings have inconsistent capitalization
(err vs Err) and some units lack parentheses, etc.
"""
from os import path
import argparse
# import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import pandas as pd
pd.options.display.float_format = "{:.2f}".format
from mirai.mirai import (
get_ctois,
get_between_limits,
get_above_lower_limit,
get_below_upper_limit,
)
arg = argparse.ArgumentParser()
arg.add_argument(
"-o", "--outdir", help="output directory", type=str, default="."
)
arg.add_argument(
    "-sig", "--sigma", help="strict=1 (default); conservative=3", type=int,
    default=1
)
arg.add_argument(
"-f",
"--frac_error",
help="allowed fractional error in parameter",
default=None,
type=float,
)
arg.add_argument(
"-s",
"--save",
help="save visibility plots and transit predictions in a csv file",
action="store_true",
default=False,
)
args = arg.parse_args()
sigma = args.sigma
output_columns = "CTOI,Period (days),Radius (R_Earth),Depth ppm".split(",")
# fetch toi table from exofop tess
ctois = get_ctois(remove_FP=True, clobber=False, verbose=False)
Rp = ctois["Radius (R_Earth)"]
Rp_err = ctois["Radius (R_Earth) Error"]
Porb = ctois["Period (days)"]
Porb_err = ctois["Period (days) Error"]
depth = ctois["Depth ppm"]
depth_err = ctois["Depth ppm Error"]
if args.frac_error:
idx1 = (Rp_err / Rp) < args.frac_error
idx2 = (Porb_err / Porb) < args.frac_error
idx3 = (depth_err / depth) < args.frac_error
ctois = ctois[idx1 & idx2 & idx3]
Rp = ctois["Radius (R_Earth)"]
Rp_err = ctois["Radius (R_Earth) Error"]
Porb = ctois["Period (days)"]
Porb_err = ctois["Period (days) Error"]
Rstar = ctois["Stellar Radius (R_Sun)"]
Rstar_err = ctois["Stellar Radius (R_Sun) err"]
teff = ctois["Stellar Eff Temp (K)"]
Teq = ctois["Equilibrium Temp (K)"]
depth = ctois["Depth ppm"]
depth_err = ctois["Depth ppm Error"]
Tmag = ctois["TESS Mag"]
Tmag_err = ctois["TESS Mag err"]
distance = ctois["Stellar Distance (pc)"]
# tois["Stellar log(g) (cm/s^2)"]
# ---define filters---#
# transit
deep = get_above_lower_limit(5, depth, depth_err, sigma=sigma) # 1ppt
# site-specific
north = ctois["Dec"] > -30
south = ctois["Dec"] < 30
# star
bright = get_below_upper_limit(11, Tmag, Tmag_err, sigma=sigma)
# planet
# size
# orbit
# special
## combine filters by uncommenting lines
idx = (
    (Porb > 0)  # make sure there is no NaN in period
    # & (Rp>0)  # make sure there is no NaN in radius
# ---telescope---#
# & north
# & south
# ---transit---#
& deep
# ---star---#
# & nearby
& bright
# & cool
# & dwarf
# & hot
# & giant
# & sunlike
# & nearby
# & young
# ---planet---#
# & temperate
# & tropical
# & warm
# & small
# & subearth
# & superearth
# & earth
# & subneptune
# & neptune
# & subsaturn
# & saturn
# & jupiter
# & inflated
# & large
# ---orbit---#
# & short
# & medium
# & long
# ---special---#
# & usp
# & hotjup
# & tropical & subneptune
# & tropical & subsaturn
# & tropical & jupiter
# & reinflated
# & radius_gap
)
filename_header = "all"
if args.save:
# just save list of ctoi
fp = path.join(args.outdir, filename_header + "_ctois.txt")
ctois.loc[idx, "CTOI"].to_csv(fp, index=False, header=None)
print(f"Saved: {fp}")
else:
    print(ctois.loc[idx, output_columns].to_string(index=False))
|