| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
tvm | tvm-main/rust/tvm-graph-rt/tests/test_tvm_dso/src/build_test_lib.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Prepares a simple TVM library for testing."""
from os import path as osp
import sys
import tvm
from tvm import te
from tvm.contrib import cc
def main():
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
s = tvm.te.create_schedule(C.op)
s[C].parallel(s[C].op.axis[0])
print(tvm.lower(s, [A, B, C], simple_mode=True))
obj_file = osp.join(sys.argv[1], "test.o")
tvm.build(s, [A, B, C], "llvm").save(obj_file)
cc.create_shared(osp.join(sys.argv[1], "test.so"), [obj_file])
if __name__ == "__main__":
main()
| 1,461 | 32.227273 | 66 | py |
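The script above only produces `test.so` for the Rust graph-runtime tests, but a quick way to sanity-check the artifact is to load it back from Python. A minimal sketch, assuming the entry-point name `"default_function"` that `tvm.build` uses when no `name=` is given (pass `name=` to control it):

```python
import numpy as np
import tvm

# Load the shared library written by build_test_lib.py and fetch its entry.
mod = tvm.runtime.load_module("test.so")
fadd = mod["default_function"]  # assumed entry name

n = 1024
a = tvm.nd.array(np.random.uniform(size=n).astype("float32"))
b = tvm.nd.array(np.random.uniform(size=n).astype("float32"))
c = tvm.nd.array(np.zeros(n, dtype="float32"))
fadd(a, b, c)
np.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
```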
tvm | tvm-main/rust/tvm-graph-rt/tests/test_nn/src/build_test_graph.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Builds a simple graph for testing."""
from os import path as osp
import sys
from tvm import runtime as tvm_runtime
from tvm import relay
from tvm.relay import testing
def _get_model(dshape):
data = relay.var("data", shape=dshape)
fc = relay.nn.dense(data, relay.var("dense_weight"), units=dshape[-1] * 2)
fc = relay.nn.bias_add(fc, relay.var("dense_bias"))
left, right = relay.split(fc, indices_or_sections=2, axis=1)
one = relay.const(1, dtype="float32")
return relay.Tuple([(left + one), (right - one), fc])
def main():
dshape = (4, 8)
net = _get_model(dshape)
mod, params = testing.create_workload(net)
runtime = relay.backend.Runtime("cpp", {"system-lib": True})
graph, lib, params = relay.build(mod, "llvm", runtime=runtime, params=params)
out_dir = sys.argv[1]
lib.save(osp.join(sys.argv[1], "graph.o"))
with open(osp.join(out_dir, "graph.json"), "w") as f_resnet:
f_resnet.write(graph)
with open(osp.join(out_dir, "graph.params"), "wb") as f_params:
f_params.write(tvm_runtime.save_param_dict(params))
if __name__ == "__main__":
main()
| 1,945 | 33.75 | 81 | py |
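The Rust test consumes `graph.o` through the system library, but the same three artifacts (module, graph JSON, params blob) can also be exercised from Python with `graph_executor`. A hedged sketch, assuming the object file has first been linked into a loadable `graph.so`:

```python
import numpy as np
import tvm
from tvm.contrib import graph_executor

lib = tvm.runtime.load_module("graph.so")  # assumes graph.o was linked into a shared library
with open("graph.json") as f:
    graph_json = f.read()

gmod = graph_executor.create(graph_json, lib, tvm.cpu(0))
with open("graph.params", "rb") as f:
    gmod.load_params(f.read())

gmod.set_input("data", np.random.uniform(size=(4, 8)).astype("float32"))
gmod.run()
# First tuple field is left + 1: dense to 16 units, split in half -> (4, 8)
print(gmod.get_output(0).shape)
```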
tvm | tvm-main/conda/render_cuda_dockerfiles.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import subprocess
from jinja2 import Template
CUDA_VERSIONS = ["10.0", "9.0"]
# Make sure that the cudnn version you set here is available
# for all the cuda versions that you want both from nvidia
# and from conda.
# These two must be in sync
CUDNN_FULL_VERSION = "7.6.0.64"
CUDNN_VERSION = "7.6.0"
condadir = os.path.dirname(sys.argv[0])
condadir = os.path.abspath(condadir)
srcdir = os.path.dirname(condadir)
with open(os.path.join(condadir, "Dockerfile.template")) as f:
docker_template = Template(f.read())
def render_dockerfile(version):
txt = docker_template.render(
cuda_version=version, cudnn_short_version=CUDNN_VERSION, cudnn_version=CUDNN_FULL_VERSION
)
fname = os.path.join(condadir, "../docker/Dockerfile.conda_cuda" + version.replace(".", ""))
with open(fname, "w") as f:
f.write(txt + "\n")
return fname
if __name__ == "__main__":
build_versions = CUDA_VERSIONS
if len(sys.argv) > 1:
build_versions = sys.argv[1:]
for version in build_versions:
render_dockerfile(version)
| 1,876 | 29.770492 | 97 | py |
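`render_dockerfile` relies on plain Jinja2 placeholder substitution. A self-contained sketch of the same mechanism, using a hypothetical one-line template in place of `conda/Dockerfile.template` (whose real contents are not shown here):

```python
from jinja2 import Template

# Hypothetical stand-in for conda/Dockerfile.template.
template = Template("FROM nvidia/cuda:{{ cuda_version }}-cudnn{{ cudnn_short_version }}-devel")
print(template.render(cuda_version="10.0", cudnn_short_version="7.6.0"))
# FROM nvidia/cuda:10.0-cudnn7.6.0-devel
```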
tvm | tvm-main/jvm/core/src/test/scripts/test_graph_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
import json
from tvm.contrib import graph_executor
def dump_graph_lib(target_dir):
dim = 4
A = te.placeholder((dim,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
sched = te.create_schedule(B.op)
node0 = {"op": "null", "name": "x", "inputs": []}
node1 = {
"op": "tvm_op",
"name": "add",
"inputs": [[0, 0, 0]],
"attrs": {"func_name": "myadd", "flatten_data": "1", "num_inputs": "1", "num_outputs": "1"},
}
nodes = [node0, node1]
arg_nodes = [0]
node_row_ptr = [0, 1, 2]
outputs = [[1, 0, 0]]
shape = (4,)
attrs = {
"shape": ["list_shape", [shape, shape]],
"dltype": ["list_str", ["float32", "float32"]],
"storage_id": ["list_int", [0, 1]],
}
graph = {
"nodes": nodes,
"arg_nodes": arg_nodes,
"node_row_ptr": node_row_ptr,
"heads": outputs,
"attrs": attrs,
}
graph = json.dumps(graph)
mlib = tvm.build(sched, [A, B], "llvm", name="myadd")
mlib.export_library(os.path.join(target_dir, "graph_addone_lib.so"))
with open(os.path.join(target_dir, "graph_addone.json"), "w") as fo:
fo.write(graph)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
sys.exit(-1)
dump_graph_lib(sys.argv[1])
| 2,154 | 29.785714 | 100 | py |
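Because the graph JSON here is written by hand, it helps to see how the pieces line up when loaded: node `x` (index 0) is the input, `add` invokes the compiled `myadd`, and `heads` points at `add`'s output. A sketch of the consuming side:

```python
import numpy as np
import tvm
from tvm.contrib import graph_executor

lib = tvm.runtime.load_module("graph_addone_lib.so")
graph_json = open("graph_addone.json").read()

gmod = graph_executor.create(graph_json, lib, tvm.cpu(0))
gmod.set_input("x", np.ones(4, dtype="float32"))  # "x" matches node0's name
gmod.run()
print(gmod.get_output(0))  # expect [2. 2. 2. 2.] since B = A + 1.0
```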
tvm | tvm-main/jvm/core/src/test/scripts/test_add_cpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
from tvm.contrib import cc, utils
def test_add(target_dir):
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], "llvm", name="myadd")
fadd.save(os.path.join(target_dir, "add_cpu.o"))
cc.create_shared(
os.path.join(target_dir, "add_cpu.so"), [os.path.join(target_dir, "add_cpu.o")]
)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
sys.exit(-1)
test_add(sys.argv[1])
| 1,426 | 31.431818 | 87 | py |
tvm | tvm-main/jvm/core/src/test/scripts/test_add_gpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
from tvm.contrib import cc, nvcc, utils
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code, target):
ptx = nvcc.compile_cuda(code, target_format="ptx")
return ptx
def test_add(target_dir):
if not tvm.runtime.enabled("cuda"):
print("skip %s because cuda is not enabled..." % __file__)
return
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = te.create_schedule(C.op)
bx, tx = s[C].split(C.op.axis[0], factor=64)
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
fadd_cuda = tvm.build(s, [A, B, C], tvm.target.Target("cuda", host="llvm"), name="myadd")
fadd_cuda.save(os.path.join(target_dir, "add_cuda.o"))
fadd_cuda.imported_modules[0].save(os.path.join(target_dir, "add_cuda.ptx"))
cc.create_shared(
os.path.join(target_dir, "add_cuda.so"), [os.path.join(target_dir, "add_cuda.o")]
)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
sys.exit(-1)
test_add(sys.argv[1])
| 2,008 | 33.050847 | 93 | py |
tvm | tvm-main/jvm/core/src/test/scripts/test_rpc_proxy_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from tvm.rpc import proxy
def start_proxy_server(port, timeout):
prox = proxy.Proxy("127.0.0.1", port=port, port_end=port + 1)
if timeout > 0:
time.sleep(timeout)

prox.terminate()
else:
prox.proc.join()
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
sys.exit(-1)
port = int(sys.argv[1])
timeout = 0 if len(sys.argv) == 2 else float(sys.argv[2])
start_proxy_server(port, timeout)
| 1,279 | 31 | 65 | py |
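The proxy on its own only brokers connections: a device first registers with it under a key, and a client then requests a session for the same key. A heavily hedged sketch of the client side (the key `"test"` and port 9090 are illustrative assumptions, not values used by the Java tests):

```python
import tvm.rpc

# Assumes an RPC server has already registered with the proxy under key="test".
remote = tvm.rpc.connect("127.0.0.1", 9090, key="test")
print(remote.cpu(0))  # the session is usable like any other RPC remote
```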
tvm | tvm-main/golang/sample/gen_mobilenet_lib.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from tvm import relay, transform, runtime
from tvm.contrib.download import download_testdata
################################################
# Utils for downloading and extracting zip files
# ----------------------------------------------
def extract(path):
import tarfile
if path.endswith("tgz") or path.endswith("gz"):
dir_path = os.path.dirname(path)
tar = tarfile.open(path)
tar.extractall(path=dir_path)
tar.close()
else:
raise RuntimeError("Could not decompress the file: " + path)
###################################
# Download TFLite pre-trained model
# ---------------------------------
model_url = "https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz"
model_path = download_testdata(model_url, "mobilenet_v2_1.4_224.tgz", module=["tf", "official"])
model_dir = os.path.dirname(model_path)
extract(model_path)
# now we have mobilenet_v2_1.4_224.tflite on disk
model_file = os.path.join(model_dir, "mobilenet_v2_1.4_224.tflite")
# get TFLite model from buffer
with open(model_file, "rb") as f:
tflite_model_buf = f.read()
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
##############################
# Load Neural Network in Relay
# ----------------------------
# TFLite input tensor name, shape and type
input_tensor = "input"
input_shape = (1, 224, 224, 3)
input_dtype = "float32"
# parse TFLite model and convert into Relay computation graph
mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype}
)
#############
# Compilation
# -----------
target = "llvm"
# Build with Relay
with transform.PassContext(opt_level=3):
graph, lib, params = relay.build_module.build(mod, target, params=params)
###############################################
# Save the graph, lib and parameters into files
# ---------------------------------------------
lib.export_library("./mobilenet.so")
print("lib export succeefully")
with open("./mobilenet.json", "w") as fo:
fo.write(graph)
with open("./mobilenet.params", "wb") as fo:
fo.write(runtime.save_param_dict(params))
| 3,107 | 30.714286 | 96 | py |
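The `.params` blob written above is just the serialized parameter dict, and it round-trips through the matching loader. A small sketch, assuming the script has already run in the current directory:

```python
from tvm import runtime

with open("./mobilenet.params", "rb") as f:
    params = runtime.load_param_dict(f.read())
print(len(params), "parameter tensors")  # keys are the Relay parameter names
```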
tvm | tvm-main/golang/sample/deploy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Get Started with TVM Go
=======================
"""
from __future__ import absolute_import, print_function
import tvm
from tvm import te
import numpy as np
# Global declarations of environment.
tgt = "llvm"
######################################################################
# Describe the Computation
# ------------------------
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
######################################################################
# Schedule the Computation
# ------------------------
s = te.create_schedule(C.op)
######################################################################
# Compilation
# -----------
fadd = tvm.build(s, [A, B, C], tgt, name="myadd")
######################################################################
# Save Compiled Module
# --------------------
from tvm.contrib import cc
from tvm.contrib import utils
fadd.save("deploy.o")
cc.create_shared("deploy.so", ["deploy.o"])
| 1,815 | 30.859649 | 70 | py |
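A short verification sketch for the artifacts saved above; the entry name `"myadd"` comes from the `name=` argument passed to `tvm.build`:

```python
import numpy as np
import tvm

fadd = tvm.runtime.load_module("deploy.so")["myadd"]
n = 16
a = tvm.nd.array(np.random.uniform(size=n).astype("float32"))
b = tvm.nd.array(np.random.uniform(size=n).astype("float32"))
c = tvm.nd.array(np.zeros(n, dtype="float32"))
fadd(a, b, c)
np.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())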
tvm | tvm-main/ci/jenkins/generate.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import jinja2
import argparse
import difflib
import datetime
import re
import textwrap
from pathlib import Path
from typing import List, Optional
from dataclasses import dataclass
from data import data
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
JENKINS_DIR = REPO_ROOT / "ci" / "jenkins"
TEMPLATES_DIR = JENKINS_DIR / "templates"
GENERATED_DIR = JENKINS_DIR / "generated"
class Change:
IMAGES_ONLY = object()
NONE = object()
FULL = object()
@dataclass
class ChangeData:
diff: Optional[str]
content: str
destination: Path
source: Path
def lines_without_generated_tag(content):
return [
line for line in content.splitlines(keepends=True) if not line.startswith("// Generated at")
]
def change_type(lines: List[str]) -> Change:
"""
Classify a diff: Change.NONE when nothing changed, Change.IMAGES_ONLY when
the only changed lines are Docker image tags, and Change.FULL otherwise
"""
added_images = []
removed_images = []
diff_lines = []
for line in lines[2:]:
if not line.startswith("-") and not line.startswith("+"):
# not a diff line, ignore it
continue
diff_lines.append(line)
if len(diff_lines) == 0:
# no changes made
return Change.NONE
for line in diff_lines:
is_add = line.startswith("+")
line = line.strip().lstrip("+").lstrip("-")
match = re.search(
r"^(ci_[a-zA-Z0-9]+) = \'.*\'$",
line,  # already stripped of +/- above
flags=re.MULTILINE,
)
if match is None:
# line is not an image-tag update, so this is a full change
return Change.FULL
if is_add:
added_images.append(match.groups()[0])
else:
removed_images.append(match.groups()[0])
# make sure that the added image lines match the removed image lines
if len(added_images) > 0 and added_images == removed_images:
return Change.IMAGES_ONLY
else:
return Change.FULL
def update_jenkinsfile(source: Path) -> ChangeData:
destination = GENERATED_DIR / source.stem
data["generated_time"] = datetime.datetime.now().isoformat()
if destination.exists():
with open(destination) as f:
old_generated_content = f.read()
timestamp_match = re.search(
r"^// Generated at (.*)$", old_generated_content, flags=re.MULTILINE
)
if not timestamp_match:
raise RuntimeError(
f"Could not find timestamp in Jenkinsfile: {destination.relative_to(TEMPLATES_DIR)}"
)
original_timestamp = timestamp_match.groups()[0]
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(TEMPLATES_DIR),
undefined=jinja2.StrictUndefined,
lstrip_blocks=True,
trim_blocks=True,
keep_trailing_newline=True,
)
template = environment.get_template(str(source.relative_to(TEMPLATES_DIR)))
new_content = template.render(**data)
if not destination.exists():
# New file, create it from scratch
return ChangeData(
diff=new_content, content=new_content, source=source, destination=destination
)
diff = [
line
for line in difflib.unified_diff(
lines_without_generated_tag(old_generated_content),
lines_without_generated_tag(new_content),
)
]
change = change_type(diff)
if (not args.force and change == Change.IMAGES_ONLY) or change == Change.NONE:
if change != Change.NONE:
print("Detected only Docker-image name changes, skipping timestamp update")
new_content = new_content.replace(data["generated_time"], original_timestamp)
diff = "".join(diff)
return ChangeData(diff=diff, content=new_content, source=source, destination=destination)
if __name__ == "__main__":
help = "Regenerate Jenkinsfile from template"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--force", action="store_true", help="always overwrite timestamp")
parser.add_argument("--check", action="store_true", help="just verify the output didn't change")
args = parser.parse_args()
sources = TEMPLATES_DIR.glob("*_jenkinsfile.groovy.j2")
changes = [update_jenkinsfile(source) for source in sources if source.name != "base.groovy.j2"]
if args.check:
if all(not data.diff for data in changes):
print("Success, the newly generated Jenkinsfiles matched the ones on disk")
exit(0)
else:
print(
textwrap.dedent(
"""
Newly generated Jenkinsfiles did not match the ones on disk! If you have made
edits to the Jenkinsfiles in generated/, move them to the corresponding source and
regenerate the Jenkinsfiles from the templates with
python3 -m pip install -r jenkins/requirements.txt
python3 jenkins/generate.py
Diffed changes:
"""
).strip()
)
for data in changes:
if data.diff:
source = data.source.relative_to(REPO_ROOT)
print(source)
print(data.diff)
exit(1)
else:
for data in changes:
with open(data.destination, "w") as f:
f.write(data.content)
if not data.diff:
print(f"Wrote output to {data.destination.relative_to(REPO_ROOT)}, no changes made")
else:
print(f"Wrote output to {data.destination.relative_to(REPO_ROOT)}, changes:")
print(data.diff)
| 6,521 | 31.773869 | 100 | py |
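The interesting branch in `generate.py` is `change_type`, which decides whether a regenerated Jenkinsfile differs only in Docker image tags (in which case the old timestamp is preserved). A hedged illustration, assuming the definitions above are in scope and using a hand-written unified diff; the first two header lines are skipped by `lines[2:]`:

```python
diff = [
    "--- old\n",
    "+++ new\n",
    "@@ -1 +1 @@\n",  # hunk markers are ignored: they start with neither '+' nor '-'
    "-ci_gpu = 'tlcpack/ci-gpu:20221019-060125-0b4836739'\n",
    "+ci_gpu = 'tlcpack/ci-gpu:20991231-000000-abcdef123'\n",
]
assert change_type(diff) is Change.IMAGES_ONLY  # same image key on both sides
assert change_type(["--- old\n", "+++ new\n"]) is Change.NONE  # no +/- lines
```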
tvm | tvm-main/ci/jenkins/data.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
files_to_stash = {
# Executables and build files needed to run c++ tests
"cpptest": ["build/cpptest", "build/build.ninja", "build/CMakeFiles/rules.ninja"],
# Executables and build files needed to c runtime tests
"crttest": ["build/crttest"],
# Folder for hexagon build
"hexagon_api": [
"build/hexagon_api_output",
],
# Folder for microtvm build
"microtvm_template_projects": [
"build/microtvm_template_projects",
],
# Folders and build files for c runtime
"standalone_crt": ["build/standalone_crt", "build/build.ninja"],
# This library is produced with HIDE_PRIVATE_SYMBOLS=ON
"tvm_allvisible": ["build/libtvm_allvisible.so"],
# runtime files
"tvm_runtime": ["build/libtvm_runtime.so", "build/config.cmake"],
# compiler files
"tvm_lib": ["build/libtvm.so", "build/libtvm_runtime.so", "build/config.cmake"],
# compiler files and fsim
"tvm_multilib": [
"build/libtvm.so",
"build/libvta_fsim.so",
"build/libtvm_runtime.so",
"build/config.cmake",
],
# compiler files, fsim, and tsim
"tvm_multilib_tsim": [
"build/libvta_tsim.so",
"build/libtvm.so",
"build/libvta_fsim.so",
"build/libtvm_runtime.so",
"build/config.cmake",
],
}
# AWS info
aws_default_region = "us-west-2"
aws_ecr_url = "dkr.ecr." + aws_default_region + ".amazonaws.com"
# Docker Images
docker_images = {
"ci_arm": {
"tag": "tlcpack/ci-arm:20221013-060115-61c9742ea",
"platform": "ARM",
},
"ci_cortexm": {
"tag": "tlcpack/ci-cortexm:20221013-060115-61c9742ea",
"platform": "CPU",
},
"ci_cpu": {
"tag": "tlcpack/ci-cpu:20221013-060115-61c9742ea",
"platform": "CPU",
},
"ci_gpu": {
"tag": "tlcpack/ci-gpu:20221019-060125-0b4836739",
"platform": "GPU",
},
"ci_hexagon": {
"tag": "tlcpack/ci-hexagon:20221013-060115-61c9742ea",
"platform": "CPU",
},
"ci_i386": {
"tag": "tlcpack/ci-i386:20221013-060115-61c9742ea",
"platform": "CPU",
},
"ci_lint": {
"tag": "tlcpack/ci-lint:20221013-060115-61c9742ea",
"platform": "CPU",
},
"ci_minimal": {
"tag": "tlcpack/ci-minimal:20221013-060115-61c9742ea",
"platform": "CPU",
},
"ci_riscv": {
"tag": "tlcpack/ci-riscv:20221013-060115-61c9742ea",
"platform": "CPU",
},
"ci_wasm": {
"tag": "tlcpack/ci-wasm:20221013-060115-61c9742ea",
"platform": "CPU",
},
}
data = {
"images": [{"name": k, "platform": v["platform"]} for k, v in docker_images.items()],
"aws_default_region": aws_default_region,
"aws_ecr_url": aws_ecr_url,
**{k: v["tag"] for k, v in docker_images.items()},
**files_to_stash,
}
if __name__ == "__main__":
# This is used in docker/dev_common.sh to look up image tags
name = sys.argv[1]
if name in docker_images:
print(docker_images[name]["tag"])
else:
exit(1)
| 3,871 | 30.479675 | 89 | py |
tvm | tvm-main/ci/scripts/jenkins/should_rebuild_docker.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import json
import logging
import subprocess
from typing import Dict, Any, List, Tuple
from http_utils import get
from cmd_utils import Sh, init_log
DOCKER_API_BASE = "https://hub.docker.com/v2/"
PAGE_SIZE = 25
TEST_DATA = None
def docker_api(url: str) -> Dict[str, Any]:
"""
Run a paginated fetch from the public Docker Hub API
"""
if TEST_DATA is not None:
return TEST_DATA[url]
pagination = f"?page_size={PAGE_SIZE}&page=1"
url = DOCKER_API_BASE + url + pagination
r, headers = get(url)
reset = headers.get("x-ratelimit-reset")
if reset is not None:
reset = datetime.datetime.fromtimestamp(int(reset))
reset = reset.isoformat()
logging.info(
f"Docker API Rate Limit: {headers.get('x-ratelimit-remaining')} / {headers.get('x-ratelimit-limit')} (reset at {reset})"
)
if "results" not in r:
raise RuntimeError(f"Error fetching data, no results found in: {r}")
return r
def any_docker_changes_since(hash: str) -> Tuple[bool, str]:
"""
Check the docker/ directory and return (changed, diff): 'changed' is True if
there have been any code changes since the specified hash
"""
sh = Sh()
cmd = f"git diff {hash} -- docker/"
proc = sh.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = proc.stdout.strip()
return stdout != "", stdout
def does_commit_exist(hash: str) -> bool:
"""
Returns True if the hash exists in the repo
"""
sh = Sh()
cmd = f"git rev-parse -q {hash}"
proc = sh.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=False)
print(proc.stdout)
if proc.returncode == 0:
return True
if "unknown revision or path not in the working tree" in proc.stdout:
return False
raise RuntimeError(f"Unexpected failure when running: {cmd}")
def find_hash_for_tag(tag: Dict[str, Any]) -> str:
"""
Split the hash off of a name like <date>-<time>-<hash>
"""
name = tag["name"]
name_parts = name.split("-")
if len(name_parts) != 3:
raise RuntimeError(f"Image {name} is not using new naming scheme")
shorthash = name_parts[2]
return shorthash
def find_commit_in_repo(tags: Dict[str, Any]):
"""
Look through all the docker tags, find the most recent one which references
a commit that is present in the repo
"""
for tag in tags["results"]:
shorthash = find_hash_for_tag(tag)
logging.info(f"Hash '{shorthash}' does not exist in repo")
if does_commit_exist(shorthash):
return shorthash, tag
raise RuntimeError(f"No extant hash found in tags:\n{tags}")
def main():
# Fetch all tlcpack images
images = docker_api("repositories/tlcpack")
# Ignore all non-ci images
relevant_images = [image for image in images["results"] if image["name"].startswith("ci-")]
image_names = [image["name"] for image in relevant_images]
logging.info(f"Found {len(relevant_images)} images to check: {', '.join(image_names)}")
for image in relevant_images:
# Check the tags for the image
tags = docker_api(f"repositories/tlcpack/{image['name']}/tags")
# Find the hash of the most recent tag
shorthash, tag = find_commit_in_repo(tags)
name = tag["name"]
logging.info(f"Looking for docker/ changes since {shorthash}")
any_docker_changes, diff = any_docker_changes_since(shorthash)
if any_docker_changes:
logging.info(f"Found docker changes from {shorthash} when checking {name}")
logging.info(diff)
exit(2)
logging.info("Did not find changes, no rebuild necessary")
exit(0)
if __name__ == "__main__":
init_log()
parser = argparse.ArgumentParser(
description="Exits 0 if Docker images don't need to be rebuilt, 1 otherwise"
)
parser.add_argument(
"--testing-docker-data",
help="(testing only) JSON data to mock response from Docker Hub API",
)
args = parser.parse_args()
if args.testing_docker_data is not None:
TEST_DATA = json.loads(args.testing_docker_data)
main()
| 4,962 | 31.019355 | 128 | py |
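`--testing-docker-data` replaces the Docker Hub API with canned JSON, keyed by the same relative URLs `docker_api` would fetch. A sketch of the smallest payload the script above accepts; tag names must follow the `<date>-<time>-<shorthash>` scheme that `find_hash_for_tag` splits, and the short hash must exist in the repo for the check to pass:

```python
import json

mock = {
    "repositories/tlcpack": {"results": [{"name": "ci-cpu"}]},
    "repositories/tlcpack/ci-cpu/tags": {
        "results": [{"name": "20221013-060115-61c9742ea"}]
    },
}
print(json.dumps(mock))  # pass this string via --testing-docker-data
```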
tvm | tvm-main/ci/scripts/jenkins/determine_docker_images.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import json
import logging
import urllib.error
import configparser
from pathlib import Path
from typing import Dict, Any
from http_utils import get
from cmd_utils import init_log, REPO_ROOT
DOCKER_API_BASE = "https://hub.docker.com/v2/"
PAGE_SIZE = 25
TEST_DATA = None
IMAGE_TAGS_FILE = REPO_ROOT / "ci" / "jenkins" / "docker-images.ini"
TVM_CI_ECR = "477529581014.dkr.ecr.us-west-2.amazonaws.com"
def docker_api(url: str, use_pagination: bool = False) -> Dict[str, Any]:
"""
Fetch from the public Docker Hub API, optionally with pagination
"""
if TEST_DATA is not None:
if url not in TEST_DATA:
raise urllib.error.HTTPError(url, 404, "Not found", {}, None)
return TEST_DATA[url]
pagination = ""
if use_pagination:
pagination = f"?page_size={PAGE_SIZE}&page=1"
url = DOCKER_API_BASE + url + pagination
r, headers = get(url)
reset = headers.get("x-ratelimit-reset")
if reset is not None:
reset = datetime.datetime.fromtimestamp(int(reset))
reset = reset.isoformat()
logging.info(
f"Docker API Rate Limit: {headers.get('x-ratelimit-remaining')} / {headers.get('x-ratelimit-limit')} (reset at {reset})"
)
return r
def image_exists(spec: str) -> bool:
name, tag = spec.split(":")
try:
r = docker_api(f"repositories/{name}/tags/{tag}")
logging.info(f"Image exists, got response: {json.dumps(r, indent=2)}")
return True
except urllib.error.HTTPError as e:
# Image was not found
logging.exception(e)
return False
if __name__ == "__main__":
init_log()
parser = argparse.ArgumentParser(
description="Writes out Docker images names to be used to .docker-image-names/"
)
parser.add_argument(
"--testing-docker-data",
help="(testing only) JSON data to mock response from Docker Hub API",
)
parser.add_argument(
"--testing-images-data",
help=f"(testing only) JSON data to mock contents of {IMAGE_TAGS_FILE}",
)
parser.add_argument(
"--base-dir",
default=".docker-image-names",
help="(testing only) Folder to write image names to",
)
args, other = parser.parse_known_args()
name_dir = Path(args.base_dir)
if args.testing_images_data:
repo_image_tags = json.loads(args.testing_images_data)
else:
config = configparser.ConfigParser()
config.read(IMAGE_TAGS_FILE)
repo_image_tags = {}
for name in other:
repo_image_tags[name] = config.get("jenkins", name)
images = {}
for name in other:
images[name] = repo_image_tags[name]
if args.testing_docker_data is not None:
TEST_DATA = json.loads(args.testing_docker_data)
logging.info(f"Checking if these images exist in tlcpack: {images}")
name_dir.mkdir(exist_ok=True)
images_to_use = {}
for filename, spec in images.items():
if spec.startswith(TVM_CI_ECR):
logging.info(f"{spec} is from ECR")
images_to_use[filename] = spec
elif image_exists(spec):
logging.info(f"{spec} found in tlcpack")
images_to_use[filename] = spec
else:
logging.info(f"{spec} not found in tlcpack, using tlcpackstaging")
part, tag = spec.split(":")
user, repo = part.split("/")
tlcpackstaging_tag = f"tlcpackstaging/{repo.replace('-', '_')}:{tag}"
images_to_use[filename] = tlcpackstaging_tag
for filename, image in images_to_use.items():
logging.info(f"Writing image {image} to {name_dir / filename}")
with open(name_dir / filename, "w") as f:
f.write(image)
| 4,552 | 33.492424 | 128 | py |
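The tags come from `docker-images.ini`, read with `configparser` from a `[jenkins]` section. A sketch of the expected layout (the keys and tags shown are assumptions for illustration):

```python
import configparser

config = configparser.ConfigParser()
config.read_string(
    """
[jenkins]
ci_cpu = tlcpack/ci-cpu:20221013-060115-61c9742ea
ci_gpu = tlcpack/ci-gpu:20221019-060125-0b4836739
"""
)
print(config.get("jenkins", "ci_cpu"))  # tlcpack/ci-cpu:20221013-060115-61c9742ea
```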
tvm | tvm-main/ci/scripts/jenkins/http_utils.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from urllib import request
from typing import Dict, Any, Optional
def get(url: str, headers: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
logging.info(f"Requesting GET to {url}")
if headers is None:
headers = {}
req = request.Request(url, headers=headers)
with request.urlopen(req) as response:
response_headers = {k: v for k, v in response.getheaders()}
response = json.loads(response.read())
return response, response_headers
| 1,318 | 36.685714 | 78 | py |
tvm | tvm-main/ci/scripts/jenkins/open_docker_update_pr.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import datetime
import os
import json
import re
import shlex
from urllib import error
from typing import List, Dict, Any, Optional, Callable
from git_utils import git, parse_remote, GitHubRepo
from cmd_utils import REPO_ROOT, Sh, init_log
from should_rebuild_docker import docker_api
JENKINS_DIR = REPO_ROOT / "ci" / "jenkins"
IMAGES_FILE = JENKINS_DIR / "data.py"
GENERATE_SCRIPT = JENKINS_DIR / "generate.py"
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
BRANCH = "nightly-docker-update"
def _testing_docker_api(data: Dict[str, Any]) -> Callable[[str], Dict[str, Any]]:
"""Returns a function that can be used in place of docker_api"""
def mock(url: str) -> Dict[str, Any]:
if url in data:
return data[url]
else:
raise error.HTTPError(url, 404, f"Not found: {url}", {}, None)
return mock
def parse_docker_date(d: str) -> datetime.datetime:
"""Turn a date string from the Docker API into a datetime object"""
return datetime.datetime.strptime(d, "%Y-%m-%dT%H:%M:%S.%fZ")
def check_tag(tag: Dict[str, Any]) -> bool:
return re.match(r"^[0-9]+-[0-9]+-[a-z0-9]+$", tag["name"]) is not None
def latest_tag(user: str, repo: str) -> List[Dict[str, Any]]:
"""
Queries Docker Hub and finds the most recent tag for the specified image/repo pair
"""
r = docker_api(f"repositories/{user}/{repo}/tags")
results = r["results"]
for result in results:
result["last_updated"] = parse_docker_date(result["last_updated"])
results = list(sorted(results, key=lambda d: d["last_updated"]))
results = [tag for tag in results if check_tag(tag)]
return results[-1]
def latest_tlcpackstaging_image(source: str) -> Optional[str]:
"""
Finds the latest full tag to use in the Jenkinsfile or returns None if no
update is needed
"""
name, current_tag = source.split(":")
user, repo = name.split("/")
logging.info(
f"Running with name: {name}, current_tag: {current_tag}, user: {user}, repo: {repo}"
)
staging_repo = repo.replace("-", "_")
latest_tlcpackstaging_tag = latest_tag(user="tlcpackstaging", repo=staging_repo)
logging.info(f"Found latest tlcpackstaging tag:\n{latest_tlcpackstaging_tag}")
if latest_tlcpackstaging_tag["name"] == current_tag:
logging.info(f"tlcpackstaging tag is the same as the one in the Jenkinsfile")
latest_tlcpack_tag = latest_tag(user="tlcpack", repo=repo)
logging.info(f"Found latest tlcpack tag:\n{latest_tlcpack_tag}")
if latest_tlcpack_tag["name"] == latest_tlcpackstaging_tag["name"]:
logging.info("Tag names were the same, no update needed")
return None
if latest_tlcpack_tag["last_updated"] > latest_tlcpackstaging_tag["last_updated"]:
new_spec = f"tlcpack/{repo}:{latest_tlcpack_tag['name']}"
else:
# Even if the image doesn't exist in tlcpack, it will fall back to tlcpackstaging
# so hardcode the username here
new_spec = f"tlcpack/{repo}:{latest_tlcpackstaging_tag['name']}"
logging.info("Using tlcpackstaging tag on tlcpack")
logging.info(f"Found newer image, using: {new_spec}")
return new_spec
if __name__ == "__main__":
init_log()
help = "Open a PR to update the Docker images to use the latest available in tlcpackstaging"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--dry-run", action="store_true", help="don't send PR to GitHub")
parser.add_argument("--testing-docker-data", help="JSON data to mock Docker Hub API response")
args = parser.parse_args()
# Install test mock if necessary
if args.testing_docker_data is not None:
docker_api = _testing_docker_api(data=json.loads(args.testing_docker_data))
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
# Read the existing images from the Jenkinsfile
logging.info(f"Reading {IMAGES_FILE}")
with open(IMAGES_FILE) as f:
content = f.readlines()
# Build a new Jenkinsfile with the latest images from tlcpack or tlcpackstaging
replacements = {}
for line in content:
m = re.match(r'"tag": "(.*)",', line.strip())
if m is not None:
image_spec = m.groups()[0]
logging.info(f"Found match on line {line.strip()}")
new_image = latest_tlcpackstaging_image(image_spec)
if new_image is None:
logging.info(f"No new image found")
else:
logging.info(f"Using new image {new_image}")
new_line = f' "tag": "{new_image}",\n'  # keep the newline so the replace below stays line-aligned
replacements[line] = new_line
# Re-generate the Jenkinsfiles
command = f"python3 {shlex.quote(str(GENERATE_SCRIPT))}"
content = "\n".join(content)
for old_line, new_line in replacements.items():
content = content.replace(old_line, new_line)
print(f"Updated to:\n{content}")
if args.dry_run:
print(f"Would have run:\n{command}")
else:
with open(IMAGES_FILE, "w") as f:
f.write(content)
Sh().run(command)
# Publish the PR
title = "[ci][docker] Nightly Docker image update"
body = "This bumps the Docker images to the latest versions from Docker Hub."
message = f"{title}\n\n\n{body}"
if args.dry_run:
logging.info("Dry run, would have committed Jenkinsfiles")
else:
logging.info(f"Creating git commit")
git(["checkout", "-B", BRANCH])
git(["add", str(JENKINS_DIR.relative_to(REPO_ROOT))])
git(["config", "user.name", "tvm-bot"])
git(["config", "user.email", "95660001+tvm-bot@users.noreply.github.com"])
git(["commit", "-m", message])
git(["push", "--set-upstream", args.remote, BRANCH, "--force"])
logging.info(f"Sending PR to GitHub")
github = GitHubRepo(user=user, repo=repo, token=GITHUB_TOKEN)
data = {
"title": title,
"body": body,
"head": BRANCH,
"base": "main",
"maintainer_can_modify": True,
}
url = "pulls"
if args.dry_run:
logging.info(f"Dry run, would have sent {data} to {url}")
else:
try:
github.post(url, data=data)
except error.HTTPError as e:
# Ignore the exception if the PR already exists (which gives a 422). The
# existing PR will have been updated in place
if e.code == 422:
logging.info("PR already exists, ignoring error")
logging.exception(e)
else:
raise e
| 7,518 | 35.857843 | 98 | py |
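Two helpers above encode the tag-naming convention; a quick sketch of their behavior on sample Docker Hub values, assuming the definitions from this file are in scope:

```python
print(parse_docker_date("2022-10-13T06:01:15.000000Z"))  # datetime.datetime(2022, 10, 13, 6, 1, 15)
print(check_tag({"name": "20221013-060115-61c9742ea"}))  # True: <date>-<time>-<hash>
print(check_tag({"name": "latest"}))                     # False: rejected by the regex
```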
tvm | tvm-main/ci/scripts/jenkins/pytest_wrapper.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import textwrap
import junitparser
from pathlib import Path
from typing import List, Optional
import os
import urllib.parse
import logging
from cmd_utils import init_log
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
def lstrip(s: str, prefix: str) -> str:
if s.startswith(prefix):
s = s[len(prefix) :]
return s
def classname_to_file(classname: str) -> str:
classname = lstrip(classname, "cython.")
classname = lstrip(classname, "ctypes.")
return classname.replace(".", "/") + ".py"
def failed_test_ids() -> List[str]:
FAILURE_TYPES = (junitparser.Failure, junitparser.Error)
junit_dir = REPO_ROOT / "build" / "pytest-results"
failed_node_ids = []
for junit in junit_dir.glob("*.xml"):
xml = junitparser.JUnitXml.fromfile(str(junit))
for suite in xml:
# handle suites
for case in suite:
if case.result is None:
logging.warn(f"Incorrectly formatted JUnit found, result was None on {case}")
continue
if len(case.result) > 0 and isinstance(case.result[0], FAILURE_TYPES):
node_id = classname_to_file(case.classname) + "::" + case.name
failed_node_ids.append(node_id)
return list(set(failed_node_ids))
def repro_command(build_type: str, failed_node_ids: List[str]) -> Optional[str]:
"""
Parse available JUnit XML files and output a command that users can run to
reproduce CI failures locally
"""
test_args = [f"--tests {node_id}" for node_id in failed_node_ids]
test_args_str = " ".join(test_args)
return f"python3 tests/scripts/ci.py {build_type} {test_args_str}"
def make_issue_url(failed_node_ids: List[str]) -> str:
names = [f"`{node_id}`" for node_id in failed_node_ids]
run_url = os.getenv("RUN_DISPLAY_URL", "<insert run URL>")
test_bullets = [f" - `{node_id}`" for node_id in failed_node_ids]
params = {
"labels": "test: flaky",
"title": "[Flaky Test] " + ", ".join(names),
"body": textwrap.dedent(
f"""
These tests were found to be flaky (intermittently failing on `main` or failed in a PR with unrelated changes). See [the docs](https://github.com/apache/tvm/blob/main/docs/contribute/ci.rst#handling-flaky-failures) for details.
### Tests(s)\n
"""
)
+ "\n".join(test_bullets)
+ f"\n\n### Jenkins Links\n\n - {run_url}",
}
return "https://github.com/apache/tvm/issues/new?" + urllib.parse.urlencode(params)
def show_failure_help(failed_suites: List[str]) -> None:
failed_node_ids = failed_test_ids()
if len(failed_node_ids) == 0:
return
build_type = os.getenv("PLATFORM")
if build_type is None:
raise RuntimeError("build type was None, cannot show command")
repro = repro_command(build_type=build_type, failed_node_ids=failed_node_ids)
if repro is None:
print("No test failures detected")
return
print(f"Report flaky test shortcut: {make_issue_url(failed_node_ids)}")
print("=============================== PYTEST FAILURES ================================")
print(
"These pytest suites failed to execute. The results can be found in the "
"Jenkins 'Tests' tab or by scrolling up through the raw logs here. "
"If there is no test listed below, the failure likely came from a segmentation "
"fault which you can find in the logs above.\n"
)
if failed_suites is not None and len(failed_suites) > 0:
print("\n".join([f" - {suite}" for suite in failed_suites]))
print("")
print("You can reproduce these specific failures locally with this command:\n")
print(textwrap.indent(repro, prefix=" "))
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Print information about a failed pytest run")
args, other = parser.parse_known_args()
init_log()
try:
show_failure_help(failed_suites=other)
except Exception as e:
# This script shouldn't ever introduce failures since it's just there to
# add extra information, so ignore any errors
logging.exception(e)
| 5,095 | 35.661871 | 239 | py |
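`classname_to_file` undoes pytest's JUnit classname mangling, including the `ctypes.`/`cython.` prefixes that TVM's test matrix adds. A small sketch, assuming the function above is in scope:

```python
print(classname_to_file("ctypes.tests.python.unittest.test_foo"))
# tests/python/unittest/test_foo.py
print(classname_to_file("tests.python.unittest.test_bar"))
# tests/python/unittest/test_bar.py (no prefix to strip)
```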
tvm | tvm-main/ci/scripts/jenkins/should_run_slow_tests.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse
import textwrap
from typing import Tuple, List, Optional
from git_utils import GitHubRepo, parse_remote, git
SLOW_TEST_TRIGGERS = [
"@tvm-bot run slow tests",
"@tvm-bot run slow test",
"@tvm-bot run slow",
"@tvm-bot slow tests",
"@tvm-bot slow test",
"@tvm-bot slow",
]
def check_match(s: str, searches: List[str]) -> Tuple[bool, Optional[str]]:
for search in searches:
if search in s:
return True, search
return False, None
def display(long_str: str) -> str:
return textwrap.indent(long_str, " ")
if __name__ == "__main__":
help = "Exits with 1 if CI should run slow tests, 0 otherwise"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--pr", required=True)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument(
"--pr-body", help="(testing) PR body to use instead of fetching from GitHub"
)
args = parser.parse_args()
branch = git(["rev-parse", "--abbrev-ref", "HEAD"])
# Don't skip slow tests on main or ci-docker-staging
skip_branches = {"main", "ci-docker-staging"}
if branch in skip_branches:
print(f"Branch {branch} is in {skip_branches}, running slow tests")
exit(1)
print(f"Branch {branch} is not in {skip_branches}, checking last commit...")
if args.pr_body:
body = args.pr_body
else:
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
pr = github.get(f"pulls/{args.pr}")
body = pr["body"]
body_match, reason = check_match(body, SLOW_TEST_TRIGGERS)
if body_match:
print(f"Matched {reason} in PR body:\n{display(body)}, running slow tests")
exit(1)
print(
f"PR Body:\n{display(body)}\ndid not have any of {SLOW_TEST_TRIGGERS}, skipping slow tests"
)
exit(0)
| 2,840 | 30.921348 | 99 | py |
tvm | tvm-main/ci/scripts/jenkins/git_skip_ci_globs.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import fnmatch
from typing import Optional
from git_utils import git
globs = [
"*.md",
"conda/*",
".github/*",
".asf.yaml",
".gitignore",
"LICENSE",
"NOTICE",
"KEYS",
# microTVM
"apps/microtvm/poetry.lock",
"apps/microtvm/pyproject.toml",
"tests/lint/*",
"tests/scripts/task_lint.sh",
]
def match_any(f: str) -> Optional[str]:
for glob in globs:
if fnmatch.fnmatch(f, glob):
return glob
return None
if __name__ == "__main__":
help = "Exits with code 1 if a change only touched files, indicating that CI could be skipped for this changeset"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--files", help="(testing only) comma separated list of files to check")
args = parser.parse_args()
print(args)
if args.files is not None:
diff = [x for x in args.files.split(",") if x.strip() != ""]
else:
diff = git(["diff", "--no-commit-id", "--name-only", "-r", "origin/main"])
diff = diff.split("\n")
diff = [d.strip() for d in diff]
diff = [d for d in diff if d != ""]
print(f"Changed files:\n{diff}")
if len(diff) == 0:
print("Found no changed files, skipping CI")
exit(0)
print(f"Checking with globs:\n{globs}")
for file in diff:
match = match_any(file)
if match is None:
print(f"{file} did not match any globs, running CI")
exit(1)
else:
print(f"{file} matched glob {match}")
print("All files matched a glob, skipping CI")
exit(0)
| 2,435 | 28.707317 | 117 | py |
tvm | tvm-main/ci/scripts/jenkins/git_utils.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import subprocess
import re
import os
import base64
import logging
from urllib import request, error
from typing import Dict, Tuple, Any, Optional, List
DRY_RUN = object()
def compress_query(query: str) -> str:
query = query.replace("\n", "")
query = re.sub(r"\s+", " ", query)
return query
def post(url: str, body: Optional[Any] = None, auth: Optional[Tuple[str, str]] = None):
logging.info(f"Requesting POST to {url} with {body}")
headers = {}
req = request.Request(url, headers=headers, method="POST")
if auth is not None:
auth_str = base64.b64encode(f"{auth[0]}:{auth[1]}".encode())
req.add_header("Authorization", f"Basic {auth_str.decode()}")
if body is None:
body = ""
req.add_header("Content-Type", "application/json; charset=utf-8")
data = json.dumps(body)
data = data.encode("utf-8")
req.add_header("Content-Length", len(data))
with request.urlopen(req, data) as response:
return response.read()
def dry_run_token(is_dry_run: bool) -> Any:
if is_dry_run:
return DRY_RUN
return os.environ["GITHUB_TOKEN"]
class GitHubRepo:
GRAPHQL_URL = "https://api.github.com/graphql"
def __init__(self, user, repo, token, test_data=None):
self.token = token
self.user = user
self.repo = repo
self.test_data = test_data
self.num_calls = 0
self.base = f"https://api.github.com/repos/{user}/{repo}/"
def headers(self):
return {
"Authorization": f"Bearer {self.token}",
}
def dry_run(self) -> bool:
return self.token == DRY_RUN
def graphql(self, query: str, variables: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
query = compress_query(query)
if variables is None:
variables = {}
response = self._request(
self.GRAPHQL_URL,
{"query": query, "variables": variables},
method="POST",
)
if self.dry_run():
return self.testing_response("POST", self.GRAPHQL_URL)
if "data" not in response:
msg = f"Error fetching data with query:\n{query}\n\nvariables:\n{variables}\n\nerror:\n{json.dumps(response, indent=2)}"
raise RuntimeError(msg)
return response
def testing_response(self, method: str, url: str) -> Any:
self.num_calls += 1
key = f"[{self.num_calls}] {method} - {url}"
if self.test_data is not None and key in self.test_data:
return self.test_data[key]
logging.info(f"Unknown URL in dry run: {key}")
return {}
def _request(self, full_url: str, body: Dict[str, Any], method: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a {method} to {full_url} with {body}")
return self.testing_response(method, full_url)
logging.info(f"Requesting {method} to {full_url} with {body}")
req = request.Request(full_url, headers=self.headers(), method=method.upper())
req.add_header("Content-Type", "application/json; charset=utf-8")
data = json.dumps(body)
data = data.encode("utf-8")
req.add_header("Content-Length", len(data))
try:
with request.urlopen(req, data) as response:
content = response.read()
except error.HTTPError as e:
msg = str(e)
error_data = e.read().decode()
raise RuntimeError(f"Error response: {msg}\n{error_data}")
logging.info(f"Got response from {full_url}: {content}")
try:
response = json.loads(content)
except json.decoder.JSONDecodeError as e:
return content
return response
def put(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="PUT")
def patch(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="PATCH")
def post(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
return self._request(self.base + url, data, method="POST")
def get(self, url: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a GET to {url}")
return self.testing_response("GET", url)
url = self.base + url
logging.info(f"Requesting GET to {url}")
req = request.Request(url, headers=self.headers())
with request.urlopen(req) as response:
response = json.loads(response.read())
return response
def delete(self, url: str) -> Dict[str, Any]:
if self.dry_run():
logging.info(f"Dry run, would have requested a DELETE to {url}")
return self.testing_response("DELETE", url)
url = self.base + url
logging.info(f"Requesting DELETE to {url}")
req = request.Request(url, headers=self.headers(), method="DELETE")
with request.urlopen(req) as response:
response = json.loads(response.read())
return response
def parse_remote(remote: str) -> Tuple[str, str]:
"""
Get a GitHub (user, repo) pair out of a git remote
"""
if remote.startswith("https://"):
# Parse HTTP remote
parts = remote.split("/")
if len(parts) < 2:
raise RuntimeError(f"Unable to parse remote '{remote}'")
user, repo = parts[-2], parts[-1].replace(".git", "")
else:
# Parse SSH remote
m = re.search(r":(.*)/(.*)\.git", remote)
if m is None or len(m.groups()) != 2:
raise RuntimeError(f"Unable to parse remote '{remote}'")
user, repo = m.groups()
user = os.getenv("DEBUG_USER", user)
repo = os.getenv("DEBUG_REPO", repo)
return user, repo
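# Expected behavior for typical GitHub remotes (illustrative):
#   parse_remote("https://github.com/apache/tvm.git")  ->  ("apache", "tvm")
#   parse_remote("git@github.com:apache/tvm.git")      ->  ("apache", "tvm")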
def git(command, **kwargs):
command = ["git"] + command
logging.info(f"Running {command}")
proc = subprocess.run(command, stdout=subprocess.PIPE, encoding="utf-8", **kwargs)
if proc.returncode != 0:
raise RuntimeError(f"Command failed {command}:\nstdout:\n{proc.stdout}")
return proc.stdout.strip()
def find_ccs(body: str) -> List[str]:
matches = re.findall(r"(cc( @[-A-Za-z0-9]+)+)", body, flags=re.MULTILINE)
matches = [full for full, last in matches]
reviewers = []
for match in matches:
if match.startswith("cc "):
match = match.replace("cc ", "")
users = [x.strip() for x in match.split("@")]
reviewers += users
reviewers = set(x for x in reviewers if x != "")
return list(reviewers)
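# Illustrative example (hypothetical body); result order is unspecified since
# the names pass through a set:
#   find_ccs("Fix bug\n\ncc @alice @bob")  ->  ["alice", "bob"]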
| 7,458 | 34.183962 | 132 | py |
tvm | tvm-main/ci/scripts/jenkins/cmd_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import os
import logging
import sys
import re
import tempfile
from pathlib import Path
from typing import List
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
assert (REPO_ROOT / "Jenkinsfile").exists()
class RelativePathFilter(logging.Filter):
def filter(self, record):
path = Path(record.pathname).resolve()
record.relativepath = str(path.relative_to(REPO_ROOT))
return True
def init_log():
logging.basicConfig(
format="[%(relativepath)s:%(lineno)d %(levelname)-1s] %(message)s", level=logging.INFO
)
# Flush on every log call (logging and then calling subprocess.run can make
# the output look confusing)
logging.root.handlers[0].addFilter(RelativePathFilter())
logging.root.handlers[0].flush = sys.stderr.flush
class Sh:
def __init__(self, env=None, cwd=None):
self.env = os.environ.copy()
if env is not None:
self.env.update(env)
self.cwd = cwd
def tee(self, cmd: str, **kwargs):
"""
Run 'cmd' in a shell then return the (process, stdout) as a tuple
"""
with tempfile.NamedTemporaryFile(delete=False) as f:
proc = self.run(f"{cmd} | tee {f.name}", **kwargs)
with open(f.name, "r") as f:
output = f.read()
return proc, output
def run(self, cmd: str, **kwargs):
logging.info(f"+ {cmd}")
defaults = {
"check": True,
"shell": True,
"env": self.env,
"encoding": "utf-8",
"cwd": self.cwd,
}
defaults.update(kwargs)
return subprocess.run(cmd, **defaults)
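# Minimal usage sketch (assumes a POSIX shell with 'tee' available):
#   sh = Sh(env={"FOO": "1"})
#   proc, out = sh.tee("echo $FOO")  # out == "1\n"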
def tags_from_title(title: str) -> List[str]:
tags = re.findall(r"\[(.*?)\]", title)
tags = [t.strip() for t in tags]
return tags
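# Illustrative example (hypothetical title):
#   tags_from_title("[skip ci][docs] Fix typo")  ->  ["skip ci", "docs"]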
| 2,620 | 29.835294 | 94 | py |
tvm | tvm-main/ci/scripts/jenkins/check_pr.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import re
import os
import json
import textwrap
from dataclasses import dataclass
from typing import Any, List, Callable
from git_utils import GitHubRepo, parse_remote, git
from cmd_utils import init_log, tags_from_title
GITHUB_USERNAME_REGEX = re.compile(r"(@[a-zA-Z0-9-]+)", flags=re.MULTILINE)
OK = object()
FAIL = object()
@dataclass
class Check:
# check to run, returning OK means it passed, anything else means it failed
check: Callable[[str], Any]
# function to call to generate the error message
error_fn: Callable[[Any], str]
def non_empty(s: str):
if len(s) == 0:
return FAIL
return OK
def usernames(s: str):
m = GITHUB_USERNAME_REGEX.findall(s)
return m if m else OK
def tags(s: str):
items = tags_from_title(s)
if len(items) == 0:
return FAIL
return OK
def trailing_period(s: str):
if s.endswith("."):
return FAIL
return OK
title_checks = [
Check(check=non_empty, error_fn=lambda d: "PR must have a title but title was empty"),
Check(check=trailing_period, error_fn=lambda d: "PR must not end in a trailing '.'"),
# TODO(driazati): enable this check once https://github.com/apache/tvm/issues/12637 is done
# Check(
# check=usernames,
# error_fn=lambda d: f"PR title must not tag anyone but found these usernames: {d}",
# ),
]
body_checks = [
Check(check=non_empty, error_fn=lambda d: "PR must have a body but body was empty"),
# TODO(driazati): enable this check once https://github.com/apache/tvm/issues/12637 is done
# Check(
# check=usernames,
# error_fn=lambda d: f"PR body must not tag anyone but found these usernames: {d}",
# ),
]
def run_checks(checks: List[Check], s: str, name: str) -> bool:
print(f"Running checks for {name}")
print(textwrap.indent(s, prefix=" "))
passed = True
print(" Checks:")
for i, check in enumerate(checks):
result = check.check(s)
if result == OK:
print(f" [{i+1}] {check.check.__name__}: PASSED")
else:
passed = False
msg = check.error_fn(result)
print(f" [{i+1}] {check.check.__name__}: FAILED: {msg}")
return passed
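# Illustrative call (hypothetical title that satisfies both active title checks):
#   run_checks(checks=title_checks, s="[ci] Fix flaky test", name="PR title")  ->  True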
if __name__ == "__main__":
init_log()
help = "Check a PR's title and body for conformance to guidelines"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--pr", required=True)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument(
"--pr-data", help="(testing) PR data to use instead of fetching from GitHub"
)
args = parser.parse_args()
try:
pr = int(args.pr)
except ValueError:
print(f"PR was not a number: {args.pr}")
exit(0)
if args.pr_data:
pr = json.loads(args.pr_data)
else:
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
pr = github.get(f"pulls/{args.pr}")
body = "" if pr["body"] is None else pr["body"].strip()
title = "" if pr["title"] is None else pr["title"].strip()
title_passed = run_checks(checks=title_checks, s=title, name="PR title")
print("")
body_passed = run_checks(checks=body_checks, s=body, name="PR body")
if title_passed and body_passed:
print("All checks passed!")
exit(0)
else:
print(
"Some checks failed, please review the logs above and edit your PR on GitHub accordingly"
)
exit(1)
| 4,484 | 29.719178 | 101 | py |
tvm | tvm-main/ci/scripts/jenkins/git_skip_ci.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import logging
import argparse
from git_utils import git, GitHubRepo, parse_remote
from cmd_utils import tags_from_title, init_log
if __name__ == "__main__":
help = "Exits with 0 if CI should be skipped, 1 otherwise"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--pr", required=True)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument(
"--pr-title", help="(testing) PR title to use instead of fetching from GitHub"
)
args = parser.parse_args()
init_log()
branch = git(["rev-parse", "--abbrev-ref", "HEAD"])
log = git(["log", "--format=%s", "-1"])
# Check the PR's title (don't check this until everything else passes first)
def check_pr_title():
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
if args.pr_title:
title = args.pr_title
else:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
pr = github.get(f"pulls/{args.pr}")
title = pr["title"]
logging.info(f"pr title: {title}")
tags = tags_from_title(title)
logging.info(f"Found title tags: {tags}")
return "skip ci" in tags
if args.pr != "null" and args.pr.strip() != "" and branch != "main" and check_pr_title():
logging.info("PR title starts with '[skip ci]', skipping...")
exit(0)
else:
logging.info(f"Not skipping CI:\nargs.pr: {args.pr}\nbranch: {branch}\ncommit: {log}")
exit(1)
| 2,422 | 37.460317 | 94 | py |
tvm | tvm-main/ci/scripts/jenkins/pytest_ids.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import io
import argparse
from contextlib import redirect_stdout
class NodeidsCollector:
def pytest_collection_modifyitems(self, items):
self.nodeids = [item.nodeid for item in items]
def main(folder):
collector = NodeidsCollector()
f = io.StringIO()
with redirect_stdout(f):
pytest.main(["-qq", "--collect-only", folder], plugins=[collector])
for nodeid in collector.nodeids:
print(nodeid)
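# Output is one pytest nodeid per line, e.g. (hypothetical test layout):
#   tests/python/test_foo.py::test_bar
#   tests/python/test_foo.py::TestClass::test_baz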
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="List pytest nodeids for a folder")
parser.add_argument("--folder", required=True, help="test folder to inspect")
args = parser.parse_args()
main(args.folder)
| 1,511 | 33.363636 | 84 | py |
tvm | tvm-main/ci/scripts/jenkins/s3.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import re
from pathlib import Path
from typing import List
from enum import Enum
from cmd_utils import Sh, REPO_ROOT, init_log
RETRY_SCRIPT = REPO_ROOT / "ci" / "scripts" / "jenkins" / "retry.sh"
S3_DOWNLOAD_REGEX = re.compile(r"download: s3://.* to (.*)")
SH = Sh()
class Action(Enum):
UPLOAD = 1
DOWNLOAD = 2
def show_md5(item: str) -> None:
if not Path(item).is_dir():
sh.run(f"md5sum {item}")
def parse_output_files(stdout: str) -> List[str]:
"""
Grab the list of downloaded files from the output of 'aws s3 cp'. Lines look
like:
download: s3://some/prefix/a_file.txt to a_file.txt
"""
files = []
for line in stdout.split("\n"):
line = line.strip()
if line == "":
continue
m = S3_DOWNLOAD_REGEX.match(line)
if m:
files.append(m.groups()[0])
return files
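# Illustrative example (hypothetical 'aws s3 cp' output line):
#   parse_output_files("download: s3://bucket/tag/a.txt to a.txt")  ->  ["a.txt"]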
def chmod(files: List[str]) -> None:
"""
S3 has no concept of file permissions so add them back in here to every file
"""
# Add execute bit for downloads
to_chmod = [str(f) for f in files]
logging.info(f"Adding execute bit for files: {to_chmod}")
if len(to_chmod) > 0:
SH.run(f"chmod +x {' '.join(to_chmod)}")
def s3(source: str, destination: str, recursive: bool) -> List[str]:
"""
Send or download the source to the destination in S3
"""
cmd = f". {RETRY_SCRIPT.relative_to(REPO_ROOT)} && retry 3 aws s3 cp --no-progress"
if recursive:
cmd += " --recursive"
cmd += f" {source} {destination}"
_, stdout = SH.tee(cmd)
return stdout
if __name__ == "__main__":
init_log()
help = "Uploads or downloads files from S3"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--action", help="either 'upload' or 'download'", required=True)
parser.add_argument("--bucket", help="s3 bucket", required=True)
parser.add_argument(
"--prefix", help="s3 bucket + tag (e.g. s3://tvm-ci-prod/PR-1234/cpu", required=True
)
parser.add_argument("--items", help="files and folders to upload", nargs="+")
args = parser.parse_args()
logging.info(args)
if Path.cwd() != REPO_ROOT:
logging.error(f"s3.py can only be executed from the repo root, instead was in {Path.cwd()}")
exit(1)
prefix = args.prefix.strip("/")
s3_path = f"s3://{args.bucket}/{prefix}"
logging.info(f"Using s3 path: {s3_path}")
if args.action == "upload":
action = Action.UPLOAD
elif args.action == "download":
action = Action.DOWNLOAD
else:
logging.error(f"Unsupported action: {args.action}")
exit(1)
if args.items is None:
if args.action == "upload":
logging.error("Cannot upload without --items")
exit(1)
else:
# Download the whole prefix
items = ["."]
else:
items = args.items
for item in items:
if action == Action.DOWNLOAD:
source = s3_path
recursive = True
if item != ".":
source = s3_path + "/" + item
recursive = False
stdout = s3(source=source, destination=item, recursive=recursive)
files = parse_output_files(stdout)
chmod(files)
for file in files:
# Show md5 after downloading
show_md5(file)
elif action == Action.UPLOAD:
if not Path(item).exists():
logging.warning(f"The path doesn't exist: {item}")
continue
show_md5(item)
if Path(item).is_dir():
if len(list(Path(item).glob("**/*"))) == 0:
raise RuntimeError(f"Cannot upload empty folder with name: {item}")
s3(item, s3_path + "/" + item, recursive=Path(item).is_dir())
| 4,713 | 30.013158 | 100 | py |
tvm | tvm-main/ci/scripts/github/github_tag_teams.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
import argparse
import logging
import re
import sys
from pathlib import Path
from typing import Dict, Any, List, Tuple, Optional
# Hackery to enable importing of utils from ci/scripts/jenkins
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
from git_utils import git, GitHubRepo, parse_remote, find_ccs, dry_run_token
from cmd_utils import tags_from_title, init_log
GITHUB_NAME_REGEX = r"@[a-zA-Z0-9-]+"
def parse_line(line: str) -> Tuple[str, List[str]]:
line = line.lstrip(" -")
line = line.split()
# Parse out the name as anything up until the first tagged person
tag_items = []
tag_end = 0
for i, piece in enumerate(line):
if piece.startswith("@"):
tag_end = i
break
tag_items.append(piece)
tag = " ".join(tag_items).rstrip(":")
# From the last word that was part of the tag name, start looking for users
# tagged with a '@'
users = []
for piece in line[tag_end:]:
if piece.startswith("@"):
users.append(piece.lstrip("@"))
return (tag, list(sorted(users)))
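# Illustrative example (hypothetical bullet from the teams issue):
#   parse_line("- some team: @person1 @person2")
#   ->  ("some team", ["person1", "person2"])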
def fetch_issue(github: GitHubRepo, issue_number: int):
query = """query($owner: String!, $name: String!, $number: Int!){
repository(owner: $owner, name: $name) {
issue(number: $number) {
body
comments(first:100) {
nodes {
body
}
}
}
}
}"""
r = github.graphql(
query,
variables={
"owner": github.user,
"name": github.repo,
"number": issue_number,
},
)
return r
def parse_teams(r: Dict[str, Any], issue_number: int) -> Dict[str, str]:
"""
Fetch an issue and parse out series of tagged people from the issue body
and comments
"""
issue = r["data"]["repository"]["issue"]
if issue is None or issue.get("body") is None:
raise RuntimeError(f"Could not find issue #{issue_number}\n\n{json.dumps(r, indent=2)}")
result = {}
def add_tag(tag, users):
if tag in result:
result[tag] += users
else:
result[tag] = users
# Parse the issue body (only bullets are looked at)
for line in issue["body"].split("\n"):
line = line.strip()
if not line.startswith("- "):
continue
if "@" not in line:
continue
tag, users = parse_line(line)
add_tag(tag, users)
# Parse comment bodies
for comment in issue["comments"]["nodes"]:
for line in comment["body"].split("\n"):
if "@" not in line:
continue
tag, users = parse_line(line)
add_tag(tag, users)
# De-duplicate users listed twice for the same tag
for tag in result:
result[tag] = list(set(result[tag]))
return {k.lower(): v for k, v in result.items() if k.strip()}
def tags_from_labels(labels: List[Dict[str, Any]]) -> List[str]:
return [label["name"] for label in labels]
def add_ccs_to_body(body: str, to_cc: List[str]) -> str:
lines = body.split("\n")
cc_line_idx = None
for i, line in enumerate(reversed(lines)):
if line.strip() == "":
continue
if line.startswith("cc @"):
cc_line_idx = len(lines) - i - 1
else:
break
def gen_cc_line(users):
users = sorted(users)
return "cc " + " ".join([f"@{user}" for user in users])
if cc_line_idx is None:
print("Did not find existing cc line")
lines.append("")
lines.append(gen_cc_line(to_cc))
else:
# Edit cc line in place
line = lines[cc_line_idx]
print(f"Found existing cc line at {cc_line_idx}: {line}")
existing_ccs = find_ccs(line)
print(f"Found cc's: {existing_ccs}")
if set(to_cc).issubset(set(existing_ccs)):
# Don't do anything if there is no update needed
return None
line = gen_cc_line(set(existing_ccs + to_cc))
lines[cc_line_idx] = line
return "\n".join(lines)
def determine_users_to_cc(
issue: Dict[str, Any], github: GitHubRepo, team_issue: str, issue_data: Optional[Dict[str, Any]]
) -> List[str]:
if issue_data is None:
issue_data = fetch_issue(github, issue_number=int(team_issue))
# Fetch the list of teams
teams = parse_teams(issue_data, issue_number=int(team_issue))
logging.info(f"Found these teams in issue #{team_issue}\n{json.dumps(teams, indent=2)}")
title = issue["title"]
if "author" in issue:
author = issue["author"]["login"]
else:
author = issue["user"]["login"]
tags = tags_from_title(title)
if isinstance(issue["labels"], dict):
tags += tags_from_labels(issue["labels"]["nodes"])
else:
tags += tags_from_labels(issue["labels"])
tags = [t.lower() for t in tags]
logging.info(f"Found tags: {tags}")
# Update the PR or issue based on tags in the title and GitHub tags
to_cc = [teams.get(t, []) for t in tags]
to_cc = list(set(item for sublist in to_cc for item in sublist))
to_cc = [user for user in to_cc if user != author]
return to_cc
def get_tags(pr_data: Dict[str, Any], github: GitHubRepo, team_issue: int) -> str:
to_cc = determine_users_to_cc(
issue=pr_data, github=github, team_issue=team_issue, issue_data=None
)
logging.info(f"Users to cc based on labels: {to_cc}")
description = "<sub>See [#10317](https://github.com/apache/tvm/issues/10317) for details</sub>"
if len(to_cc) == 0:
return "No users to tag found in teams " + description
return "cc " + ", ".join([f"@{user}" for user in to_cc]) + " " + description
if __name__ == "__main__":
help = "Automatically tag people based on PR / issue labels"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--team-issue", default="10317", help="issue number to look at for ccs")
parser.add_argument(
"--team-issue-json", help="(testing only) issue JSON to parse rather than fetch from GitHub"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
github = GitHubRepo(token=dry_run_token(args.dry_run), user=user, repo=repo)
if args.team_issue_json:
issue_data = json.loads(args.team_issue_json)
else:
issue_data = fetch_issue(github, issue_number=int(args.team_issue))
# Extract the payload from GitHub Actions
issue = json.loads(os.getenv("ISSUE", "null"))
pr = json.loads(os.getenv("PR", "null"))
if (issue is None and pr is None) or (issue is not None and pr is not None):
raise RuntimeError("Exactly one of $PR or $ISSUE must be set in the environment")
if pr is not None:
if pr["draft"]:
print(f"Terminating since {pr['number']} is a draft")
exit(0)
# PRs/issues have the same structure for the fields needed here
item = issue if issue is not None else pr
title = item["title"]
body = item["body"]
to_cc = determine_users_to_cc(
issue=item, github=github, team_issue=args.team_issue, issue_data=issue_data
)
existing_tags = list(set(re.findall(GITHUB_NAME_REGEX, body)))
existing_tags = set(tag.replace("@", "") for tag in existing_tags)
logging.info(f"Found existing tags: {existing_tags}")
to_cc = [user for user in to_cc if user not in existing_tags]
logging.info("Users to cc based on labels", to_cc)
# Create the new PR/issue body
if len(to_cc) == 0:
logging.info("No one to cc, exiting")
exit(0)
new_body = add_ccs_to_body(body, to_cc)
if new_body is None:
logging.info(f"Everyone to cc is already cc'ed, no update needed")
exit(0)
logging.info(f"Changing body from:\n----\n{body}\n----\nto:\n----\n{new_body}\n----")
# Set the PR/issue body on GitHub
data = {"body": new_body}
if issue is not None:
issue_number = issue["number"]
url = f"issues/{issue_number}"
elif pr is not None:
pr_number = pr["number"]
url = f"pulls/{pr_number}"
else:
raise RuntimeError("Unreachable, please report a bug with a link to the failed workflow")
if not args.dry_run:
github.post(url, data=data)
else:
logging.info(f"Dry run, would have updated {url} with {data}")
| 9,598 | 31.103679 | 100 | py |
tvm | tvm-main/ci/scripts/github/github_docs_comment.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Any
def build_docs_url(base_url_docs, pr_number, build_number):
return f"{base_url_docs}/PR-{str(pr_number)}/{str(build_number)}/docs/index.html"
def find_target_url(pr_head: Dict[str, Any]):
for status in pr_head["statusCheckRollup"]["contexts"]["nodes"]:
if status.get("context", "") == "tvm-ci/pr-head":
return status["targetUrl"]
raise RuntimeError(f"Unable to find tvm-ci/pr-head status in {pr_head}")
def get_pr_and_build_numbers(target_url):
target_url = target_url[target_url.find("PR-") : len(target_url)]
split = target_url.split("/")
pr_number = split[0].strip("PR-")
build_number = split[1]
return {"pr_number": pr_number, "build_number": build_number}
def get_doc_url(pr: Dict[str, Any], base_docs_url: str = "https://pr-docs.tlcpack.ai") -> str:
pr_head = pr["commits"]["nodes"][0]["commit"]
target_url = find_target_url(pr_head)
pr_and_build = get_pr_and_build_numbers(target_url)
commit_sha = pr_head["oid"]
docs_url = build_docs_url(
base_docs_url, pr_and_build["pr_number"], pr_and_build["build_number"]
)
return f"Built docs for commit {commit_sha} can be found [here]({docs_url})."
| 2,036 | 36.722222 | 94 | py |
tvm | tvm-main/ci/scripts/github/github_cc_reviewers.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import json
import argparse
import re
from pathlib import Path
from urllib import error
from typing import Dict, Any, List
# Hackery to enable importing of utils from ci/scripts/jenkins
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
from git_utils import git, GitHubRepo, parse_remote
def find_reviewers(body: str) -> List[str]:
print(f"Parsing body:\n{body}")
matches = re.findall(r"(cc( @[-A-Za-z0-9]+)+)", body, flags=re.MULTILINE)
matches = [full for full, last in matches]
print("Found matches:", matches)
reviewers = []
for match in matches:
if match.startswith("cc "):
match = match.replace("cc ", "")
users = [x.strip() for x in match.split("@")]
reviewers += users
reviewers = set(x for x in reviewers if x != "")
return sorted(list(reviewers))
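# Illustrative example (hypothetical body):
#   find_reviewers("cc @a-person @b")  ->  ["a-person", "b"]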
if __name__ == "__main__":
help = "Add @cc'ed people in a PR body as reviewers"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--testing-reviews-json", help="(testing only) reviews as JSON")
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
pr = json.loads(os.environ["PR"])
number = pr["number"]
body = pr["body"]
if body is None:
body = ""
new_reviewers = find_reviewers(body)
print("Found these reviewers:", new_reviewers)
if args.testing_reviews_json:
existing_reviews = json.loads(args.testing_reviews_json)
else:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
existing_reviews = github.get(f"pulls/{number}/reviews")
existing_review_users = [review["user"]["login"] for review in existing_reviews]
print("PR has reviews from these users:", existing_review_users)
existing_review_users = set(r.lower() for r in existing_review_users)
existing_reviewers = [review["login"] for review in pr["requested_reviewers"]]
print("PR already had these reviewers requested:", existing_reviewers)
existing_reviewers_lower = {
existing_reviewer.lower() for existing_reviewer in existing_reviewers
}
to_add = []
for new_reviewer in new_reviewers:
if (
new_reviewer.lower() in existing_reviewers_lower
or new_reviewer.lower() in existing_review_users
):
print(f"{new_reviewer} is already review requested, skipping")
else:
to_add.append(new_reviewer)
print(f"After filtering existing reviewers, adding: {to_add}")
if not args.dry_run:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
# Add reviewers 1 by 1 since GitHub will error out if any of the
# requested reviewers aren't members / contributors
for reviewer in to_add:
try:
github.post(f"pulls/{number}/requested_reviewers", {"reviewers": [reviewer]})
except KeyboardInterrupt:
sys.exit()
except (RuntimeError, error.HTTPError) as e:
# Catch any exception so other reviewers can be processed
print(f"Failed to add reviewer {reviewer}: {e}")
| 4,380 | 35.815126 | 93 | py |
tvm | tvm-main/ci/scripts/github/github_commenter.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import logging
import sys
from pathlib import Path
from typing import Dict, Tuple, Any, Optional, List, Union
# Hackery to enable importing of utils from ci/scripts/jenkins
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
from git_utils import GitHubRepo
BOT_COMMENT_START = "<!---bot-comment-->"
WELCOME_TEXT = "Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips. Please request code reviews from [Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers) by @-ing them in a comment."
class BotCommentBuilder:
ALLOWLIST_USERS = {"driazati", "gigiblender", "areusch"}
def __init__(self, github: GitHubRepo, data: Dict[str, Any]):
self.github = github
self.pr_number = data["number"]
self.comment_data = data["comments"]["nodes"]
self.author = data["author"]["login"]
def find_bot_comment(self) -> Optional[Dict[str, Any]]:
"""
Return the existing bot comment or None if it does not exist
"""
for comment in self.comment_data:
logging.info(f"Checking comment {comment}")
if (
comment["author"]["login"] == "github-actions"
and BOT_COMMENT_START in comment["body"]
):
logging.info("Found existing comment")
return comment
logging.info("No existing comment found")
return None
def find_existing_body(self) -> Dict[str, str]:
"""
Find existing dynamic bullet point items
"""
existing_comment = self.find_bot_comment()
if existing_comment is None:
logging.info(f"No existing comment while searching for body items")
return {}
matches = re.findall(
r"<!--bot-comment-([a-z][a-z-]+)-start-->([\S\s]*?)<!--bot-comment-([a-z-]+)-end-->",
existing_comment["body"],
flags=re.MULTILINE,
)
logging.info(f"Fetch body item matches: {matches}")
items = {}
for start, text, end in matches:
if start != end:
raise RuntimeError(
f"Malformed comment found: {start} marker did not have matching end, found instead {end}"
)
items[start] = text.strip().lstrip("* ")
logging.info(f"Found body items: {items}")
return items
def _post_comment(self, body_items: Dict[str, str]):
comment = BOT_COMMENT_START + "\n\n" + WELCOME_TEXT + "\n\n"
for key, content in body_items.items():
line = self.start_key(key) + "\n * " + content.strip() + self.end_key(key)
logging.info(f"Adding line {line}")
comment += line
comment += "\n\n<sub>Generated by [tvm-bot](https://github.com/apache/tvm/blob/main/ci/README.md#github-actions)</sub>"
data = {"body": comment}
url = f"issues/{self.pr_number}/comments"
logging.info(f"Commenting {comment} on {url}")
if self.author not in self.ALLOWLIST_USERS:
logging.info(f"Skipping comment for author {self.author}")
return
existing_comment = self.find_bot_comment()
if existing_comment is None:
# Comment does not exist, post it
r = self.github.post(url, data)
else:
# Comment does exist, update it
comment_url = f"issues/comments/{existing_comment['databaseId']}"
r = self.github.patch(comment_url, data)
logging.info(f"Got response from posting comment: {r}")
def start_key(self, key: str) -> str:
return f"<!--bot-comment-{key}-start-->"
def end_key(self, key: str) -> str:
return f"<!--bot-comment-{key}-end-->"
def post_items(self, items: List[Tuple[str, str]]):
"""
Update or post bullet points in the PR based on 'items' which is a
list of (key, text) pairs
"""
# Find the existing bullet points
body_items = self.find_existing_body()
# Add or update the requested items
for key, text in items:
if text is None or text.strip() == "":
logging.info(f"Skipping {key} since it was empty")
continue
logging.info(f"Updating comment items {key} with {text}")
body_items[key] = text.strip()
# Post or update the comment
# print(body_items)
self._post_comment(body_items=body_items)
| 5,455 | 38.251799 | 317 | py |
tvm | tvm-main/ci/scripts/github/github_tvmbot.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
import argparse
import sys
import warnings
import logging
import traceback
import re
from typing import Dict, Any, List, Optional, Callable, Union
from pathlib import Path
# Hackery to enable importing of utils from ci/scripts/jenkins
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
from git_utils import git, GitHubRepo, parse_remote, post
from cmd_utils import init_log
Review = Dict[str, Any]
CIJob = Dict[str, Any]
Comment = Dict[str, Any]
CommentChecker = Callable[[Comment], bool]
EXPECTED_JOBS = ["tvm-ci/pr-head"]
TVM_BOT_JENKINS_TOKEN = os.environ["TVM_BOT_JENKINS_TOKEN"]
GH_ACTIONS_TOKEN = os.environ["GH_ACTIONS_TOKEN"]
JENKINS_URL = "https://ci.tlcpack.ai/"
THANKS_MESSAGE = r"(\s*)Thanks for contributing to TVM! Please refer to guideline https://tvm.apache.org/docs/contribute/ for useful information and tips. After the pull request is submitted, please request code reviews from \[Reviewers\]\(https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers\) by them in the pull request thread.(\s*)"
def to_json_str(obj: Any) -> str:
return json.dumps(obj, indent=2)
COLLABORATORS_QUERY = """
query ($owner: String!, $name: String!, $user: String!) {
repository(owner: $owner, name: $name) {
collaborators(query: $user, first: 100) {
nodes {
login
}
}
}
}
"""
MENTIONABLE_QUERY = """
query ($owner: String!, $name: String!, $user: String!) {
repository(owner: $owner, name: $name) {
mentionableUsers(query: $user, first: 100) {
nodes {
login
}
}
}
}
"""
PR_QUERY = """
query ($owner: String!, $name: String!, $number: Int!) {
repository(owner: $owner, name: $name) {
pullRequest(number: $number) {
title
body
state
author {
login
}
comments(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
authorAssociation
author {
login
}
id
updatedAt
body
}
}
authorCommits:commits(last:100) {
nodes {
commit {
authors(first:100) {
nodes {
name
email
}
}
}
}
}
commits(last: 1) {
nodes {
commit {
oid
statusCheckRollup {
contexts(first: 100) {
pageInfo {
hasNextPage
}
nodes {
... on CheckRun {
name
databaseId
checkSuite {
workflowRun {
databaseId
workflow {
name
}
}
}
status
conclusion
url
}
... on StatusContext {
state
context
targetUrl
}
}
}
}
}
}
}
reviewDecision
reviews(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
body
updatedAt
url
id
authorCanPushToRepository
commit {
oid
}
author {
login
}
state
}
}
}
}
}
"""
def walk(obj, visitor, parent_key=None):
"""
Recursively call 'visitor' on all the children of a dictionary
"""
visitor(obj, parent_key)
if isinstance(obj, dict):
for k, v in obj.items():
walk(v, visitor, parent_key=k)
elif isinstance(obj, list):
for v in obj:
walk(v, visitor)
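# Usage sketch: collect every "login" value from a (hypothetical) GraphQL payload:
#   logins = []
#   walk(data, lambda obj, key: logins.append(obj) if key == "login" else None)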
class PR:
def __init__(
self,
number: int,
owner: str,
repo: str,
dry_run: bool = False,
raw_data: Dict[str, Any] = None,
):
self.owner = owner
self.number = number
self.repo_name = repo
self.dry_run = dry_run
self.has_error = False
if dry_run and raw_data:
# In test mode there is no need to fetch anything
self.raw = raw_data
self.github = None
else:
self.github = GitHubRepo(user=owner, repo=repo, token=os.environ["GITHUB_TOKEN"])
if os.getenv("DEBUG", "0") == "1":
# For local runs fill in the requested data but cache it for
# later use
cached_path = Path("pr.json")
if not cached_path.exists():
self.raw = self.fetch_data()
with open(cached_path, "w") as f:
json.dump(self.raw, f, indent=2)
else:
with open(cached_path) as f:
self.raw = json.load(f)
else:
# Usual path, fetch the PR's data based on the number from
# GitHub
self.raw = self.fetch_data()
def checker(obj, parent_key):
"""
Verify that any paged results don't have extra data (if so the bot
may still work since most relevant comments will be more recent)
"""
if parent_key == "pageInfo":
if obj.get("hasPreviousPage", False):
warnings.warn(f"Found {obj} with a previous page, bot may be missing data")
if obj.get("hasNextPage", False):
warnings.warn(f"Found {obj} with a next page, bot may be missing data")
walk(self.raw, checker)
logging.info(f"Verified data, running with PR {to_json_str(self.raw)}")
def __repr__(self):
return json.dumps(self.raw, indent=2)
def react(self, comment: Dict[str, Any], content: str):
"""
React with a thumbs up to a comment
"""
url = f"issues/comments/{comment['id']}/reactions"
data = {"content": content}
if self.dry_run:
logging.info(f"Dry run, would have +1'ed to {url} with {data}")
else:
self.github.post(url, data=data)
def head_commit(self):
return self.raw["commits"]["nodes"][0]["commit"]
def co_authors(self) -> List[str]:
authors = []
for commit in self.raw["authorCommits"]["nodes"]:
# Co-authors always come after the main author according to the
# GitHub docs, so ignore the first item
for author in commit["commit"]["authors"]["nodes"][1:]:
name = author["name"]
email = author["email"]
authors.append(f"{name} <{email}>")
return list(set(authors))
def head_oid(self):
return self.head_commit()["oid"]
def ci_jobs(self) -> List[CIJob]:
"""
Get a list of all CI jobs (GitHub Actions and other) in a unified format
"""
jobs = []
for item in self.head_commit()["statusCheckRollup"]["contexts"]["nodes"]:
if "checkSuite" in item:
# GitHub Actions job, parse separately
status = item["conclusion"]
if status is None:
# If the 'conclusion' isn't filled out the job hasn't
# finished yet
status = "PENDING"
workflow_name = item["checkSuite"]["workflowRun"]["workflow"]["name"]
if workflow_name != "CI":
# Ignore all jobs that aren't in the main.yml workflow (these are mostly
# automation jobs that run on PRs for tagging / reviews)
continue
check_name = item["name"]
jobs.append(
{
"name": f"{workflow_name} / {check_name}",
"url": item["url"],
"status": status.upper(),
}
)
else:
# GitHub Status (e.g. from Jenkins)
jobs.append(
{
"name": item["context"],
"url": item["targetUrl"],
"status": item["state"].upper(),
}
)
logging.info(f"Found CI jobs for {self.head_commit()['oid']} {to_json_str(jobs)}")
return jobs
def reviews(self) -> List[Review]:
return self.raw["reviews"]["nodes"]
def head_commit_reviews(self) -> List[Review]:
"""
Find reviews associated with the head commit
"""
commits_to_review_status: Dict[str, List[Review]] = {}
for review in self.reviews():
if not review["authorCanPushToRepository"]:
# ignore reviews from non-committers
continue
oid = review["commit"]["oid"]
if oid in commits_to_review_status:
commits_to_review_status[oid].append(review)
else:
commits_to_review_status[oid] = [review]
# Only use the data for the head commit of the PR
head_reviews = commits_to_review_status.get(self.head_oid(), [])
return head_reviews
def fetch_data(self):
"""
Fetch the data for this PR from GitHub
"""
return self.github.graphql(
query=PR_QUERY,
variables={
"owner": self.owner,
"name": self.repo_name,
"number": self.number,
},
)["data"]["repository"]["pullRequest"]
def search_collaborator(self, user: str) -> List[Dict[str, Any]]:
"""
Query GitHub for collaborators matching 'user'
"""
return self.search_users(user, COLLABORATORS_QUERY)["collaborators"]["nodes"]
def search_users(self, user: str, query: str) -> List[Dict[str, Any]]:
return self.github.graphql(
query=query,
variables={
"owner": self.owner,
"name": self.repo_name,
"user": user,
},
)["data"]["repository"]
def search_mentionable_users(self, user: str) -> List[Dict[str, Any]]:
return self.search_users(user, MENTIONABLE_QUERY)["mentionableUsers"]["nodes"]
def comment(self, text: str) -> None:
"""
Leave the comment 'text' on this PR
"""
logging.info(f"Commenting:\n{text}")
# TODO: Update latest comment in-place if there has been no activity
data = {"body": text}
url = f"issues/{self.number}/comments"
if self.dry_run:
logging.info(
f"Dry run, would have commented on url={url} commenting with data={to_json_str(data)}"
)
return
self.github.post(url, data=data)
def state(self) -> str:
"""
PR state (OPEN, CLOSED, MERGED, etc)
"""
return self.raw["state"]
def processed_body(self) -> str:
body = self.raw["body"].strip().replace("\r", "")
# Remove any @-mentions of people
body = re.sub(r"(\s)@", "\g<1>", body)
# Remove the auto-inserted text since it's not useful to have in the commit log
body = re.sub(THANKS_MESSAGE, "\n\n", body)
return body.strip()
def body_with_co_authors(self) -> str:
"""
Add 'Co-authored-by' strings to the PR body based on the prior commits
in the PR
"""
body = self.processed_body()
author_lines = self.co_authors()
logging.info(f"Found co-authors: author_lines={author_lines}")
authors_to_add = []
for author_line in author_lines:
if author_line not in body:
authors_to_add.append(f"Co-authored-by: {author_line}")
if len(authors_to_add) > 0:
# If the line isn't already in the PR body (it could have been
# added manually), put it in
full_author_text = "\n".join(authors_to_add)
body = f"{body}\n\n{full_author_text}"
return body
def merge(self) -> None:
"""
Request a merge of this PR via the GitHub API
"""
url = f"pulls/{self.number}/merge"
title = self.raw["title"] + f" (#{self.number})"
body = self.body_with_co_authors()
logging.info(f"Full commit:\n{title}\n\n{body}")
data = {
"commit_title": title,
"commit_message": body,
# The SHA is necessary in case there was an update right when this
# script ran, GitHub will sort out who won
"sha": self.head_oid(),
"merge_method": "squash",
}
if self.dry_run:
logging.info(f"Dry run, would have merged with url={url} and data={to_json_str(data)}")
return
r = self.github.put(url, data=data)
logging.info(f"GitHub merge response: {r}")
return r
def author(self) -> str:
return self.raw["author"]["login"]
def find_failed_ci_jobs(self) -> List[CIJob]:
# NEUTRAL is GitHub Action's way of saying cancelled
return [
job
for job in self.ci_jobs()
if job["status"] not in {"SUCCESS", "SUCCESSFUL", "SKIPPED"}
]
def find_missing_expected_jobs(self) -> List[str]:
# Map of job name: has seen in completed jobs
seen_expected_jobs = {name: False for name in EXPECTED_JOBS}
logging.info(f"Expected to see jobs: {seen_expected_jobs}")
missing_expected_jobs = []
for job in self.ci_jobs():
seen_expected_jobs[job["name"]] = True
for name, seen in seen_expected_jobs.items():
if not seen:
missing_expected_jobs.append(name)
return missing_expected_jobs
def trigger_gha_ci(self, sha: str) -> None:
logging.info(f"POST-ing a workflow_dispatch event to main.yml")
actions_github = GitHubRepo(
user=self.github.user, repo=self.github.repo, token=GH_ACTIONS_TOKEN
)
r = actions_github.post(
url="actions/workflows/main.yml/dispatches",
data={
"ref": "main",
},
)
logging.info(f"Successful workflow_dispatch: {r}")
def merge_if_passed_checks(self) -> Optional[Dict[str, Any]]:
failed_ci_jobs = self.find_failed_ci_jobs()
all_ci_passed = len(failed_ci_jobs) == 0
has_one_approval = False
if not all_ci_passed:
failed_jobs_msg = "\n".join(
[f" * [{job['name']} (`{job['status']}`)]({job['url']})" for job in failed_ci_jobs]
)
self.comment(
f"Cannot merge, these CI jobs are not successful on {self.head_oid()}:\n{failed_jobs_msg}"
)
return None
missing_expected_jobs = self.find_missing_expected_jobs()
if len(missing_expected_jobs) > 0:
missing_jobs_msg = "\n".join([f" * `{name}`" for name in missing_expected_jobs])
self.comment(f"Cannot merge, missing expected jobs:\n{missing_jobs_msg}")
return None
head_commit_reviews = self.head_commit_reviews()
for review in head_commit_reviews:
if review["state"] == "CHANGES_REQUESTED":
self.comment(
f"Cannot merge, found [this review]({review['url']}) on {self.head_oid()} with changes requested"
)
return None
if review["state"] == "APPROVED":
has_one_approval = True
logging.info(f"Found approving review: {to_json_str(review)}")
if has_one_approval and all_ci_passed:
return self.merge()
elif not has_one_approval:
self.comment(
f"Cannot merge, did not find any approving reviews from users with write access on {self.head_oid()}"
)
return None
elif not all_ci_passed:
self.comment(f"Cannot merge, CI did not pass on on {self.head_oid()}")
return None
def rerun_jenkins_ci(self) -> None:
job_names = [
"tvm-arm",
"tvm-cortexm",
"tvm-cpu",
"tvm-docker",
"tvm-gpu",
"tvm-hexagon",
"tvm-i386",
"tvm-lint",
"tvm-minimal",
"tvm-minimal-cross-isa",
"tvm-riscv",
"tvm-wasm",
]
for name in job_names:
url = JENKINS_URL + f"job/{name}/job/PR-{self.number}/buildWithParameters"
logging.info(f"Rerunning ci with URL={url}")
if self.dry_run:
logging.info("Dry run, not sending POST")
else:
post(url, auth=("tvm-bot", TVM_BOT_JENKINS_TOKEN))
def rerun_github_actions(self) -> None:
workflow_ids = []
for item in self.head_commit()["statusCheckRollup"]["contexts"]["nodes"]:
if "checkSuite" in item and item["conclusion"] == "FAILURE":
workflow_id = item["checkSuite"]["workflowRun"]["databaseId"]
workflow_ids.append(workflow_id)
workflow_ids = list(set(workflow_ids))
logging.info(f"Rerunning GitHub Actions workflows with IDs: {workflow_ids}")
if self.dry_run:
actions_github = None
else:
actions_github = GitHubRepo(
user=self.github.user, repo=self.github.repo, token=GH_ACTIONS_TOKEN
)
for workflow_id in workflow_ids:
if self.dry_run:
logging.info(f"Dry run, not restarting workflow {workflow_id}")
else:
try:
actions_github.post(f"actions/runs/{workflow_id}/rerun-failed-jobs", data={})
except RuntimeError as e:
logging.exception(e)
# Ignore errors about jobs that are part of the same workflow to avoid
# having to figure out which jobs are in which workflows ahead of time
if "The workflow run containing this job is already running" in str(e):
pass
else:
raise e
def comment_failure(self, msg: str, exceptions: Union[Exception, List[Exception]]):
if not isinstance(exceptions, list):
exceptions = [exceptions]
logging.info(f"Failed, commenting {exceptions}")
# Extract all the traceback strings
for item in exceptions:
try:
raise item
except Exception:
item.exception_msg = traceback.format_exc()
comment = f"{msg} in {args.run_url}\n\n"
for exception in exceptions:
comment += f"<details>\n\n```\n{exception.exception_msg}\n```\n\n"
if hasattr(exception, "read"):
comment += f"with response\n\n```\n{exception.read().decode()}\n```\n\n"
comment += "</details>"
self.comment(comment)
self.has_error = True
return exception
def check_author(pr, triggering_comment, args):
comment_author = triggering_comment["user"]["login"]
if pr.author() == comment_author:
logging.info("Comment user is PR author, continuing")
return True
return False
def search_users(name, triggering_comment, testing_json, search_fn):
logging.info(f"Checking {name}")
comment_author = triggering_comment["user"]["login"]
if testing_json:
matching_users = json.loads(testing_json)
else:
matching_users = search_fn(comment_author)
logging.info(f"Found {name}: {matching_users}")
user_names = {user["login"] for user in matching_users}
return len(matching_users) > 0 and comment_author in user_names
def check_collaborator(pr, triggering_comment, args):
return search_users(
name="collaborators",
triggering_comment=triggering_comment,
search_fn=pr.search_collaborator,
testing_json=args.testing_collaborators_json,
)
def check_mentionable_users(pr, triggering_comment, args):
return search_users(
name="mentionable users",
triggering_comment=triggering_comment,
search_fn=pr.search_mentionable_users,
testing_json=args.testing_mentionable_users_json,
)
AUTH_CHECKS = {
"mentionable_users": check_mentionable_users,
"collaborators": check_collaborator,
"author": check_author,
}
# Stash the keys so they're accessible from the values
AUTH_CHECKS = {k: (k, v) for k, v in AUTH_CHECKS.items()}
class Merge:
triggers = [
"merge",
"merge this",
"merge this pr",
]
auth = [AUTH_CHECKS["collaborators"], AUTH_CHECKS["author"]]
@staticmethod
def run(pr: PR):
info = None
try:
info = pr.merge_if_passed_checks()
except Exception as e:
pr.comment_failure("Failed to process merge request", e)
raise e
if info is not None:
try:
pr.trigger_gha_ci(sha=info["sha"])
except Exception as e:
pr.comment_failure("Failed to trigger GitHub Actions", e)
raise e
class Rerun:
triggers = [
"rerun",
"rerun ci",
"re-run",
"re-run ci",
"run",
"run ci",
]
auth = [AUTH_CHECKS["mentionable_users"]]
@staticmethod
def run(pr: PR):
errors = []
try:
pr.rerun_jenkins_ci()
except Exception as e:
logging.exception(e)
errors.append(e)
try:
pr.rerun_github_actions()
except Exception as e:
logging.exception(e)
errors.append(e)
if len(errors) > 0:
pr.comment_failure("Failed to re-run CI", errors)
if __name__ == "__main__":
help = "Check if a PR has comments trying to merge it, and do so based on reviews/CI status"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--pr", required=True, help="pr number to check")
parser.add_argument("--run-url", required=True, help="workflow run URL")
parser.add_argument(
"--trigger-comment-json", required=True, help="json of the comment that triggered this run"
)
parser.add_argument("--testing-pr-json", help="(testing only) manual data for testing")
parser.add_argument(
"--testing-collaborators-json", help="(testing only) manual data for testing"
)
parser.add_argument(
"--testing-mentionable-users-json", help="(testing only) manual data for testing"
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()
comment = json.loads(args.trigger_comment_json)
body = comment["body"].strip()
# Check that the comment was addressed to tvm-bot
if not body.startswith("@tvm-bot "):
logging.info(f"Not a bot comment, '{body}' does not start with '@tvm-bot'")
exit(0)
# Find the code to run for the command from the user
user_command = body[len("@tvm-bot"):].strip()
command_to_run = None
for command in [Merge, Rerun]:
if user_command in command.triggers:
command_to_run = command
break
if command_to_run is None:
logging.info(f"Command '{user_command}' did not match anything")
exit(0)
# Find the remote for querying more data about the PR
remote = git(["config", "--get", f"remote.{args.remote}.url"])
logging.info(f"Using remote remote={remote}")
owner, repo = parse_remote(remote)
if args.pr.strip() == "":
logging.info("No PR number passed")
exit(0)
logging.info(f"Checking owner={owner} repo={repo}")
if args.testing_pr_json:
pr = PR(
number=int(args.pr),
owner=owner,
repo=repo,
dry_run=args.dry_run,
raw_data=json.loads(args.testing_pr_json),
)
else:
pr = PR(number=int(args.pr), owner=owner, repo=repo, dry_run=args.dry_run)
for name, check in command_to_run.auth:
if check(pr, comment, args):
logging.info(f"Passed auth check '{name}', continuing")
# Only one authorization check needs to pass (e.g. just mentionable
# or PR author), not all of them so quit
break
else:
logging.info(f"Failed auth check '{name}', quitting")
# Add a sad face
pr.react(comment, "confused")
exit(0)
# Acknowledge the comment with a react
pr.react(comment, "+1")
state = pr.state()
if state != "OPEN":
logging.info(f"Ignoring event on PR, state was not OPEN, instead was state={state}")
exit(0)
# Run the command
command_to_run.run(pr)
if pr.has_error:
raise RuntimeError("PR commented a failure")
| 26,749 | 32.188586 | 364 | py |
tvm | tvm-main/ci/scripts/github/update_branch.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
import argparse
import sys
from pathlib import Path
from typing import Any, Dict
# Hackery to enable importing of utils from ci/scripts/jenkins
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
from git_utils import git, GitHubRepo, parse_remote
_commit_query_fields = """
messageHeadline
oid
statusCheckRollup {
contexts(last:100) {
nodes {
... on CheckRun {
conclusion
status
name
checkSuite {
workflowRun {
workflow {
name
}
}
}
}
... on StatusContext {
context
state
}
}
}
}
"""
def commits_query(user: str, repo: str, cursor: str = None):
"""
Create a GraphQL query to find the last N commits along with their statuses
and some metadata (paginated after 'cursor')
"""
after = ""
if cursor is not None:
after = f', after:"{cursor}"'
return f"""
{{
repository(name: "{repo}", owner: "{user}") {{
defaultBranchRef {{
target {{
... on Commit {{
history(first: 15{after}) {{
edges {{ cursor }}
nodes {{
{_commit_query_fields}
}}
}}
}}
}}
}}
}}
}}
"""
EXPECTED_CI_JOBS = [
"cross-isa-minimal/branch",
"gpu/branch",
"hexagon/branch",
"arm/branch",
"cortexm/branch",
"cpu/branch",
"docker/branch",
"i386/branch",
"lint/branch",
"minimal/branch",
"riscv/branch",
"wasm/branch",
]
def commit_passed_ci(commit: Dict[str, Any]) -> bool:
"""
Returns true if all of a commit's statuses are SUCCESS
"""
statuses = commit["statusCheckRollup"]["contexts"]["nodes"]
# GitHub Actions statuses are different from external GitHub statuses, so
# unify them into 1 representation
# https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
unified_statuses = []
for status in statuses:
if "context" in status:
# Parse non-GHA status
unified_statuses.append((status["context"], status["state"] == "SUCCESS"))
else:
# Parse GitHub Actions item
workflow = status["checkSuite"]["workflowRun"]["workflow"]["name"]
name = f"{workflow} / {status['name']}"
unified_statuses.append((name, status["conclusion"] == "SUCCESS"))
print(f"Statuses on {commit['oid']}:", json.dumps(unified_statuses, indent=2))
# Assert that specific jobs are present in the commit statuses (i.e. don't
# approve if CI was broken and didn't schedule a job)
job_names = {name for name, status in unified_statuses}
for job in EXPECTED_CI_JOBS:
if job not in job_names:
# Did not find expected job name
return False
passed_ci = all(status for name, status in unified_statuses)
return passed_ci
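# Worked example (illustrative, not from the original script): after the loop,
# unified_statuses holds (name, passed) pairs such as
#     [("cpu/branch", True), ("lint/branch", False)]
# and the commit passes only if every name in EXPECTED_CI_JOBS appears among
# the collected names AND every collected flag is True.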
def update_branch(user: str, repo: str, sha: str, branch_name: str) -> None:
git(["fetch", "origin", sha])
git(["reset", "--hard", "FETCH_HEAD"])
try:
git(["branch", "-D", branch_name])
except RuntimeError:
# Ignore failures (i.e. the branch did not exist in the first place)
pass
git(["checkout", "-b", branch_name])
# Create and push the branch
git(["push", "origin", "--force", branch_name])
print(f"Pushed branch {branch_name} with commit {sha}")
if __name__ == "__main__":
help = "Push the a branch to the last commit that passed all CI runs"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--dry-run", action="store_true", help="don't submit to GitHub")
parser.add_argument("--branch", default="last-successful", help="branch name")
parser.add_argument(
"--testonly-json", help="(testing) data to use instead of fetching from GitHub"
)
args = parser.parse_args()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
# TODO: Remove this before landing
user, repo = ("apache", "tvm")
if args.testonly_json:
r = json.loads(args.testonly_json)
else:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
q = commits_query(user, repo)
r = github.graphql(q)
commits = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["nodes"]
# Limit GraphQL pagination
MAX_COMMITS_TO_CHECK = 50
i = 0
while i < MAX_COMMITS_TO_CHECK:
# Check each commit
for commit in commits:
if commit_passed_ci(commit):
print(f"Found last good commit: {commit['oid']}: {commit['messageHeadline']}")
if not args.dry_run:
update_branch(
user=user,
repo=repo,
sha=commit["oid"],
branch_name=args.branch,
)
# Nothing to do after updating the branch, exit early
exit(0)
# No good commit found, proceed to next page of results
edges = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["edges"]
if len(edges) == 0:
break
else:
q = commits_query(user, repo, cursor=edges[-1]["cursor"])
r = github.graphql(q)
commits = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["nodes"]
# Backstop to prevent looking through all the past commits
i += len(commits)
print(f"No good commits found in the last {len(commits)} commits")
exit(1)
| 6,954 | 31.652582 | 100 | py |
tvm | tvm-main/ci/scripts/github/ping_reviewers.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import re
import datetime
import json
import sys
import textwrap
from pathlib import Path
from typing import List
# Hackery to enable importing of utils from ci/scripts/jenkins
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
from git_utils import git, GitHubRepo, parse_remote, DRY_RUN
GIT_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def prs_query(user: str, repo: str, cursor: str = None):
after = ""
if cursor is not None:
after = f', before:"{cursor}"'
time_keys = "createdAt updatedAt lastEditedAt publishedAt"
return f"""
{{
repository(name: "{repo}", owner: "{user}") {{
pullRequests(states: [OPEN], last: 10{after}) {{
edges {{
cursor
}}
nodes {{
number
url
body
{time_keys}
isDraft
author {{
login
}}
reviews(last:100) {{
nodes {{
{time_keys}
bodyText
author {{ login }}
comments(last:100) {{
nodes {{
{time_keys}
bodyText
}}
}}
}}
}}
comments(last:100) {{
nodes {{
authorAssociation
bodyText
{time_keys}
author {{
login
}}
}}
}}
}}
}}
}}
}}
"""
def find_reviewers(body: str) -> List[str]:
matches = re.findall(r"(cc( @[-A-Za-z0-9]+)+)", body, flags=re.MULTILINE)
matches = [full for full, last in matches]
reviewers = []
for match in matches:
if match.startswith("cc "):
match = match.replace("cc ", "")
users = [x.strip() for x in match.split("@")]
reviewers += users
reviewers = set(x for x in reviewers if x != "")
return list(reviewers)
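# Illustrative behavior (not part of the original script):
#     find_reviewers("Thanks! cc @alice @bob-smith")  ->  ["alice", "bob-smith"]
# Handles are deduplicated through a set, so the returned order is not stable.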
def check_pr(pr, wait_time, now):
last_action = None
author = pr["author"]["login"]
def update_last(new_time, description):
if isinstance(new_time, str):
new_time = datetime.datetime.strptime(new_time, GIT_DATE_FORMAT)
if new_time is None:
print(f" time not found: {description}")
return
nonlocal last_action
if last_action is None or new_time > last_action[0]:
last_action = (new_time, description)
def check_obj(obj, name):
update_last(obj["publishedAt"], f"{name} publishedAt: {obj}")
update_last(obj["updatedAt"], f"{name} updatedAt: {obj}")
update_last(obj["lastEditedAt"], f"{name} lastEditedAt: {obj}")
update_last(obj["createdAt"], f"{name} lastEditedAt: {obj}")
check_obj(pr, "pr")
# GitHub counts comments left as part of a review separately than standalone
# comments
reviews = pr["reviews"]["nodes"]
review_comments = []
for review in reviews:
review_comments += review["comments"]["nodes"]
check_obj(review, "review")
# Collate all comments
comments = pr["comments"]["nodes"] + review_comments
# Find the last date of any comment
for comment in comments:
check_obj(comment, "comment")
time_since_last_action = now - last_action[0]
# Find reviewers in the PR's body
pr_body_reviewers = find_reviewers(pr["body"])
# Pull out reviewers from any cc @... text in a comment
cc_reviewers = [find_reviewers(c["bodyText"]) for c in comments]
cc_reviewers = [r for revs in cc_reviewers for r in revs]
# Anyone that has left a review as a reviewer (this may include the PR
# author since their responses count as reviews)
review_reviewers = list(set(r["author"]["login"] for r in reviews))
reviewers = cc_reviewers + review_reviewers + pr_body_reviewers
reviewers = list(set(reviewers))
reviewers = [r for r in reviewers if r != author]
if time_since_last_action > wait_time:
print(
" Pinging reviewers",
reviewers,
"on",
pr["url"],
"since it has been",
time_since_last_action,
f"since anything happened on that PR (last action: {last_action[1]})",
)
return reviewers
else:
print(
f" Not pinging PR {pr['number']} since it has been only {time_since_last_action} since the last action: {last_action[1]}"
)
return None
def make_ping_message(pr, reviewers):
reviewers = [f"@{r}" for r in reviewers]
author = f'@{pr["author"]["login"]}'
text = (
"It has been a while since this PR was updated, "
+ " ".join(reviewers)
+ " please leave a review or address the outstanding comments. "
+ f"{author} if this PR is still a work in progress, please [convert it to a draft](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request#converting-a-pull-request-to-a-draft)"
" until it is ready for review."
)
return text
if __name__ == "__main__":
help = "Comment on languishing issues and PRs"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--wait-time-minutes", required=True, type=int, help="ssh remote to parse")
parser.add_argument("--cutoff-pr-number", default=0, type=int, help="ssh remote to parse")
parser.add_argument("--dry-run", action="store_true", help="don't update GitHub")
parser.add_argument("--pr-json", help="(testing) data for testing to use instead of GitHub")
parser.add_argument("--now", help="(testing) custom string for current time")
args = parser.parse_args()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
wait_time = datetime.timedelta(minutes=int(args.wait_time_minutes))
cutoff_pr_number = int(args.cutoff_pr_number)
print(
"Running with:\n"
f" time cutoff: {wait_time}\n"
f" number cutoff: {cutoff_pr_number}\n"
f" dry run: {args.dry_run}\n"
f" user/repo: {user}/{repo}\n",
end="",
)
    # GitHub client used for the GraphQL queries and comment posting below.
    # Assumption: token handling mirrors github_pr_comment.py (a DRY_RUN token avoids real requests).
    github = GitHubRepo(
        user=user,
        repo=repo,
        token=DRY_RUN if args.dry_run else os.environ["GITHUB_TOKEN"],
    )
    if args.pr_json:
        r = json.loads(args.pr_json)
    else:
        q = prs_query(user, repo)
        r = github.graphql(q)
now = datetime.datetime.utcnow()
if args.now:
now = datetime.datetime.strptime(args.now, GIT_DATE_FORMAT)
# Loop until all PRs have been checked
while True:
prs = r["data"]["repository"]["pullRequests"]["nodes"]
# Don't look at draft PRs at all
prs_to_check = []
for pr in prs:
if pr["isDraft"]:
print(f"Skipping #{pr['number']} since it's a draft")
elif pr["number"] <= cutoff_pr_number:
print(
f"Skipping #{pr['number']} since it's too old ({pr['number']} <= {cutoff_pr_number})"
)
else:
print(f"Checking #{pr['number']}")
prs_to_check.append(pr)
print(f"Summary: Checking {len(prs_to_check)} of {len(prs)} fetched")
# Ping reviewers on each PR in the response if necessary
for pr in prs_to_check:
print("Checking", pr["url"])
reviewers = check_pr(pr, wait_time, now)
if reviewers is not None:
message = make_ping_message(pr, reviewers)
if args.dry_run:
print(
f"Would have commented on #{pr['number']}:\n{textwrap.indent(message, prefix=' ')}"
)
else:
r = github.post(f"issues/{pr['number']}/comments", {"body": message})
print(r)
edges = r["data"]["repository"]["pullRequests"]["edges"]
if len(edges) == 0:
# No more results to check
break
cursor = edges[0]["cursor"]
r = github.graphql(prs_query(user, repo, cursor))
| 9,111 | 33.384906 | 291 | py |
tvm | tvm-main/ci/scripts/github/github_skipped_tests_comment.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import json
import os
import logging
import subprocess
from xml.etree import ElementTree
from pathlib import Path
from typing import Dict, Any, Optional
def run_subprocess(command):
logging.info(f"Running command {command}")
proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE, encoding="utf-8")
if proc.returncode != 0:
raise RuntimeError(f"Command failed {command}:\nstdout:\n{proc.stdout}")
return proc
def retrieve_test_report(s3_url, target_dir):
command = f"aws --region us-west-2 s3 cp {s3_url} {target_dir} --recursive --no-sign-request"
run_subprocess(command)
def get_common_commit_sha():
command = "git merge-base origin/main HEAD"
proc = run_subprocess(command)
return proc.stdout.strip()
def get_main_jenkins_build_number(github, common_commit):
json = github.get(f"commits/{common_commit}/status")
for status in reversed(json["statuses"]):
if status["context"] != "tvm-ci/branch":
continue
state = status["state"]
target_url = str(status["target_url"])
build_number = (
target_url[target_url.find("job/main") : len(target_url)]
.strip("job/main/")
.strip("/display/redirect")
)
assert build_number.isdigit()
return {"build_number": build_number, "state": state}
raise RuntimeError(f"Failed to find main build number for commit {common_commit}")
def retrieve_test_reports(
common_main_build, pr_number, build_number, s3_prefix, pr_test_report_dir, main_test_report_dir
):
cur_build_s3_link = (
f"s3://{s3_prefix}/tvm/PR-{str(pr_number)}/{str(build_number)}/pytest-results"
)
retrieve_test_report(cur_build_s3_link, pr_test_report_dir)
common_build_s3_link = f"s3://{s3_prefix}/tvm/main/{common_main_build}/pytest-results"
retrieve_test_report(common_build_s3_link, main_test_report_dir)
def get_pr_and_build_numbers(target_url):
target_url = target_url[target_url.find("PR-") : len(target_url)]
split = target_url.split("/")
pr_number = split[0].strip("PR-")
build_number = split[1]
return {"pr_number": pr_number, "build_number": build_number}
def build_test_set(directory):
directory = Path(directory)
subdir_to_skipped = {}
subdirs = [
item for item in os.listdir(directory) if os.path.isdir(os.path.join(directory, item))
]
for subdir in subdirs:
subdir_to_skipped[subdir] = set()
for root, _, files in os.walk(directory / subdir):
for file in files:
test_report = ElementTree.parse(Path(root) / file)
for testcase in test_report.iter("testcase"):
skipped = testcase.find("skipped")
if skipped is not None:
key = testcase.attrib["classname"] + "#" + testcase.attrib["name"]
subdir_to_skipped[subdir].add(key)
return subdir_to_skipped
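# Sketch of the report layout this expects (assumed, for illustration):
#     pr-reports/
#         unittest_GPU/      <- one subdirectory per CI node
#             report.xml     <- JUnit XML; skipped cases carry a <skipped/> child
# build_test_set("pr-reports") then returns something like
#     {"unittest_GPU": {"tests.python.test_foo.TestFoo#test_bar", ...}}
# keyed as "<classname>#<test name>".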
def to_node_name(dir_name: str):
return dir_name.replace("_", ": ", 1)
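# e.g. to_node_name("unittest_GPU") -> "unittest: GPU" (only the first "_" is replaced)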
def build_diff_comment_with_main(
common_commit_sha,
skipped_list,
commit_sha,
):
if len(skipped_list) == 0:
return f"No diff in skipped tests with main found in this branch for commit {commit_sha}.\n"
text = (
f"The list below shows tests that ran in main {common_commit_sha} but were "
f"skipped in the CI build of {commit_sha}:\n"
f"```\n"
)
for skip in skipped_list:
text += skip + "\n"
text += f"```\n"
return text
def build_comment(
common_commit_sha,
common_main_build,
skipped_list,
additional_skipped_list,
pr_number,
build_number,
commit_sha,
jenkins_prefix,
):
if common_main_build["state"] != "success":
return f"Unable to run tests bot because main failed to pass CI at {common_commit_sha}."
text = build_diff_comment_with_main(common_commit_sha, skipped_list, commit_sha)
if len(additional_skipped_list) != 0:
text += "\n"
text += (
f"Additional tests that were skipped in the CI build and present in the [`required_tests_to_run`]"
f"(https://github.com/apache/tvm/blob/main/ci/scripts/github/required_tests_to_run.json) file:"
f"\n```\n"
)
for skip in additional_skipped_list:
text += skip + "\n"
text += f"```\n"
text += (
f"A detailed report of ran tests is [here](https://{jenkins_prefix}/job/tvm/job/PR-{str(pr_number)}"
f"/{str(build_number)}/testReport/)."
)
return text
def find_target_url(pr_head: Dict[str, Any]):
for status in pr_head["statusCheckRollup"]["contexts"]["nodes"]:
if status.get("context", "") == "tvm-ci/pr-head":
return status["targetUrl"]
raise RuntimeError(f"Unable to find tvm-ci/pr-head status in {pr_head}")
def get_skipped_tests_comment(
pr: Dict[str, Any],
github,
s3_prefix: str = "tvm-jenkins-artifacts-prod",
jenkins_prefix: str = "ci.tlcpack.ai",
pr_test_report_dir: str = "pr-reports",
main_test_report_dir: str = "main-reports",
common_commit_sha: Optional[str] = None,
common_main_build: Optional[Dict[str, Any]] = None,
additional_tests_to_check_file: str = "required_tests_to_run.json",
) -> str:
pr_head = pr["commits"]["nodes"][0]["commit"]
target_url = find_target_url(pr_head)
pr_and_build = get_pr_and_build_numbers(target_url)
logging.info(f"Getting comment for {pr_head} with target {target_url}")
commit_sha = pr_head["oid"]
is_dry_run = common_commit_sha is not None
if not is_dry_run:
logging.info("Fetching common commit sha and build info")
common_commit_sha = get_common_commit_sha()
common_main_build = get_main_jenkins_build_number(github, common_commit_sha)
retrieve_test_reports(
common_main_build=common_main_build["build_number"],
pr_number=pr_and_build["pr_number"],
build_number=pr_and_build["build_number"],
s3_prefix=s3_prefix,
main_test_report_dir=main_test_report_dir,
pr_test_report_dir=pr_test_report_dir,
)
else:
logging.info("Dry run, expecting PR and main reports on disk")
main_tests = build_test_set(main_test_report_dir)
build_tests = build_test_set(pr_test_report_dir)
skipped_list = []
for subdir, skipped_set in build_tests.items():
        skipped_main = main_tests.get(subdir)
if skipped_main is None:
logging.warning(f"Could not find directory {subdir} in main.")
continue
diff_set = skipped_set - skipped_main
if len(diff_set) != 0:
for test in diff_set:
skipped_list.append(f"{to_node_name(subdir)} -> {test}")
# Sort the list to maintain an order in the output. Helps when validating the output in tests.
skipped_list.sort()
if len(skipped_list) == 0:
logging.info("No skipped tests found.")
if not is_dry_run:
current_file = Path(__file__).resolve()
additional_tests_to_check_file = Path(current_file).parent / "required_tests_to_run.json"
logging.info(
f"Checking additional tests in file {additional_tests_to_check_file} are not skipped."
)
try:
with open(additional_tests_to_check_file, "r") as f:
additional_tests_to_check = json.load(f)
except IOError:
logging.info(
f"Failed to read additional tests from file: {additional_tests_to_check_file}."
)
additional_tests_to_check = {}
# Assert that tests present in "required_tests_to_run.json" are not skipped.
additional_skipped_tests = []
for subdir, test_set in additional_tests_to_check.items():
if subdir not in build_tests.keys():
logging.warning(f"Could not find directory {subdir} in the build test set.")
continue
for test in test_set:
if test in build_tests[subdir]:
additional_skipped_tests.append(f"{to_node_name(subdir)} -> {test}")
if len(additional_skipped_tests) == 0:
logging.info("No skipped tests found in the additional list.")
body = build_comment(
common_commit_sha,
common_main_build,
skipped_list,
additional_skipped_tests,
pr_and_build["pr_number"],
pr_and_build["build_number"],
commit_sha,
jenkins_prefix,
)
return body
| 9,334 | 34.093985 | 110 | py |
tvm | tvm-main/ci/scripts/github/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package to enable testing of Github scripts"""
from . import github_skipped_tests_comment, github_pr_comment, github_tag_teams, github_docs_comment
| 937 | 45.9 | 100 | py |
tvm | tvm-main/ci/scripts/github/github_pr_comment.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import json
import sys
from pathlib import Path
# Hackery to enable importing of utils from ci/scripts/jenkins
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
from git_utils import git, GitHubRepo, parse_remote, DRY_RUN
from cmd_utils import init_log
from github_commenter import BotCommentBuilder
from github_skipped_tests_comment import get_skipped_tests_comment
from github_tag_teams import get_tags
from github_docs_comment import get_doc_url
PR_QUERY = """
query ($owner: String!, $name: String!, $number: Int!) {
repository(owner: $owner, name: $name) {
pullRequest(number: $number) {
title
body
state
number
author {
login
}
labels(first:100) {
nodes {
name
}
}
comments(last: 100) {
pageInfo {
hasPreviousPage
}
nodes {
author {
login
}
databaseId
body
}
}
commits(last: 1) {
nodes {
commit {
oid
statusCheckRollup {
contexts(first: 100) {
pageInfo {
hasNextPage
}
nodes {
... on StatusContext {
state
context
targetUrl
}
}
}
}
}
}
}
}
}
}
"""
if __name__ == "__main__":
help = "Comment a welcome message on PRs"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--pr", required=True)
parser.add_argument("--test-data", help="(testing) mock GitHub API data")
parser.add_argument("--test-comments", help="(testing) testing comments")
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()
remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)
test_data = None
if args.test_data is not None:
test_data = json.loads(args.test_data)
github = GitHubRepo(
user=user,
repo=repo,
token=DRY_RUN if args.dry_run else os.environ["GITHUB_TOKEN"],
test_data=test_data,
)
pr_data = github.graphql(
PR_QUERY,
{
"owner": user,
"name": repo,
"number": int(args.pr),
},
)
pr_data = pr_data["data"]["repository"]["pullRequest"]
commenter = BotCommentBuilder(github=github, data=pr_data)
if args.test_comments is not None:
test_comments = json.loads(args.test_comments)
skipped_tests = test_comments["skipped-tests"]
ccs = test_comments["ccs"]
docs_info = test_comments["docs"]
else:
skipped_tests = get_skipped_tests_comment(pr_data, github=github)
ccs = get_tags(pr_data, github, team_issue=10317)
docs_info = get_doc_url(pr_data)
items = {
"ccs": ccs,
"skipped-tests": skipped_tests,
"docs": docs_info,
}
commenter.post_items(items=items.items())
| 4,417 | 28.851351 | 81 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/setup.py | from setuptools import setup, find_packages
setup(name='joint_extr', version='1.0', packages=find_packages()) | 110 | 36 | 65 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/common/utils.py | import re
from tqdm import tqdm
from IPython.core.debugger import set_trace
import copy
import json
import os
class DefaultLogger:
def __init__(self, log_path, project, run_name, run_id, hyperparameter):
self.log_path = log_path
log_dir = "/".join(self.log_path.split("/")[:-1])
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.run_id = run_id
self.log("============================================================================")
self.log("project: {}, run_name: {}, run_id: {}\n".format(project, run_name, run_id))
hyperparameters_format = "--------------hypter_parameters------------------- \n{}\n-----------------------------------------"
self.log(hyperparameters_format.format(json.dumps(hyperparameter, indent = 4)))
def log(self, text):
text = "run_id: {}, {}".format(self.run_id, text)
print(text)
open(self.log_path, "a", encoding = "utf-8").write("{}\n".format(text))
class Preprocessor:
'''
    1. transform the dataset into the normal format expected by this codebase
    2. add token-level spans to all entities in the relations, used later in the tagging phase
'''
def __init__(self, tokenize_func, get_tok2char_span_map_func):
self._tokenize = tokenize_func
self._get_tok2char_span_map = get_tok2char_span_map_func
def transform_data(self, data, ori_format, dataset_type, add_id = True):
'''
        This function only handles three original formats used in previous works.
        To feed a new dataset to the model, just define your own transform function.
        data: original data
        ori_format: "casrel", "etl_span", "raw_nyt"
        dataset_type: "train", "valid", "test"; only used to generate ids for the data
'''
normal_sample_list = []
for ind, sample in tqdm(enumerate(data), desc = "Transforming data format"):
if ori_format == "casrel":
text = sample["text"]
rel_list = sample["triple_list"]
subj_key, pred_key, obj_key = 0, 1, 2
elif ori_format == "etl_span":
text = " ".join(sample["tokens"])
rel_list = sample["spo_list"]
subj_key, pred_key, obj_key = 0, 1, 2
elif ori_format == "raw_nyt":
text = sample["sentText"]
rel_list = sample["relationMentions"]
subj_key, pred_key, obj_key = "em1Text", "label", "em2Text"
normal_sample = {
"text": text,
}
if add_id:
normal_sample["id"] = "{}_{}".format(dataset_type, ind)
normal_rel_list = []
for rel in rel_list:
normal_rel = {
"subject": rel[subj_key],
"predicate": rel[pred_key],
"object": rel[obj_key],
}
normal_rel_list.append(normal_rel)
normal_sample["relation_list"] = normal_rel_list
normal_sample_list.append(normal_sample)
return self._clean_sp_char(normal_sample_list)
def split_into_short_samples(self, sample_list, max_seq_len, sliding_len = 50, encoder = "BERT", data_type = "train"):
new_sample_list = []
for sample in tqdm(sample_list, desc = "Splitting into subtexts"):
text_id = sample["id"]
text = sample["text"]
tokens = self._tokenize(text)
tok2char_span = self._get_tok2char_span_map(text)
# sliding at token level
split_sample_list = []
for start_ind in range(0, len(tokens), sliding_len):
if encoder == "BERT": # if use bert, do not split a word into two samples
while "##" in tokens[start_ind]:
start_ind -= 1
end_ind = start_ind + max_seq_len
char_span_list = tok2char_span[start_ind:end_ind]
char_level_span = [char_span_list[0][0], char_span_list[-1][1]]
sub_text = text[char_level_span[0]:char_level_span[1]]
new_sample = {
"id": text_id,
"text": sub_text,
"tok_offset": start_ind,
"char_offset": char_level_span[0],
}
if data_type == "test": # test set
if len(sub_text) > 0:
split_sample_list.append(new_sample)
else: # train or valid dataset, only save spo and entities in the subtext
# spo
sub_rel_list = []
for rel in sample["relation_list"]:
subj_tok_span = rel["subj_tok_span"]
obj_tok_span = rel["obj_tok_span"]
# if subject and object are both in this subtext, add this spo to new sample
if subj_tok_span[0] >= start_ind and subj_tok_span[1] <= end_ind \
and obj_tok_span[0] >= start_ind and obj_tok_span[1] <= end_ind:
new_rel = copy.deepcopy(rel)
new_rel["subj_tok_span"] = [subj_tok_span[0] - start_ind, subj_tok_span[1] - start_ind] # start_ind: tok level offset
new_rel["obj_tok_span"] = [obj_tok_span[0] - start_ind, obj_tok_span[1] - start_ind]
new_rel["subj_char_span"][0] -= char_level_span[0] # char level offset
new_rel["subj_char_span"][1] -= char_level_span[0]
new_rel["obj_char_span"][0] -= char_level_span[0]
new_rel["obj_char_span"][1] -= char_level_span[0]
sub_rel_list.append(new_rel)
# entity
sub_ent_list = []
for ent in sample["entity_list"]:
tok_span = ent["tok_span"]
# if entity in this subtext, add the entity to new sample
if tok_span[0] >= start_ind and tok_span[1] <= end_ind:
new_ent = copy.deepcopy(ent)
new_ent["tok_span"] = [tok_span[0] - start_ind, tok_span[1] - start_ind]
new_ent["char_span"][0] -= char_level_span[0]
new_ent["char_span"][1] -= char_level_span[0]
sub_ent_list.append(new_ent)
# event
if "event_list" in sample:
sub_event_list = []
for event in sample["event_list"]:
trigger_tok_span = event["trigger_tok_span"]
if trigger_tok_span[1] > end_ind or trigger_tok_span[0] < start_ind:
continue
new_event = copy.deepcopy(event)
new_arg_list = []
for arg in new_event["argument_list"]:
if arg["tok_span"][0] >= start_ind and arg["tok_span"][1] <= end_ind:
new_arg_list.append(arg)
new_event["argument_list"] = new_arg_list
sub_event_list.append(new_event)
new_sample["event_list"] = sub_event_list # maybe empty
new_sample["entity_list"] = sub_ent_list # maybe empty
new_sample["relation_list"] = sub_rel_list # maybe empty
split_sample_list.append(new_sample)
# all segments covered, no need to continue
if end_ind > len(tokens):
break
new_sample_list.extend(split_sample_list)
return new_sample_list
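    # Illustrative windowing (not from the original code): with max_seq_len=100
    # and sliding_len=50, a 180-token text yields windows starting at tokens
    # 0, 50, 100 -- i.e. [0,100), [50,150), [100,180) -- and the loop breaks once
    # a window covers the end of the text. Gold relations/entities are kept in a
    # window only if all of their spans fall inside it.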
def _clean_sp_char(self, dataset):
def clean_text(text):
text = re.sub("�", "", text)
# text = re.sub("([A-Za-z]+)", r" \1 ", text)
# text = re.sub("(\d+)", r" \1 ", text)
# text = re.sub("\s+", " ", text).strip()
return text
for sample in tqdm(dataset, desc = "Clean"):
sample["text"] = clean_text(sample["text"])
for rel in sample["relation_list"]:
rel["subject"] = clean_text(rel["subject"])
rel["object"] = clean_text(rel["object"])
return dataset
def clean_data_wo_span(self, ori_data, separate = False, data_type = "train"):
'''
        remove duplicate whitespace
        and, if separate is True, pad non-alphanumeric characters with spaces to split them off from tokens
'''
def clean_text(text):
text = re.sub("\s+", " ", text).strip()
if separate:
text = re.sub("([^A-Za-z0-9])", r" \1 ", text)
text = re.sub("\s+", " ", text).strip()
return text
for sample in tqdm(ori_data, desc = "clean data"):
sample["text"] = clean_text(sample["text"])
if data_type == "test":
continue
for rel in sample["relation_list"]:
rel["subject"] = clean_text(rel["subject"])
rel["object"] = clean_text(rel["object"])
return ori_data
def clean_data_w_span(self, ori_data):
'''
        strip whitespace and adjust the spans accordingly;
        mark bad samples (char span errors) with a stake and drop them from the clean data
'''
bad_samples, clean_data = [], []
def strip_white(entity, entity_char_span):
p = 0
while entity[p] == " ":
entity_char_span[0] += 1
p += 1
p = len(entity) - 1
while entity[p] == " ":
entity_char_span[1] -= 1
p -= 1
return entity.strip(), entity_char_span
for sample in tqdm(ori_data, desc = "clean data w char spans"):
text = sample["text"]
bad = False
for rel in sample["relation_list"]:
# rm whitespaces
rel["subject"], rel["subj_char_span"] = strip_white(rel["subject"], rel["subj_char_span"])
rel["object"], rel["obj_char_span"] = strip_white(rel["object"], rel["obj_char_span"])
subj_char_span = rel["subj_char_span"]
obj_char_span = rel["obj_char_span"]
if rel["subject"] not in text or rel["subject"] != text[subj_char_span[0]:subj_char_span[1]] or \
rel["object"] not in text or rel["object"] != text[obj_char_span[0]:obj_char_span[1]]:
rel["stake"] = 0
bad = True
if bad:
bad_samples.append(copy.deepcopy(sample))
new_rel_list = [rel for rel in sample["relation_list"] if "stake" not in rel]
if len(new_rel_list) > 0:
sample["relation_list"] = new_rel_list
clean_data.append(sample)
return clean_data, bad_samples
def _get_char2tok_span(self, text):
'''
map character index to token level span
'''
tok2char_span = self._get_tok2char_span_map(text)
char_num = None
for tok_ind in range(len(tok2char_span) - 1, -1, -1):
if tok2char_span[tok_ind][1] != 0:
char_num = tok2char_span[tok_ind][1]
break
char2tok_span = [[-1, -1] for _ in range(char_num)] # [-1, -1] is whitespace
for tok_ind, char_sp in enumerate(tok2char_span):
for char_ind in range(char_sp[0], char_sp[1]):
tok_sp = char2tok_span[char_ind]
                # char-to-token can also be one-to-many (e.g. for Korean), so pos1 of the char span takes the first tok_ind and pos2 takes the last tok_ind
if tok_sp[0] == -1:
tok_sp[0] = tok_ind
tok_sp[1] = tok_ind + 1
return char2tok_span
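    # Worked example (illustrative): for text "New York" with a whitespace
    # tokenizer, tok2char_span == [(0, 3), (4, 8)], so this returns
    #     chars 0..2 -> [0, 1]    ("New"  is token 0)
    #     char  3    -> [-1, -1]  (whitespace)
    #     chars 4..7 -> [1, 2]    ("York" is token 1)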
def _get_ent2char_spans(self, text, entities, ignore_subword_match = True):
'''
if ignore_subword_match is true, find entities with whitespace around, e.g. "entity" -> " entity "
'''
entities = sorted(entities, key = lambda x: len(x), reverse = True)
text_cp = " {} ".format(text) if ignore_subword_match else text
ent2char_spans = {}
for ent in entities:
spans = []
target_ent = " {} ".format(ent) if ignore_subword_match else ent
for m in re.finditer(re.escape(target_ent), text_cp):
                if not ignore_subword_match and re.match("\d+", target_ent): # avoid matching an inner part of a longer number
if (m.span()[0] - 1 >= 0 and re.match("\d", text_cp[m.span()[0] - 1])) or (m.span()[1] < len(text_cp) and re.match("\d", text_cp[m.span()[1]])):
continue
span = [m.span()[0], m.span()[1] - 2] if ignore_subword_match else m.span()
spans.append(span)
# if len(spans) == 0:
# set_trace()
ent2char_spans[ent] = spans
return ent2char_spans
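    # Worked trace (illustrative): text = "New York is big", entity = "New York".
    # With ignore_subword_match=True the search runs over " New York is big "
    # looking for " New York ", which matches at (0, 10); the stored span is
    # [0, 10 - 2] = [0, 8], and text[0:8] == "New York" in the original string.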
def add_char_span(self, dataset, ignore_subword_match = True):
miss_sample_list = []
for sample in tqdm(dataset, desc = "adding char level spans"):
entities = [rel["subject"] for rel in sample["relation_list"]]
entities.extend([rel["object"] for rel in sample["relation_list"]])
if "entity_list" in sample:
entities.extend([ent["text"] for ent in sample["entity_list"]])
ent2char_spans = self._get_ent2char_spans(sample["text"], entities, ignore_subword_match = ignore_subword_match)
new_relation_list = []
for rel in sample["relation_list"]:
subj_char_spans = ent2char_spans[rel["subject"]]
obj_char_spans = ent2char_spans[rel["object"]]
for subj_sp in subj_char_spans:
for obj_sp in obj_char_spans:
new_relation_list.append({
"subject": rel["subject"],
"object": rel["object"],
"subj_char_span": subj_sp,
"obj_char_span": obj_sp,
"predicate": rel["predicate"],
})
if len(sample["relation_list"]) > len(new_relation_list):
miss_sample_list.append(sample)
sample["relation_list"] = new_relation_list
if "entity_list" in sample:
new_ent_list = []
for ent in sample["entity_list"]:
for char_sp in ent2char_spans[ent["text"]]:
new_ent_list.append({
"text": ent["text"],
"type": ent["type"],
"char_span": char_sp,
})
sample["entity_list"] = new_ent_list
return dataset, miss_sample_list
def add_tok_span(self, dataset):
'''
dataset must has char level span
'''
def char_span2tok_span(char_span, char2tok_span):
tok_span_list = char2tok_span[char_span[0]:char_span[1]]
tok_span = [tok_span_list[0][0], tok_span_list[-1][1]]
return tok_span
for sample in tqdm(dataset, desc = "adding token level spans"):
text = sample["text"]
char2tok_span = self._get_char2tok_span(sample["text"])
for rel in sample["relation_list"]:
subj_char_span = rel["subj_char_span"]
obj_char_span = rel["obj_char_span"]
rel["subj_tok_span"] = char_span2tok_span(subj_char_span, char2tok_span)
rel["obj_tok_span"] = char_span2tok_span(obj_char_span, char2tok_span)
for ent in sample["entity_list"]:
char_span = ent["char_span"]
ent["tok_span"] = char_span2tok_span(char_span, char2tok_span)
if "event_list" in sample:
for event in sample["event_list"]:
event["trigger_tok_span"] = char_span2tok_span(event["trigger_char_span"], char2tok_span)
for arg in event["argument_list"]:
arg["tok_span"] = char_span2tok_span(arg["char_span"], char2tok_span)
return dataset | 16,727 | 47.346821 | 164 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/common/components.py | from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import math
class LayerNorm(nn.Module):
def __init__(self, input_dim, cond_dim = 0, center = True, scale = True, epsilon = None, conditional = False,
hidden_units = None, hidden_activation = 'linear', hidden_initializer = 'xaiver', **kwargs):
super(LayerNorm, self).__init__()
"""
input_dim: inputs.shape[-1]
cond_dim: cond.shape[-1]
"""
self.center = center
self.scale = scale
self.conditional = conditional
self.hidden_units = hidden_units
        # self.hidden_activation = activations.get(hidden_activation)  # in Keras, a 'linear' activation returns the tensor unchanged
self.hidden_initializer = hidden_initializer
self.epsilon = epsilon or 1e-12
self.input_dim = input_dim
self.cond_dim = cond_dim
if self.center:
self.beta = Parameter(torch.zeros(input_dim))
if self.scale:
self.gamma = Parameter(torch.ones(input_dim))
if self.conditional:
if self.hidden_units is not None:
self.hidden_dense = nn.Linear(in_features = self.cond_dim, out_features = self.hidden_units, bias=False)
if self.center:
self.beta_dense = nn.Linear(in_features = self.cond_dim, out_features = input_dim, bias=False)
if self.scale:
self.gamma_dense = nn.Linear(in_features = self.cond_dim, out_features = input_dim, bias=False)
self.initialize_weights()
def initialize_weights(self):
if self.conditional:
if self.hidden_units is not None:
if self.hidden_initializer == 'normal':
torch.nn.init.normal(self.hidden_dense.weight)
elif self.hidden_initializer == 'xavier': # glorot_uniform
torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
            # Why are both of these initialized to zero?
            # To avoid disturbing the pretrained weights, the two transform matrices are zero-initialized
            # (a single linear layer may be zero-initialized; only stacked layers should not be),
            # so in the initial state the model still behaves exactly like the pretrained one.
if self.center:
torch.nn.init.constant_(self.beta_dense.weight, 0)
if self.scale:
torch.nn.init.constant_(self.gamma_dense.weight, 0)
def forward(self, inputs, cond=None):
"""
        For conditional Layer Norm, cond must not be None
"""
if self.conditional:
if self.hidden_units is not None:
cond = self.hidden_dense(cond)
            # for _ in range(K.ndim(inputs) - K.ndim(cond)):  # K.ndim returns the number of axes of a tensor as an int
            # TODO: why is there a rank difference here, and why unsqueeze at dim=1?
            # to keep ranks consistent, cond may come in as (batch_size, cond_dim)
for _ in range(len(inputs.shape) - len(cond.shape)):
cond = cond.unsqueeze(1) # cond = K.expand_dims(cond, 1)
            # cond goes through a linear transform before joining beta and gamma, keeping its dimension consistent with the input
if self.center:
beta = self.beta_dense(cond) + self.beta
if self.scale:
gamma = self.gamma_dense(cond) + self.gamma
else:
if self.center:
beta = self.beta
if self.scale:
gamma = self.gamma
outputs = inputs
if self.center:
mean = torch.mean(outputs, dim=-1).unsqueeze(-1)
outputs = outputs - mean
if self.scale:
variance = torch.mean(outputs**2, dim=-1).unsqueeze(-1)
            std = (variance + self.epsilon) ** 0.5  # sqrt(var + eps): the standard deviation used to normalize
outputs = outputs / std
outputs = outputs * gamma
if self.center:
outputs = outputs + beta
return outputs
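# Conditional LayerNorm in one formula (a sketch of the forward pass above):
# with input x and condition c,
#     y = gamma(c) * (x - mean(x)) / sqrt(var(x) + eps) + beta(c)
# where gamma(c) = gamma + W_g(c) and beta(c) = beta + W_b(c). Since W_g and
# W_b are zero-initialized, the layer starts out as a plain unconditional
# LayerNorm on top of the pretrained encoder.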
class HandshakingKernel(nn.Module):
def __init__(self, hidden_size, shaking_type, inner_enc_type):
super().__init__()
self.shaking_type = shaking_type
if shaking_type == "cat":
self.combine_fc = nn.Linear(hidden_size * 2, hidden_size)
elif shaking_type == "cat_plus":
self.combine_fc = nn.Linear(hidden_size * 3, hidden_size)
elif shaking_type == "cln":
self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional = True)
elif shaking_type == "cln_plus":
self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional = True)
self.inner_context_cln = LayerNorm(hidden_size, hidden_size, conditional = True)
self.inner_enc_type = inner_enc_type
if inner_enc_type == "mix_pooling":
self.lamtha = Parameter(torch.rand(hidden_size))
elif inner_enc_type == "lstm":
self.inner_context_lstm = nn.LSTM(hidden_size,
hidden_size,
num_layers = 1,
bidirectional = False,
batch_first = True)
def enc_inner_hiddens(self, seq_hiddens, inner_enc_type = "lstm"):
# seq_hiddens: (batch_size, seq_len, hidden_size)
def pool(seqence, pooling_type):
if pooling_type == "mean_pooling":
pooling = torch.mean(seqence, dim = -2)
elif pooling_type == "max_pooling":
pooling, _ = torch.max(seqence, dim = -2)
elif pooling_type == "mix_pooling":
pooling = self.lamtha * torch.mean(seqence, dim = -2) + (1 - self.lamtha) * torch.max(seqence, dim = -2)[0]
return pooling
if "pooling" in inner_enc_type:
inner_context = torch.stack([pool(seq_hiddens[:, :i+1, :], inner_enc_type) for i in range(seq_hiddens.size()[1])], dim = 1)
elif inner_enc_type == "lstm":
inner_context, _ = self.inner_context_lstm(seq_hiddens)
return inner_context
def forward(self, seq_hiddens):
'''
seq_hiddens: (batch_size, seq_len, hidden_size)
return:
shaking_hiddenss: (batch_size, (1 + seq_len) * seq_len / 2, hidden_size) (32, 5+4+3+2+1, 5)
'''
seq_len = seq_hiddens.size()[-2]
shaking_hiddens_list = []
for ind in range(seq_len):
hidden_each_step = seq_hiddens[:, ind, :]
visible_hiddens = seq_hiddens[:, ind:, :] # ind: only look back
repeat_hiddens = hidden_each_step[:, None, :].repeat(1, seq_len - ind, 1)
if self.shaking_type == "cat":
shaking_hiddens = torch.cat([repeat_hiddens, visible_hiddens], dim = -1)
shaking_hiddens = torch.tanh(self.combine_fc(shaking_hiddens))
elif self.shaking_type == "cat_plus":
inner_context = self.enc_inner_hiddens(visible_hiddens, self.inner_enc_type)
shaking_hiddens = torch.cat([repeat_hiddens, visible_hiddens, inner_context], dim = -1)
shaking_hiddens = torch.tanh(self.combine_fc(shaking_hiddens))
elif self.shaking_type == "cln":
shaking_hiddens = self.tp_cln(visible_hiddens, repeat_hiddens)
elif self.shaking_type == "cln_plus":
inner_context = self.enc_inner_hiddens(visible_hiddens, self.inner_enc_type)
shaking_hiddens = self.tp_cln(visible_hiddens, repeat_hiddens)
shaking_hiddens = self.inner_context_cln(shaking_hiddens, inner_context)
shaking_hiddens_list.append(shaking_hiddens)
long_shaking_hiddens = torch.cat(shaking_hiddens_list, dim = 1)
return long_shaking_hiddens | 7,502 | 44.472727 | 135 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/common/__init__.py | 0 | 0 | 0 | py | |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/preprocess/__init__.py | from joint_extraction import * | 30 | 30 | 30 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker/tplinker.py | import re
from tqdm import tqdm
from IPython.core.debugger import set_trace
import copy
import torch
import torch.nn as nn
import json
from torch.nn.parameter import Parameter
from common.components import HandshakingKernel
import math
class HandshakingTaggingScheme(object):
"""docstring for HandshakingTaggingScheme"""
def __init__(self, rel2id, max_seq_len):
super(HandshakingTaggingScheme, self).__init__()
self.rel2id = rel2id
self.id2rel = {ind:rel for rel, ind in rel2id.items()}
self.tag2id_ent = {
"O": 0,
"ENT-H2T": 1, # entity head to entity tail
}
self.id2tag_ent = {id_:tag for tag, id_ in self.tag2id_ent.items()}
self.tag2id_head_rel = {
"O": 0,
"REL-SH2OH": 1, # subject head to object head
"REL-OH2SH": 2, # object head to subject head
}
self.id2tag_head_rel = {id_:tag for tag, id_ in self.tag2id_head_rel.items()}
self.tag2id_tail_rel = {
"O": 0,
"REL-ST2OT": 1, # subject tail to object tail
"REL-OT2ST": 2, # object tail to subject tail
}
self.id2tag_tail_rel = {id_:tag for tag, id_ in self.tag2id_tail_rel.items()}
# mapping shaking sequence and matrix
self.matrix_size = max_seq_len
# e.g. [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]
self.shaking_ind2matrix_ind = [(ind, end_ind) for ind in range(self.matrix_size) for end_ind in list(range(self.matrix_size))[ind:]]
self.matrix_ind2shaking_ind = [[0 for i in range(self.matrix_size)] for j in range(self.matrix_size)]
for shaking_ind, matrix_ind in enumerate(self.shaking_ind2matrix_ind):
self.matrix_ind2shaking_ind[matrix_ind[0]][matrix_ind[1]] = shaking_ind
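    # Worked example of the mapping above (illustrative, matrix_size = 3):
    #     shaking_ind2matrix_ind == [(0,0), (0,1), (0,2), (1,1), (1,2), (2,2)]
    # so the flattened sequence has 3 * (3 + 1) // 2 == 6 positions, and the
    # upper-triangular cell (1, 2) lives at matrix_ind2shaking_ind[1][2] == 4.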
def get_spots(self, sample):
'''
entity spot and tail_rel spot: (span_pos1, span_pos2, tag_id)
head_rel spot: (rel_id, span_pos1, span_pos2, tag_id)
'''
ent_matrix_spots, head_rel_matrix_spots, tail_rel_matrix_spots = [], [], []
for rel in sample["relation_list"]:
subj_tok_span = rel["subj_tok_span"]
obj_tok_span = rel["obj_tok_span"]
ent_matrix_spots.append((subj_tok_span[0], subj_tok_span[1] - 1, self.tag2id_ent["ENT-H2T"]))
ent_matrix_spots.append((obj_tok_span[0], obj_tok_span[1] - 1, self.tag2id_ent["ENT-H2T"]))
if subj_tok_span[0] <= obj_tok_span[0]:
head_rel_matrix_spots.append((self.rel2id[rel["predicate"]], subj_tok_span[0], obj_tok_span[0], self.tag2id_head_rel["REL-SH2OH"]))
else:
head_rel_matrix_spots.append((self.rel2id[rel["predicate"]], obj_tok_span[0], subj_tok_span[0], self.tag2id_head_rel["REL-OH2SH"]))
if subj_tok_span[1] <= obj_tok_span[1]:
tail_rel_matrix_spots.append((self.rel2id[rel["predicate"]], subj_tok_span[1] - 1, obj_tok_span[1] - 1, self.tag2id_tail_rel["REL-ST2OT"]))
else:
tail_rel_matrix_spots.append((self.rel2id[rel["predicate"]], obj_tok_span[1] - 1, subj_tok_span[1] - 1, self.tag2id_tail_rel["REL-OT2ST"]))
return ent_matrix_spots, head_rel_matrix_spots, tail_rel_matrix_spots
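    # Example (illustrative): a relation whose subject spans tokens [2, 4) and
    # whose object spans tokens [7, 8), with predicate id 5, yields
    #     ent spots:     (2, 3, 1) and (7, 7, 1)   # ENT-H2T over inclusive tails
    #     head-rel spot: (5, 2, 7, 1)              # REL-SH2OH (subject head first)
    #     tail-rel spot: (5, 3, 7, 1)              # REL-ST2OT (subject tail first)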
def sharing_spots2shaking_tag(self, spots):
'''
convert spots to shaking seq tag
        spots: [(start_ind, end_ind, tag_id), ], for entity
return:
shake_seq_tag: (shaking_seq_len, )
'''
shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
shaking_seq_tag = torch.zeros(shaking_seq_len).long()
for sp in spots:
shaking_ind = self.matrix_ind2shaking_ind[sp[0]][sp[1]]
shaking_seq_tag[shaking_ind] = sp[2]
return shaking_seq_tag
def spots2shaking_tag(self, spots):
'''
convert spots to shaking seq tag
spots: [(rel_id, start_ind, end_ind, tag_id), ], for head relation and tail relation
return:
shake_seq_tag: (rel_size, shaking_seq_len, )
'''
shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
shaking_seq_tag = torch.zeros(len(self.rel2id), shaking_seq_len).long()
for sp in spots:
shaking_ind = self.matrix_ind2shaking_ind[sp[1]][sp[2]]
shaking_seq_tag[sp[0]][shaking_ind] = sp[3]
return shaking_seq_tag
def sharing_spots2shaking_tag4batch(self, batch_spots):
'''
convert spots to batch shaking seq tag
        stacking long sequences is costly, so this function builds the batched shaking tag directly;
        generating one shaking tag per sample and then stacking takes ~1s for a batch of 32, which is too expensive
        spots: [(start_ind, end_ind, tag_id), ], for entity
return:
batch_shake_seq_tag: (batch_size, shaking_seq_len)
'''
shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
batch_shaking_seq_tag = torch.zeros(len(batch_spots), shaking_seq_len).long()
for batch_id, spots in enumerate(batch_spots):
for sp in spots:
shaking_ind = self.matrix_ind2shaking_ind[sp[0]][sp[1]]
tag_id = sp[2]
batch_shaking_seq_tag[batch_id][shaking_ind] = tag_id
return batch_shaking_seq_tag
def spots2shaking_tag4batch(self, batch_spots):
'''
convert spots to batch shaking seq tag
spots: [(rel_id, start_ind, end_ind, tag_id), ], for head relation and tail_relation
return:
batch_shake_seq_tag: (batch_size, rel_size, shaking_seq_len)
'''
shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
batch_shaking_seq_tag = torch.zeros(len(batch_spots), len(self.rel2id), shaking_seq_len).long()
for batch_id, spots in enumerate(batch_spots):
for sp in spots:
shaking_ind = self.matrix_ind2shaking_ind[sp[1]][sp[2]]
tag_id = sp[3]
rel_id = sp[0]
batch_shaking_seq_tag[batch_id][rel_id][shaking_ind] = tag_id
return batch_shaking_seq_tag
def get_spots_fr_shaking_tag(self, shaking_tag):
'''
shaking_tag -> spots
shaking_tag: (rel_size, shaking_seq_len)
spots: [(rel_id, start_ind, end_ind, tag_id), ]
'''
spots = []
for shaking_inds in shaking_tag.nonzero():
rel_id = shaking_inds[0].item()
tag_id = shaking_tag[rel_id][shaking_inds[1]].item()
matrix_inds = self.shaking_ind2matrix_ind[shaking_inds[1]]
spot = (rel_id, matrix_inds[0], matrix_inds[1], tag_id)
spots.append(spot)
return spots
def get_sharing_spots_fr_shaking_tag(self, shaking_tag):
'''
shaking_tag -> spots
shaking_tag: (shaking_seq_len, )
spots: [(start_ind, end_ind, tag_id), ]
'''
spots = []
for shaking_ind in shaking_tag.nonzero():
shaking_ind_ = shaking_ind[0].item()
tag_id = shaking_tag[shaking_ind_]
matrix_inds = self.shaking_ind2matrix_ind[shaking_ind_]
spot = (matrix_inds[0], matrix_inds[1], tag_id)
spots.append(spot)
return spots
def decode_rel_fr_shaking_tag(self,
text,
ent_shaking_tag,
head_rel_shaking_tag,
tail_rel_shaking_tag,
tok2char_span,
tok_offset = 0, char_offset = 0):
'''
ent shaking tag: (shaking_seq_len, )
head rel and tail rel shaking_tag: size = (rel_size, shaking_seq_len, )
'''
rel_list = []
ent_matrix_spots = self.get_sharing_spots_fr_shaking_tag(ent_shaking_tag)
head_rel_matrix_spots = self.get_spots_fr_shaking_tag(head_rel_shaking_tag)
tail_rel_matrix_spots = self.get_spots_fr_shaking_tag(tail_rel_shaking_tag)
# entity
head_ind2entities = {}
for sp in ent_matrix_spots:
tag_id = sp[2]
if tag_id != self.tag2id_ent["ENT-H2T"]:
continue
char_span_list = tok2char_span[sp[0]:sp[1] + 1]
char_sp = [char_span_list[0][0], char_span_list[-1][1]]
ent_text = text[char_sp[0]:char_sp[1]]
head_key = sp[0] # take head as the key to entity list start with the head token
if head_key not in head_ind2entities:
head_ind2entities[head_key] = []
head_ind2entities[head_key].append({
"text": ent_text,
"tok_span": [sp[0], sp[1] + 1],
"char_span": char_sp,
})
# tail relation
tail_rel_memory_set = set()
for sp in tail_rel_matrix_spots:
rel_id = sp[0]
tag_id = sp[3]
if tag_id == self.tag2id_tail_rel["REL-ST2OT"]:
tail_rel_memory = "{}-{}-{}".format(rel_id, sp[1], sp[2])
tail_rel_memory_set.add(tail_rel_memory)
elif tag_id == self.tag2id_tail_rel["REL-OT2ST"]:
tail_rel_memory = "{}-{}-{}".format(rel_id, sp[2], sp[1])
tail_rel_memory_set.add(tail_rel_memory)
# head relation
for sp in head_rel_matrix_spots:
rel_id = sp[0]
tag_id = sp[3]
if tag_id == self.tag2id_head_rel["REL-SH2OH"]:
subj_head_key, obj_head_key = sp[1], sp[2]
elif tag_id == self.tag2id_head_rel["REL-OH2SH"]:
subj_head_key, obj_head_key = sp[2], sp[1]
if subj_head_key not in head_ind2entities or obj_head_key not in head_ind2entities:
# no entity start with subj_head_key and obj_head_key
continue
subj_list = head_ind2entities[subj_head_key] # all entities start with this subject head
obj_list = head_ind2entities[obj_head_key] # all entities start with this object head
# go over all subj-obj pair to check whether the relation exists
for subj in subj_list:
for obj in obj_list:
tail_rel_memory = "{}-{}-{}".format(rel_id, subj["tok_span"][1] - 1, obj["tok_span"][1] - 1)
if tail_rel_memory not in tail_rel_memory_set:
# no such relation
continue
rel_list.append({
"subject": subj["text"],
"object": obj["text"],
"subj_tok_span": [subj["tok_span"][0] + tok_offset, subj["tok_span"][1] + tok_offset],
"obj_tok_span": [obj["tok_span"][0] + tok_offset, obj["tok_span"][1] + tok_offset],
"subj_char_span": [subj["char_span"][0] + char_offset, subj["char_span"][1] + char_offset],
"obj_char_span": [obj["char_span"][0] + char_offset, obj["char_span"][1] + char_offset],
"predicate": self.id2rel[rel_id],
})
return rel_list
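# Decoding recap (sketch): a (subject, object, predicate) triple is emitted only
# when all three taggers agree --
#     1. both spans were tagged ENT-H2T, so they appear in head_ind2entities;
#     2. their head tokens are linked for this relation id (head-rel spots);
#     3. their tail tokens are linked for the same relation id, checked via the
#        "relid-subjtail-objtail" keys collected in tail_rel_memory_set.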
class DataMaker4Bert():
def __init__(self, tokenizer, handshaking_tagger):
self.tokenizer = tokenizer
self.handshaking_tagger = handshaking_tagger
def get_indexed_data(self, data, max_seq_len, data_type = "train"):
indexed_samples = []
for ind, sample in tqdm(enumerate(data), desc = "Generate indexed train or valid data"):
text = sample["text"]
# codes for bert input
codes = self.tokenizer.encode_plus(text,
return_offsets_mapping = True,
add_special_tokens = False,
max_length = max_seq_len,
truncation = True,
pad_to_max_length = True)
# tagging
spots_tuple = None
if data_type != "test":
spots_tuple = self.handshaking_tagger.get_spots(sample)
# get codes
input_ids = torch.tensor(codes["input_ids"]).long()
attention_mask = torch.tensor(codes["attention_mask"]).long()
token_type_ids = torch.tensor(codes["token_type_ids"]).long()
tok2char_span = codes["offset_mapping"]
sample_tp = (sample,
input_ids,
attention_mask,
token_type_ids,
tok2char_span,
spots_tuple,
)
indexed_samples.append(sample_tp)
return indexed_samples
def generate_batch(self, batch_data, data_type = "train"):
sample_list = []
input_ids_list = []
attention_mask_list = []
token_type_ids_list = []
tok2char_span_list = []
ent_spots_list = []
head_rel_spots_list = []
tail_rel_spots_list = []
for tp in batch_data:
sample_list.append(tp[0])
input_ids_list.append(tp[1])
attention_mask_list.append(tp[2])
token_type_ids_list.append(tp[3])
tok2char_span_list.append(tp[4])
if data_type != "test":
ent_matrix_spots, head_rel_matrix_spots, tail_rel_matrix_spots = tp[5]
ent_spots_list.append(ent_matrix_spots)
head_rel_spots_list.append(head_rel_matrix_spots)
tail_rel_spots_list.append(tail_rel_matrix_spots)
# @specific: indexed by bert tokenizer
batch_input_ids = torch.stack(input_ids_list, dim = 0)
batch_attention_mask = torch.stack(attention_mask_list, dim = 0)
batch_token_type_ids = torch.stack(token_type_ids_list, dim = 0)
batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = None, None, None
if data_type != "test":
batch_ent_shaking_tag = self.handshaking_tagger.sharing_spots2shaking_tag4batch(ent_spots_list)
batch_head_rel_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(head_rel_spots_list)
batch_tail_rel_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(tail_rel_spots_list)
return sample_list, \
batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, \
batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag
class DataMaker4BiLSTM():
def __init__(self, text2indices, get_tok2char_span_map, handshaking_tagger):
self.text2indices = text2indices
self.handshaking_tagger = handshaking_tagger
self.get_tok2char_span_map = get_tok2char_span_map
def get_indexed_data(self, data, max_seq_len, data_type = "train"):
indexed_samples = []
for ind, sample in tqdm(enumerate(data), desc = "Generate indexed train or valid data"):
text = sample["text"]
# tagging
spots_tuple = None
if data_type != "test":
spots_tuple = self.handshaking_tagger.get_spots(sample)
tok2char_span = self.get_tok2char_span_map(text)
tok2char_span.extend([(-1, -1)] * (max_seq_len - len(tok2char_span)))
input_ids = self.text2indices(text, max_seq_len)
sample_tp = (sample,
input_ids,
tok2char_span,
spots_tuple,
)
indexed_samples.append(sample_tp)
return indexed_samples
def generate_batch(self, batch_data, data_type = "train"):
sample_list = []
input_ids_list = []
tok2char_span_list = []
ent_spots_list = []
head_rel_spots_list = []
tail_rel_spots_list = []
for tp in batch_data:
sample_list.append(tp[0])
input_ids_list.append(tp[1])
tok2char_span_list.append(tp[2])
if data_type != "test":
ent_matrix_spots, head_rel_matrix_spots, tail_rel_matrix_spots = tp[3]
ent_spots_list.append(ent_matrix_spots)
head_rel_spots_list.append(head_rel_matrix_spots)
tail_rel_spots_list.append(tail_rel_matrix_spots)
batch_input_ids = torch.stack(input_ids_list, dim = 0)
batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = None, None, None
if data_type != "test":
batch_ent_shaking_tag = self.handshaking_tagger.sharing_spots2shaking_tag4batch(ent_spots_list)
batch_head_rel_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(head_rel_spots_list)
batch_tail_rel_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(tail_rel_spots_list)
return sample_list, \
batch_input_ids, tok2char_span_list, \
batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag
class TPLinkerBert(nn.Module):
def __init__(self, encoder,
rel_size,
shaking_type,
inner_enc_type,
dist_emb_size,
ent_add_dist,
rel_add_dist
):
super().__init__()
self.encoder = encoder
hidden_size = encoder.config.hidden_size
self.ent_fc = nn.Linear(hidden_size, 2)
self.head_rel_fc_list = [nn.Linear(hidden_size, 3) for _ in range(rel_size)]
self.tail_rel_fc_list = [nn.Linear(hidden_size, 3) for _ in range(rel_size)]
for ind, fc in enumerate(self.head_rel_fc_list):
self.register_parameter("weight_4_head_rel{}".format(ind), fc.weight)
self.register_parameter("bias_4_head_rel{}".format(ind), fc.bias)
for ind, fc in enumerate(self.tail_rel_fc_list):
self.register_parameter("weight_4_tail_rel{}".format(ind), fc.weight)
self.register_parameter("bias_4_tail_rel{}".format(ind), fc.bias)
# handshaking kernel
self.handshaking_kernel = HandshakingKernel(hidden_size, shaking_type, inner_enc_type)
# distance embedding
self.dist_emb_size = dist_emb_size
self.dist_embbedings = None # it will be set in the first forwarding
self.ent_add_dist = ent_add_dist
self.rel_add_dist = rel_add_dist
def forward(self, input_ids, attention_mask, token_type_ids):
# input_ids, attention_mask, token_type_ids: (batch_size, seq_len)
context_outputs = self.encoder(input_ids, attention_mask, token_type_ids)
# last_hidden_state: (batch_size, seq_len, hidden_size)
last_hidden_state = context_outputs[0]
# shaking_hiddens: (batch_size, 1 + ... + seq_len, hidden_size)
shaking_hiddens = self.handshaking_kernel(last_hidden_state)
shaking_hiddens4ent = shaking_hiddens
shaking_hiddens4rel = shaking_hiddens
# add distance embeddings if enabled
if self.dist_emb_size != -1:
# set self.dist_embbedings
hidden_size = shaking_hiddens.size()[-1]
if self.dist_embbedings is None:
dist_emb = torch.zeros([self.dist_emb_size, hidden_size]).to(shaking_hiddens.device)
for d in range(self.dist_emb_size):
for i in range(hidden_size):
if i % 2 == 0:
dist_emb[d][i] = math.sin(d / 10000**(i / hidden_size))
else:
dist_emb[d][i] = math.cos(d / 10000**((i - 1) / hidden_size))
seq_len = input_ids.size()[1]
dist_embbeding_segs = []
for after_num in range(seq_len, 0, -1):
dist_embbeding_segs.append(dist_emb[:after_num, :])
self.dist_embbedings = torch.cat(dist_embbeding_segs, dim = 0)
if self.ent_add_dist:
shaking_hiddens4ent = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
if self.rel_add_dist:
shaking_hiddens4rel = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
# if self.dist_emb_size != -1 and self.ent_add_dist:
# shaking_hiddens4ent = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
# else:
# shaking_hiddens4ent = shaking_hiddens
# if self.dist_emb_size != -1 and self.rel_add_dist:
# shaking_hiddens4rel = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
# else:
# shaking_hiddens4rel = shaking_hiddens
ent_shaking_outputs = self.ent_fc(shaking_hiddens4ent)
head_rel_shaking_outputs_list = []
for fc in self.head_rel_fc_list:
head_rel_shaking_outputs_list.append(fc(shaking_hiddens4rel))
tail_rel_shaking_outputs_list = []
for fc in self.tail_rel_fc_list:
tail_rel_shaking_outputs_list.append(fc(shaking_hiddens4rel))
head_rel_shaking_outputs = torch.stack(head_rel_shaking_outputs_list, dim = 1)
tail_rel_shaking_outputs = torch.stack(tail_rel_shaking_outputs_list, dim = 1)
return ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs
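# --- illustrative sketch (added; not part of the original model) ---
# The loop in forward() above builds Transformer-style sinusoidal distance
# embeddings: even dims get sin(d / 10000**(i / hidden_size)), odd dims the
# matching cos. A minimal vectorized equivalent, relying on this module's
# torch import:
def _build_dist_embedding_sketch(dist_emb_size, hidden_size):
    d = torch.arange(dist_emb_size).float()[:, None]  # (D, 1)
    i = torch.arange(hidden_size).float()[None, :]  # (1, H)
    angle = d / torch.pow(torch.tensor(10000.0), (i - i % 2) / hidden_size)
    # even dims -> sin, odd dims -> cos, matching the element-wise loop above
    return torch.where(i % 2 == 0, torch.sin(angle), torch.cos(angle))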
class TPLinkerBiLSTM(nn.Module):
def __init__(self, init_word_embedding_matrix,
emb_dropout_rate,
enc_hidden_size,
dec_hidden_size,
rnn_dropout_rate,
rel_size,
shaking_type,
inner_enc_type,
dist_emb_size,
ent_add_dist,
rel_add_dist):
super().__init__()
self.word_embeds = nn.Embedding.from_pretrained(init_word_embedding_matrix, freeze = False)
self.emb_dropout = nn.Dropout(emb_dropout_rate)
self.enc_lstm = nn.LSTM(init_word_embedding_matrix.size()[-1],
enc_hidden_size // 2,
num_layers = 1,
bidirectional = True,
batch_first = True)
self.dec_lstm = nn.LSTM(enc_hidden_size,
dec_hidden_size // 2,
num_layers = 1,
bidirectional = True,
batch_first = True)
self.rnn_dropout = nn.Dropout(rnn_dropout_rate)
hidden_size = dec_hidden_size
self.ent_fc = nn.Linear(hidden_size, 2)
self.head_rel_fc_list = [nn.Linear(hidden_size, 3) for _ in range(rel_size)]
self.tail_rel_fc_list = [nn.Linear(hidden_size, 3) for _ in range(rel_size)]
for ind, fc in enumerate(self.head_rel_fc_list):
self.register_parameter("weight_4_head_rel{}".format(ind), fc.weight)
self.register_parameter("bias_4_head_rel{}".format(ind), fc.bias)
for ind, fc in enumerate(self.tail_rel_fc_list):
self.register_parameter("weight_4_tail_rel{}".format(ind), fc.weight)
self.register_parameter("bias_4_tail_rel{}".format(ind), fc.bias)
# handshaking kernel
self.handshaking_kernel = HandshakingKernel(hidden_size, shaking_type, inner_enc_type)
# distance embedding
self.dist_emb_size = dist_emb_size
self.dist_embbedings = None # set lazily on the first forward pass
self.ent_add_dist = ent_add_dist
self.rel_add_dist = rel_add_dist
def forward(self, input_ids):
# input_ids: (batch_size, seq_len)
# embedding: (batch_size, seq_len, emb_dim)
embedding = self.word_embeds(input_ids)
embedding = self.emb_dropout(embedding)
# lstm_outputs: (batch_size, seq_len, enc_hidden_size)
lstm_outputs, _ = self.enc_lstm(embedding)
lstm_outputs = self.rnn_dropout(lstm_outputs)
# lstm_outputs: (batch_size, seq_len, dec_hidden_size)
lstm_outputs, _ = self.dec_lstm(lstm_outputs)
lstm_outputs = self.rnn_dropout(lstm_outputs)
# shaking_hiddens: (batch_size, 1 + ... + seq_len, hidden_size)
shaking_hiddens = self.handshaking_kernel(lstm_outputs)
shaking_hiddens4ent = shaking_hiddens
shaking_hiddens4rel = shaking_hiddens
# add distance embeddings if enabled
if self.dist_emb_size != -1:
# set self.dist_embbedings
hidden_size = shaking_hiddens.size()[-1]
if self.dist_embbedings is None:
dist_emb = torch.zeros([self.dist_emb_size, hidden_size]).to(shaking_hiddens.device)
for d in range(self.dist_emb_size):
for i in range(hidden_size):
if i % 2 == 0:
dist_emb[d][i] = math.sin(d / 10000**(i / hidden_size))
else:
dist_emb[d][i] = math.cos(d / 10000**((i - 1) / hidden_size))
seq_len = input_ids.size()[1]
dist_embbeding_segs = []
for after_num in range(seq_len, 0, -1):
dist_embbeding_segs.append(dist_emb[:after_num, :])
self.dist_embbedings = torch.cat(dist_embbeding_segs, dim = 0)
if self.ent_add_dist:
shaking_hiddens4ent = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
if self.rel_add_dist:
shaking_hiddens4rel = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
ent_shaking_outputs = self.ent_fc(shaking_hiddens4ent)
head_rel_shaking_outputs_list = []
for fc in self.head_rel_fc_list:
head_rel_shaking_outputs_list.append(fc(shaking_hiddens4rel))
tail_rel_shaking_outputs_list = []
for fc in self.tail_rel_fc_list:
tail_rel_shaking_outputs_list.append(fc(shaking_hiddens4rel))
head_rel_shaking_outputs = torch.stack(head_rel_shaking_outputs_list, dim = 1)
tail_rel_shaking_outputs = torch.stack(tail_rel_shaking_outputs_list, dim = 1)
return ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs
class MetricsCalculator():
def __init__(self, handshaking_tagger):
self.handshaking_tagger = handshaking_tagger
def get_sample_accuracy(self, pred, truth):
'''
Compute the proportion of samples whose extracted fields are all correct,
i.e., the proportion of samples in this batch whose output exactly equals the truth.
'''
# (batch_size, ..., seq_len, tag_size) -> (batch_size, ..., seq_len)
pred_id = torch.argmax(pred, dim = -1)
# (batch_size, ..., seq_len) -> (batch_size, -1), flatten each sample into one sequence
pred_id = pred_id.view(pred_id.size()[0], -1)
truth = truth.view(truth.size()[0], -1)
# (batch_size, ), each element is the number of tags on which pred and truth agree
correct_tag_num = torch.sum(torch.eq(truth, pred_id).float(), dim = 1)
# every tag along the seq dimension must be correct, so correct_tag_num must equal the seq length for the sample to count as correct
sample_acc_ = torch.eq(correct_tag_num, torch.ones_like(correct_tag_num) * truth.size()[-1]).float()
sample_acc = torch.mean(sample_acc_)
return sample_acc
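# A worked example (added; hypothetical numbers): with two samples whose
# flattened tags are pred_id = [[1, 0], [1, 1]] and truth = [[1, 0], [1, 0]],
# correct_tag_num = [2, 1]; only the first sample matches on all tags,
# so sample_acc = 0.5.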
def get_rel_cpg(self, sample_list, tok2char_span_list,
batch_pred_ent_shaking_outputs,
batch_pred_head_rel_shaking_outputs,
batch_pred_tail_rel_shaking_outputs,
pattern = "only_head_text"):
batch_pred_ent_shaking_tag = torch.argmax(batch_pred_ent_shaking_outputs, dim = -1)
batch_pred_head_rel_shaking_tag = torch.argmax(batch_pred_head_rel_shaking_outputs, dim = -1)
batch_pred_tail_rel_shaking_tag = torch.argmax(batch_pred_tail_rel_shaking_outputs, dim = -1)
correct_num, pred_num, gold_num = 0, 0, 0
for ind in range(len(sample_list)):
sample = sample_list[ind]
text = sample["text"]
tok2char_span = tok2char_span_list[ind]
pred_ent_shaking_tag = batch_pred_ent_shaking_tag[ind]
pred_head_rel_shaking_tag = batch_pred_head_rel_shaking_tag[ind]
pred_tail_rel_shaking_tag = batch_pred_tail_rel_shaking_tag[ind]
pred_rel_list = self.handshaking_tagger.decode_rel_fr_shaking_tag(text,
pred_ent_shaking_tag,
pred_head_rel_shaking_tag,
pred_tail_rel_shaking_tag,
tok2char_span)
gold_rel_list = sample["relation_list"]
if pattern == "only_head_index":
gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["predicate"], rel["obj_tok_span"][0]) for rel in gold_rel_list])
pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["predicate"], rel["obj_tok_span"][0]) for rel in pred_rel_list])
elif pattern == "whole_span":
gold_rel_set = set(["{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["subj_tok_span"][1], rel["predicate"], rel["obj_tok_span"][0], rel["obj_tok_span"][1]) for rel in gold_rel_list])
pred_rel_set = set(["{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["subj_tok_span"][1], rel["predicate"], rel["obj_tok_span"][0], rel["obj_tok_span"][1]) for rel in pred_rel_list])
elif pattern == "whole_text":
gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"], rel["predicate"], rel["object"]) for rel in gold_rel_list])
pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"], rel["predicate"], rel["object"]) for rel in pred_rel_list])
elif pattern == "only_head_text":
gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"].split(" ")[0], rel["predicate"], rel["object"].split(" ")[0]) for rel in gold_rel_list])
pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"].split(" ")[0], rel["predicate"], rel["object"].split(" ")[0]) for rel in pred_rel_list])
for rel_str in pred_rel_set:
if rel_str in gold_rel_set:
correct_num += 1
pred_num += len(pred_rel_set)
gold_num += len(gold_rel_set)
return correct_num, pred_num, gold_num
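# Added note: "\u2E80" is used only as an unlikely separator when serializing
# (subject, predicate, object) into set keys, so that the membership test above
# implements triple matching, e.g. "Obama\u2E80born_in\u2E80Hawaii" under the
# whole_text pattern (a made-up example).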
def get_prf_scores(self, correct_num, pred_num, gold_num):
minimini = 1e-10
precision = correct_num / (pred_num + minimini)
recall = correct_num / (gold_num + minimini)
f1 = 2 * precision * recall / (precision + recall + minimini)
return precision, recall, f1 | 31,326 | 46.108271 | 222 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker/config.py | import string
import random
common = {
"exp_name": "nyt_star",
"rel2id": "rel2id.json",
"device_num": 0,
# "encoder": "BiLSTM",
"encoder": "BERT",
"hyper_parameters": {
"shaking_type": "cat", # cat, cat_plus, cln, cln_plus; Experiments show that cat/cat_plus work better with BiLSTM, while cln/cln_plus work better with BERT. The results in the paper are produced by "cat". So, if you want to reproduce the results, "cat" is enough, no matter for BERT or BiLSTM.
"inner_enc_type": "lstm", # valid only if cat_plus or cln_plus is set. It is the way how to encode inner tokens between each token pairs. If you only want to reproduce the results, just leave it alone.
"dist_emb_size": -1, # -1: do not use distance embedding; other number: need to be larger than the max_seq_len of the inputs. set -1 if you only want to reproduce the results in the paper.
"ent_add_dist": False, # set true if you want add distance embeddings for each token pairs. (for entity decoder)
"rel_add_dist": False, # the same as above (for relation decoder)
"match_pattern": "only_head_text", # only_head_text (nyt_star, webnlg_star), whole_text (nyt, webnlg), only_head_index, whole_span
},
}
common["run_name"] = "{}+{}+{}".format("TP1", common["hyper_parameters"]["shaking_type"], common["encoder"]) + ""
run_id = ''.join(random.sample(string.ascii_letters + string.digits, 8))
train_config = {
"train_data": "train_data.json",
"valid_data": "valid_data.json",
"rel2id": "rel2id.json",
"logger": "wandb", # if wandb, comment the following four lines
# # if logger is set as default, uncomment the following four lines
# "logger": "default",
# "run_id": run_id,
# "log_path": "./default_log_dir/default.log",
# "path_to_save_model": "./default_log_dir/{}".format(run_id),
# only save the model state dict if F1 score surpasses <f1_2_save>
"f1_2_save": 0,
# whether to train from scratch
"fr_scratch": True,
# write down notes here if you want, it will be logged
"note": "start from scratch",
# if not fr scratch, set a model_state_dict
"model_state_dict_path": "",
"hyper_parameters": {
"batch_size": 6,
"epochs": 100,
"seed": 2333,
"log_interval": 10,
"max_seq_len": 100,
"sliding_len": 20,
"loss_weight_recover_steps": 6000, # to speed up the training process, the loss of EH-to-ET sequence is set higher than other sequences at the beginning, but it will recover in <loss_weight_recover_steps> steps.
"scheduler": "CAWR", # Step
},
}
eval_config = {
"model_state_dict_dir": "./wandb", # if use wandb, set "./wandb", or set "./default_log_dir" if you use default logger
"run_ids": ["10suiyrf", ],
"last_k_model": 1,
"test_data": "*test*.json", # "*test*.json"
# where to save results
"save_res": False,
"save_res_dir": "../results",
# score: set true only if test set is annotated with ground truth
"score": True,
"hyper_parameters": {
"batch_size": 32,
"force_split": False,
"max_test_seq_len": 512,
"sliding_len": 50,
},
}
bert_config = {
"data_home": "../data4bert",
"bert_path": "../../pretrained_models/bert-base-cased",
"hyper_parameters": {
"lr": 5e-5,
},
}
bilstm_config = {
"data_home": "../data4bilstm",
"token2idx": "token2idx.json",
"pretrained_word_embedding_path": "../../pretrained_emb/glove_300_nyt.emb",
"hyper_parameters": {
"lr": 1e-3,
"enc_hidden_size": 300,
"dec_hidden_size": 600,
"emb_dropout": 0.1,
"rnn_dropout": 0.1,
"word_embedding_dim": 300,
},
}
cawr_scheduler = {
# CosineAnnealingWarmRestarts
"T_mult": 1,
"rewarm_epoch_num": 2,
}
step_scheduler = {
# StepLR
"decay_rate": 0.999,
"decay_steps": 100,
}
# ---------------------------dicts above is all you need to set---------------------------------------------------
if common["encoder"] == "BERT":
hyper_params = {**common["hyper_parameters"], **bert_config["hyper_parameters"]}
common = {**common, **bert_config}
common["hyper_parameters"] = hyper_params
elif common["encoder"] == "BiLSTM":
hyper_params = {**common["hyper_parameters"], **bilstm_config["hyper_parameters"]}
common = {**common, **bilstm_config}
common["hyper_parameters"] = hyper_params
hyper_params = {**common["hyper_parameters"], **train_config["hyper_parameters"]}
train_config = {**train_config, **common}
train_config["hyper_parameters"] = hyper_params
if train_config["hyper_parameters"]["scheduler"] == "CAWR":
train_config["hyper_parameters"] = {**train_config["hyper_parameters"], **cawr_scheduler}
elif train_config["hyper_parameters"]["scheduler"] == "Step":
train_config["hyper_parameters"] = {**train_config["hyper_parameters"], **step_scheduler}
hyper_params = {**common["hyper_parameters"], **eval_config["hyper_parameters"]}
eval_config = {**eval_config, **common}
eval_config["hyper_parameters"] = hyper_params
| 5,148 | 39.226563 | 301 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker/__init__.py | 0 | 0 | 0 | py | |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker/train.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import json
import os
from tqdm import tqdm
import re
from IPython.core.debugger import set_trace
from pprint import pprint
from transformers import AutoModel, BertTokenizerFast
import copy
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
import glob
import time
import logging
from common.utils import Preprocessor, DefaultLogger
from tplinker import (HandshakingTaggingScheme,
DataMaker4Bert,
DataMaker4BiLSTM,
TPLinkerBert,
TPLinkerBiLSTM,
MetricsCalculator)
import wandb
import config
from glove import Glove
import numpy as np
# In[ ]:
# try:
# from yaml import CLoader as Loader, CDumper as Dumper
# except ImportError:
# from yaml import Loader, Dumper
# config = yaml.load(open("train_config.yaml", "r"), Loader = yaml.FullLoader)
# In[ ]:
config = config.train_config
hyper_parameters = config["hyper_parameters"]
# In[ ]:
os.environ["TOKENIZERS_PARALLELISM"] = "true"
os.environ["CUDA_VISIBLE_DEVICES"] = str(config["device_num"])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# In[ ]:
# for reproducibility
torch.manual_seed(hyper_parameters["seed"]) # pytorch random seed
torch.backends.cudnn.deterministic = True
# In[ ]:
data_home = config["data_home"]
experiment_name = config["exp_name"]
train_data_path = os.path.join(data_home, experiment_name, config["train_data"])
valid_data_path = os.path.join(data_home, experiment_name, config["valid_data"])
rel2id_path = os.path.join(data_home, experiment_name, config["rel2id"])
# In[ ]:
if config["logger"] == "wandb":
# init wandb
wandb.init(project = experiment_name,
name = config["run_name"],
config = hyper_parameters # Initialize config
)
wandb.config.note = config["note"]
model_state_dict_dir = wandb.run.dir
logger = wandb
else:
logger = DefaultLogger(config["log_path"], experiment_name, config["run_name"], config["run_id"], hyper_parameters)
model_state_dict_dir = config["path_to_save_model"]
if not os.path.exists(model_state_dict_dir):
os.makedirs(model_state_dict_dir)
# # Load Data
# In[ ]:
train_data = json.load(open(train_data_path, "r", encoding = "utf-8"))
valid_data = json.load(open(valid_data_path, "r", encoding = "utf-8"))
# # Split
# In[ ]:
# @specific
if config["encoder"] == "BERT":
tokenizer = BertTokenizerFast.from_pretrained(config["bert_path"], add_special_tokens = False, do_lower_case = False)
tokenize = tokenizer.tokenize
get_tok2char_span_map = lambda text: tokenizer.encode_plus(text, return_offsets_mapping = True, add_special_tokens = False)["offset_mapping"]
elif config["encoder"] in {"BiLSTM", }:
tokenize = lambda text: text.split(" ")
def get_tok2char_span_map(text):
tokens = text.split(" ")
tok2char_span = []
char_num = 0
for tok in tokens:
tok2char_span.append((char_num, char_num + len(tok)))
char_num += len(tok) + 1 # +1: whitespace
return tok2char_span
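# Added example: for text "New York", the tokens ["New", "York"] map to the
# character spans [(0, 3), (4, 8)]; the +1 above accounts for the whitespace.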
# In[ ]:
preprocessor = Preprocessor(tokenize_func = tokenize,
get_tok2char_span_map_func = get_tok2char_span_map)
# In[ ]:
# train and valid max token num
max_tok_num = 0
all_data = train_data + valid_data
for sample in all_data:
tokens = tokenize(sample["text"])
max_tok_num = max(max_tok_num, len(tokens))
max_tok_num
# In[ ]:
if max_tok_num > hyper_parameters["max_seq_len"]:
train_data = preprocessor.split_into_short_samples(train_data,
hyper_parameters["max_seq_len"],
sliding_len = hyper_parameters["sliding_len"],
encoder = config["encoder"]
)
valid_data = preprocessor.split_into_short_samples(valid_data,
hyper_parameters["max_seq_len"],
sliding_len = hyper_parameters["sliding_len"],
encoder = config["encoder"]
)
# In[ ]:
print("train: {}".format(len(train_data)), "valid: {}".format(len(valid_data)))
# # Tagger (Decoder)
# In[ ]:
max_seq_len = min(max_tok_num, hyper_parameters["max_seq_len"])
rel2id = json.load(open(rel2id_path, "r", encoding = "utf-8"))
handshaking_tagger = HandshakingTaggingScheme(rel2id = rel2id, max_seq_len = max_seq_len)
# # Dataset
# In[ ]:
if config["encoder"] == "BERT":
tokenizer = BertTokenizerFast.from_pretrained(config["bert_path"], add_special_tokens = False, do_lower_case = False)
data_maker = DataMaker4Bert(tokenizer, handshaking_tagger)
elif config["encoder"] in {"BiLSTM", }:
token2idx_path = os.path.join(data_home, experiment_name, config["token2idx"])
token2idx = json.load(open(token2idx_path, "r", encoding = "utf-8"))
idx2token = {idx:tok for tok, idx in token2idx.items()}
def text2indices(text, max_seq_len):
input_ids = []
tokens = text.split(" ")
for tok in tokens:
if tok not in token2idx:
input_ids.append(token2idx['<UNK>'])
else:
input_ids.append(token2idx[tok])
if len(input_ids) < max_seq_len:
input_ids.extend([token2idx['<PAD>']] * (max_seq_len - len(input_ids)))
input_ids = torch.tensor(input_ids[:max_seq_len])
return input_ids
data_maker = DataMaker4BiLSTM(text2indices, get_tok2char_span_map, handshaking_tagger)
# In[ ]:
class MyDataset(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
# In[ ]:
indexed_train_data = data_maker.get_indexed_data(train_data, max_seq_len)
indexed_valid_data = data_maker.get_indexed_data(valid_data, max_seq_len)
# In[ ]:
train_dataloader = DataLoader(MyDataset(indexed_train_data),
batch_size = hyper_parameters["batch_size"],
shuffle = True,
num_workers = 6,
drop_last = False,
collate_fn = data_maker.generate_batch,
)
valid_dataloader = DataLoader(MyDataset(indexed_valid_data),
batch_size = hyper_parameters["batch_size"],
shuffle = True,
num_workers = 6,
drop_last = False,
collate_fn = data_maker.generate_batch,
)
# In[ ]:
# # have a look at dataloader
# train_data_iter = iter(train_dataloader)
# batch_data = next(train_data_iter)
# text_id_list, text_list, batch_input_ids, \
# batch_attention_mask, batch_token_type_ids, \
# offset_map_list, batch_ent_shaking_tag, \
# batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_data
# print(text_list[0])
# print()
# print(tokenizer.decode(batch_input_ids[0].tolist()))
# print(batch_input_ids.size())
# print(batch_attention_mask.size())
# print(batch_token_type_ids.size())
# print(len(offset_map_list))
# print(batch_ent_shaking_tag.size())
# print(batch_head_rel_shaking_tag.size())
# print(batch_tail_rel_shaking_tag.size())
# # Model
# In[ ]:
if config["encoder"] == "BERT":
encoder = AutoModel.from_pretrained(config["bert_path"])
hidden_size = encoder.config.hidden_size
fake_inputs = torch.zeros([hyper_parameters["batch_size"], max_seq_len, hidden_size]).to(device)
rel_extractor = TPLinkerBert(encoder,
len(rel2id),
hyper_parameters["shaking_type"],
hyper_parameters["inner_enc_type"],
hyper_parameters["dist_emb_size"],
hyper_parameters["ent_add_dist"],
hyper_parameters["rel_add_dist"],
)
elif config["encoder"] in {"BiLSTM", }:
glove = Glove()
glove = glove.load(config["pretrained_word_embedding_path"])
# prepare embedding matrix
word_embedding_init_matrix = np.random.normal(-1, 1, size=(len(token2idx), hyper_parameters["word_embedding_dim"]))
count_in = 0
# tokens found in the pretrained embeddings use the pretrained vectors
# tokens not in the pretrained vocabulary keep their random initialization
for ind, tok in tqdm(idx2token.items(), desc="Embedding matrix initializing..."):
if tok in glove.dictionary:
count_in += 1
word_embedding_init_matrix[ind] = glove.word_vectors[glove.dictionary[tok]]
print("{:.4f} tokens are in the pretrain word embedding matrix".format(count_in / len(idx2token))) # 命中预训练词向量的比例
word_embedding_init_matrix = torch.FloatTensor(word_embedding_init_matrix)
fake_inputs = torch.zeros([hyper_parameters["batch_size"], max_seq_len, hyper_parameters["dec_hidden_size"]]).to(device)
rel_extractor = TPLinkerBiLSTM(word_embedding_init_matrix,
hyper_parameters["emb_dropout"],
hyper_parameters["enc_hidden_size"],
hyper_parameters["dec_hidden_size"],
hyper_parameters["rnn_dropout"],
len(rel2id),
hyper_parameters["shaking_type"],
hyper_parameters["inner_enc_type"],
hyper_parameters["dist_emb_size"],
hyper_parameters["ent_add_dist"],
hyper_parameters["rel_add_dist"],
)
rel_extractor = rel_extractor.to(device)
# In[ ]:
# all_paras = sum(x.numel() for x in rel_extractor.parameters())
# enc_paras = sum(x.numel() for x in encoder.parameters())
# In[ ]:
# print(all_paras, enc_paras)
# print(all_paras - enc_paras)
# # Metrics
# In[ ]:
def bias_loss(weights = None):
if weights is not None:
weights = torch.FloatTensor(weights).to(device)
cross_en = nn.CrossEntropyLoss(weight = weights)
return lambda pred, target: cross_en(pred.view(-1, pred.size()[-1]), target.view(-1))
loss_func = bias_loss()
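# Added note: nn.CrossEntropyLoss expects (N, C) logits and (N,) targets, so
# the lambda above flattens e.g. a (batch_size, shaking_seq_len, tag_size)
# prediction to (batch_size * shaking_seq_len, tag_size) before the loss.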
# In[ ]:
metrics = MetricsCalculator(handshaking_tagger)
# # Train
# In[ ]:
# train step
def train_step(batch_train_data, optimizer, loss_weights):
if config["encoder"] == "BERT":
sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_train_data
batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = (batch_input_ids.to(device),
batch_attention_mask.to(device),
batch_token_type_ids.to(device),
batch_ent_shaking_tag.to(device),
batch_head_rel_shaking_tag.to(device),
batch_tail_rel_shaking_tag.to(device)
)
elif config["encoder"] in {"BiLSTM", }:
sample_list, batch_input_ids, tok2char_span_list, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_train_data
batch_input_ids, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = (batch_input_ids.to(device),
batch_ent_shaking_tag.to(device),
batch_head_rel_shaking_tag.to(device),
batch_tail_rel_shaking_tag.to(device)
)
# zero the parameter gradients
optimizer.zero_grad()
if config["encoder"] == "BERT":
ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs = rel_extractor(batch_input_ids,
batch_attention_mask,
batch_token_type_ids,
)
elif config["encoder"] in {"BiLSTM", }:
ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs = rel_extractor(batch_input_ids)
w_ent, w_rel = loss_weights["ent"], loss_weights["rel"]
loss = w_ent * loss_func(ent_shaking_outputs, batch_ent_shaking_tag) + w_rel * loss_func(head_rel_shaking_outputs, batch_head_rel_shaking_tag) + w_rel * loss_func(tail_rel_shaking_outputs, batch_tail_rel_shaking_tag)
loss.backward()
optimizer.step()
ent_sample_acc = metrics.get_sample_accuracy(ent_shaking_outputs,
batch_ent_shaking_tag)
head_rel_sample_acc = metrics.get_sample_accuracy(head_rel_shaking_outputs,
batch_head_rel_shaking_tag)
tail_rel_sample_acc = metrics.get_sample_accuracy(tail_rel_shaking_outputs,
batch_tail_rel_shaking_tag)
return loss.item(), ent_sample_acc.item(), head_rel_sample_acc.item(), tail_rel_sample_acc.item()
# valid step
def valid_step(batch_valid_data):
if config["encoder"] == "BERT":
sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_valid_data
batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = (batch_input_ids.to(device),
batch_attention_mask.to(device),
batch_token_type_ids.to(device),
batch_ent_shaking_tag.to(device),
batch_head_rel_shaking_tag.to(device),
batch_tail_rel_shaking_tag.to(device)
)
elif config["encoder"] in {"BiLSTM", }:
sample_list, batch_input_ids, tok2char_span_list, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_valid_data
batch_input_ids, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = (batch_input_ids.to(device),
batch_ent_shaking_tag.to(device),
batch_head_rel_shaking_tag.to(device),
batch_tail_rel_shaking_tag.to(device)
)
with torch.no_grad():
if config["encoder"] == "BERT":
ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs = rel_extractor(batch_input_ids,
batch_attention_mask,
batch_token_type_ids,
)
elif config["encoder"] in {"BiLSTM", }:
ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs = rel_extractor(batch_input_ids)
ent_sample_acc = metrics.get_sample_accuracy(ent_shaking_outputs,
batch_ent_shaking_tag)
head_rel_sample_acc = metrics.get_sample_accuracy(head_rel_shaking_outputs,
batch_head_rel_shaking_tag)
tail_rel_sample_acc = metrics.get_sample_accuracy(tail_rel_shaking_outputs,
batch_tail_rel_shaking_tag)
rel_cpg = metrics.get_rel_cpg(sample_list, tok2char_span_list,
ent_shaking_outputs,
head_rel_shaking_outputs,
tail_rel_shaking_outputs,
hyper_parameters["match_pattern"]
)
return ent_sample_acc.item(), head_rel_sample_acc.item(), tail_rel_sample_acc.item(), rel_cpg
# In[ ]:
max_f1 = 0.
def train_n_valid(train_dataloader, dev_dataloader, optimizer, scheduler, num_epoch):
def train(dataloader, ep):
# train
rel_extractor.train()
t_ep = time.time()
start_lr = optimizer.param_groups[0]['lr']
total_loss, total_ent_sample_acc, total_head_rel_sample_acc, total_tail_rel_sample_acc = 0., 0., 0., 0.
for batch_ind, batch_train_data in enumerate(dataloader):
t_batch = time.time()
z = (2 * len(rel2id) + 1)
steps_per_ep = len(dataloader)
total_steps = hyper_parameters["loss_weight_recover_steps"] + 1 # + 1 avoid division by zero error
current_step = steps_per_ep * ep + batch_ind
w_ent = max(1 / z + 1 - current_step / total_steps, 1 / z)
w_rel = min((len(rel2id) / z) * current_step / total_steps, (len(rel2id) / z))
loss_weights = {"ent": w_ent, "rel": w_rel}
loss, ent_sample_acc, head_rel_sample_acc, tail_rel_sample_acc = train_step(batch_train_data, optimizer, loss_weights)
scheduler.step()
total_loss += loss
total_ent_sample_acc += ent_sample_acc
total_head_rel_sample_acc += head_rel_sample_acc
total_tail_rel_sample_acc += tail_rel_sample_acc
avg_loss = total_loss / (batch_ind + 1)
avg_ent_sample_acc = total_ent_sample_acc / (batch_ind + 1)
avg_head_rel_sample_acc = total_head_rel_sample_acc / (batch_ind + 1)
avg_tail_rel_sample_acc = total_tail_rel_sample_acc / (batch_ind + 1)
batch_print_format = "\rproject: {}, run_name: {}, Epoch: {}/{}, batch: {}/{}, train_loss: {}, " + "t_ent_sample_acc: {}, t_head_rel_sample_acc: {}, t_tail_rel_sample_acc: {}," + "lr: {}, batch_time: {}, total_time: {} -------------"
print(batch_print_format.format(experiment_name, config["run_name"],
ep + 1, num_epoch,
batch_ind + 1, len(dataloader),
avg_loss,
avg_ent_sample_acc,
avg_head_rel_sample_acc,
avg_tail_rel_sample_acc,
optimizer.param_groups[0]['lr'],
time.time() - t_batch,
time.time() - t_ep,
), end="")
if config["logger"] == "wandb" and batch_ind % hyper_parameters["log_interval"] == 0:
logger.log({
"train_loss": avg_loss,
"train_ent_seq_acc": avg_ent_sample_acc,
"train_head_rel_acc": avg_head_rel_sample_acc,
"train_tail_rel_acc": avg_tail_rel_sample_acc,
"learning_rate": optimizer.param_groups[0]['lr'],
"time": time.time() - t_ep,
})
if config["logger"] != "wandb": # only log once for training if logger is not wandb
logger.log({
"train_loss": avg_loss,
"train_ent_seq_acc": avg_ent_sample_acc,
"train_head_rel_acc": avg_head_rel_sample_acc,
"train_tail_rel_acc": avg_tail_rel_sample_acc,
"learning_rate": optimizer.param_groups[0]['lr'],
"time": time.time() - t_ep,
})
def valid(dataloader, ep):
# valid
rel_extractor.eval()
t_ep = time.time()
total_ent_sample_acc, total_head_rel_sample_acc, total_tail_rel_sample_acc = 0., 0., 0.
total_rel_correct_num, total_rel_pred_num, total_rel_gold_num = 0, 0, 0
for batch_ind, batch_valid_data in enumerate(tqdm(dataloader, desc = "Validating")):
ent_sample_acc, head_rel_sample_acc, tail_rel_sample_acc, rel_cpg = valid_step(batch_valid_data)
total_ent_sample_acc += ent_sample_acc
total_head_rel_sample_acc += head_rel_sample_acc
total_tail_rel_sample_acc += tail_rel_sample_acc
total_rel_correct_num += rel_cpg[0]
total_rel_pred_num += rel_cpg[1]
total_rel_gold_num += rel_cpg[2]
avg_ent_sample_acc = total_ent_sample_acc / len(dataloader)
avg_head_rel_sample_acc = total_head_rel_sample_acc / len(dataloader)
avg_tail_rel_sample_acc = total_tail_rel_sample_acc / len(dataloader)
rel_prf = metrics.get_prf_scores(total_rel_correct_num, total_rel_pred_num, total_rel_gold_num)
log_dict = {
"val_ent_seq_acc": avg_ent_sample_acc,
"val_head_rel_acc": avg_head_rel_sample_acc,
"val_tail_rel_acc": avg_tail_rel_sample_acc,
"val_prec": rel_prf[0],
"val_recall": rel_prf[1],
"val_f1": rel_prf[2],
"time": time.time() - t_ep,
}
logger.log(log_dict)
pprint(log_dict)
return rel_prf[2]
for ep in range(num_epoch):
train(train_dataloader, ep)
valid_f1 = valid(valid_dataloader, ep)
global max_f1
if valid_f1 >= max_f1:
max_f1 = valid_f1
if valid_f1 > config["f1_2_save"]: # save the best model
model_state_num = len(glob.glob(model_state_dict_dir + "/model_state_dict_*.pt"))
torch.save(rel_extractor.state_dict(), os.path.join(model_state_dict_dir, "model_state_dict_{}.pt".format(model_state_num)))
# scheduler_state_num = len(glob.glob(schedule_state_dict_dir + "/scheduler_state_dict_*.pt"))
# torch.save(scheduler.state_dict(), os.path.join(schedule_state_dict_dir, "scheduler_state_dict_{}.pt".format(scheduler_state_num)))
print("Current avf_f1: {}, Best f1: {}".format(valid_f1, max_f1))
# In[ ]:
# optimizer
init_learning_rate = float(hyper_parameters["lr"])
optimizer = torch.optim.Adam(rel_extractor.parameters(), lr = init_learning_rate)
if hyper_parameters["scheduler"] == "CAWR":
T_mult = hyper_parameters["T_mult"]
rewarm_epoch_num = hyper_parameters["rewarm_epoch_num"]
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, len(train_dataloader) * rewarm_epoch_num, T_mult)
elif hyper_parameters["scheduler"] == "Step":
decay_rate = hyper_parameters["decay_rate"]
decay_steps = hyper_parameters["decay_steps"]
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size = decay_steps, gamma = decay_rate)
# In[ ]:
if not config["fr_scratch"]:
model_state_path = config["model_state_dict_path"]
rel_extractor.load_state_dict(torch.load(model_state_path))
print("------------model state {} loaded ----------------".format(model_state_path.split("/")[-1]))
train_n_valid(train_dataloader, valid_dataloader, optimizer, scheduler, hyper_parameters["epochs"])
| 24,510 | 39.050654 | 310 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker_plus/config.py | import string
import random
common = {
"exp_name": "duie2", # ace05_lu
"rel2id": "rel2id.json",
"ent2id": "ent2id.json",
"device_num": 1,
# "encoder": "BiLSTM",
"encoder": "BERT",
"hyper_parameters": {
"shaking_type": "cln_plus",
"inner_enc_type": "lstm",
# match_pattern: only_head_text (nyt_star, webnlg_star), whole_text (nyt, webnlg), only_head_index, whole_span, event_extraction
"match_pattern": "whole_text",
},
}
common["run_name"] = "{}+{}+{}".format("TP2", common["hyper_parameters"]["shaking_type"], common["encoder"]) + ""
run_id = ''.join(random.sample(string.ascii_letters + string.digits, 8))
train_config = {
"train_data": "train_data.json",
"valid_data": "valid_data.json",
"rel2id": "rel2id.json",
"logger": "wandb", # if wandb, comment the following four lines
# # if logger is set as default, uncomment the following four lines and comment the line above
# "logger": "default",
# "run_id": run_id,
# "log_path": "./default_log_dir/default.log",
# "path_to_save_model": "./default_log_dir/{}".format(run_id),
# when to save the model state dict
"f1_2_save": 0,
# whether to train from scratch
"fr_scratch": True,
# write down notes here if you want, it will be logged
"note": "start from scratch",
# if not fr scratch, set a model_state_dict
"model_state_dict_path": "", # valid only if "fr_scratch" is False
"hyper_parameters": {
"batch_size": 32,
"epochs": 100,
"seed": 2333,
"log_interval": 10,
"max_seq_len": 128,
"sliding_len": 20,
"scheduler": "CAWR", # Step
"ghm": False, # set True if you want to use GHM to adjust the weights of gradients, this will speed up the training process and might improve the results. (Note that ghm in current version is unstable now, may hurt the results)
"tok_pair_sample_rate": 1, # (0, 1] How many percent of token paris you want to sample for training, this would slow down the training if set to less than 1. It is only helpful when your GPU memory is not enought for the training.
},
}
eval_config = {
"model_state_dict_dir": "./wandb", # if use wandb, set "./wandb", or set "./default_log_dir" if you use default logger
"run_ids": ["1a70p109", ],
"last_k_model": 1,
"test_data": "*test*.json", # "*test*.json"
# results
"save_res": False,
"save_res_dir": "../results",
# score: set true only if test set is tagged
"score": True,
"hyper_parameters": {
"batch_size": 32,
"force_split": False,
"max_seq_len": 512,
"sliding_len": 50,
},
}
bert_config = {
"data_home": "../data4bert",
"bert_path": "../../pretrained_models/chinese-bert-wwm-ext-hit", # bert-base-cased, chinese-bert-wwm-ext-hit
"hyper_parameters": {
"lr": 5e-5,
},
}
bilstm_config = {
"data_home": "../data4bilstm",
"token2idx": "token2idx.json",
"pretrained_word_embedding_path": "../../pretrained_emb/glove_300_nyt.emb",
"hyper_parameters": {
"lr": 1e-3,
"enc_hidden_size": 300,
"dec_hidden_size": 600,
"emb_dropout": 0.1,
"rnn_dropout": 0.1,
"word_embedding_dim": 300,
},
}
cawr_scheduler = {
# CosineAnnealingWarmRestarts
"T_mult": 1,
"rewarm_epoch_num": 2,
}
step_scheduler = {
# StepLR
"decay_rate": 0.999,
"decay_steps": 100,
}
# ---------------------------dicts above is all you need to set---------------------------------------------------
if common["encoder"] == "BERT":
hyper_params = {**common["hyper_parameters"], **bert_config["hyper_parameters"]}
common = {**common, **bert_config}
common["hyper_parameters"] = hyper_params
elif common["encoder"] == "BiLSTM":
hyper_params = {**common["hyper_parameters"], **bilstm_config["hyper_parameters"]}
common = {**common, **bilstm_config}
common["hyper_parameters"] = hyper_params
hyper_params = {**common["hyper_parameters"], **train_config["hyper_parameters"]}
train_config = {**train_config, **common}
train_config["hyper_parameters"] = hyper_params
if train_config["hyper_parameters"]["scheduler"] == "CAWR":
train_config["hyper_parameters"] = {**train_config["hyper_parameters"], **cawr_scheduler}
elif train_config["hyper_parameters"]["scheduler"] == "Step":
train_config["hyper_parameters"] = {**train_config["hyper_parameters"], **step_scheduler}
hyper_params = {**common["hyper_parameters"], **eval_config["hyper_parameters"]}
eval_config = {**eval_config, **common}
eval_config["hyper_parameters"] = hyper_params | 4,688 | 35.92126 | 238 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker_plus/__init__.py | 0 | 0 | 0 | py | |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker_plus/tplinker_plus.py | import re
from tqdm import tqdm
import torch
from IPython.core.debugger import set_trace
import copy
import torch
import torch.nn as nn
import json
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import math
from common.components import HandshakingKernel
from collections import Counter
class HandshakingTaggingScheme(object):
def __init__(self, rel2id, max_seq_len, entity_type2id):
super().__init__()
self.rel2id = rel2id
self.id2rel = {ind:rel for rel, ind in rel2id.items()}
self.separator = "\u2E80"
self.link_types = {"SH2OH", # subject head to object head
"OH2SH", # object head to subject head
"ST2OT", # subject tail to object tail
"OT2ST", # object tail to subject tail
}
self.tags = {self.separator.join([rel, lt]) for rel in self.rel2id.keys() for lt in self.link_types}
self.ent2id = entity_type2id
self.id2ent = {ind:ent for ent, ind in self.ent2id.items()}
self.tags |= {self.separator.join([ent, "EH2ET"]) for ent in self.ent2id.keys()} # EH2ET: entity head to entity tail
self.tags = sorted(self.tags)
self.tag2id = {t:idx for idx, t in enumerate(self.tags)}
self.id2tag = {idx:t for t, idx in self.tag2id.items()}
self.matrix_size = max_seq_len
# map
# e.g. [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]
self.shaking_idx2matrix_idx = [(ind, end_ind) for ind in range(self.matrix_size) for end_ind in list(range(self.matrix_size))[ind:]]
self.matrix_idx2shaking_idx = [[0 for i in range(self.matrix_size)] for j in range(self.matrix_size)]
for shaking_ind, matrix_ind in enumerate(self.shaking_idx2matrix_idx):
self.matrix_idx2shaking_idx[matrix_ind[0]][matrix_ind[1]] = shaking_ind
def get_tag_size(self):
return len(self.tag2id)
def get_spots(self, sample):
'''
matrix_spots: [(tok_pos1, tok_pos2, tag_id), ]
'''
matrix_spots = []
spot_memory_set = set()
def add_spot(spot):
memory = "{},{},{}".format(*spot)
if memory not in spot_memory_set:
matrix_spots.append(spot)
spot_memory_set.add(memory)
# # if entity_list exist, need to distinguish entity types
# if self.ent2id is not None and "entity_list" in sample:
for ent in sample["entity_list"]:
add_spot((ent["tok_span"][0], ent["tok_span"][1] - 1, self.tag2id[self.separator.join([ent["type"], "EH2ET"])]))
for rel in sample["relation_list"]:
subj_tok_span = rel["subj_tok_span"]
obj_tok_span = rel["obj_tok_span"]
rel = rel["predicate"]
# if self.ent2id is None: # set all entities to default type
# add_spot((subj_tok_span[0], subj_tok_span[1] - 1, self.tag2id[self.separator.join(["DEFAULT", "EH2ET"])]))
# add_spot((obj_tok_span[0], obj_tok_span[1] - 1, self.tag2id[self.separator.join(["DEFAULT", "EH2ET"])]))
if subj_tok_span[0] <= obj_tok_span[0]:
add_spot((subj_tok_span[0], obj_tok_span[0], self.tag2id[self.separator.join([rel, "SH2OH"])]))
else:
add_spot((obj_tok_span[0], subj_tok_span[0], self.tag2id[self.separator.join([rel, "OH2SH"])]))
if subj_tok_span[1] <= obj_tok_span[1]:
add_spot((subj_tok_span[1] - 1, obj_tok_span[1] - 1, self.tag2id[self.separator.join([rel, "ST2OT"])]))
else:
add_spot((obj_tok_span[1] - 1, subj_tok_span[1] - 1, self.tag2id[self.separator.join([rel, "OT2ST"])]))
return matrix_spots
def spots2shaking_tag(self, spots):
'''
convert spots to matrix tag
spots: [(start_ind, end_ind, tag_id), ]
return:
shaking_tag: (shaking_seq_len, tag_size)
'''
shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
shaking_tag = torch.zeros(shaking_seq_len, len(self.tag2id)).long()
for sp in spots:
shaking_idx = self.matrix_idx2shaking_idx[sp[0]][sp[1]]
shaking_tag[shaking_idx][sp[2]] = 1
return shaking_tag
def spots2shaking_tag4batch(self, batch_spots):
'''
batch_spots: a batch of spots, [spots1, spots2, ...]
spots: [(start_ind, end_ind, tag_id), ]
return:
batch_shaking_tag: (batch_size, shaking_seq_len, tag_size)
'''
shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
batch_shaking_tag = torch.zeros(len(batch_spots), shaking_seq_len, len(self.tag2id)).long()
for batch_id, spots in enumerate(batch_spots):
for sp in spots:
shaking_idx = self.matrix_idx2shaking_idx[sp[0]][sp[1]]
batch_shaking_tag[batch_id][shaking_idx][sp[2]] = 1
return batch_shaking_tag
def get_spots_fr_shaking_tag(self, shaking_tag):
'''
shaking_tag -> spots
shaking_tag: (shaking_seq_len, tag_id)
spots: [(start_ind, end_ind, tag_id), ]
'''
spots = []
nonzero_points = torch.nonzero(shaking_tag, as_tuple = False)
for point in nonzero_points:
shaking_idx, tag_idx = point[0].item(), point[1].item()
pos1, pos2 = self.shaking_idx2matrix_idx[shaking_idx]
spot = (pos1, pos2, tag_idx)
spots.append(spot)
return spots
def decode_rel(self,
text,
shaking_tag,
tok2char_span,
tok_offset = 0, char_offset = 0):
'''
shaking_tag: (shaking_seq_len, tag_id_num)
'''
rel_list = []
matrix_spots = self.get_spots_fr_shaking_tag(shaking_tag)
# entity
head_ind2entities = {}
ent_list = []
for sp in matrix_spots:
tag = self.id2tag[sp[2]]
ent_type, link_type = tag.split(self.separator)
if link_type != "EH2ET" or sp[0] > sp[1]: # for an entity, the start position can not be larger than the end pos.
continue
char_span_list = tok2char_span[sp[0]:sp[1] + 1]
char_sp = [char_span_list[0][0], char_span_list[-1][1]]
ent_text = text[char_sp[0]:char_sp[1]]
entity = {
"type": ent_type,
"text": ent_text,
"tok_span": [sp[0], sp[1] + 1],
"char_span": char_sp,
}
head_key = str(sp[0]) # take ent_head_pos as the key to entity list
if head_key not in head_ind2entities:
head_ind2entities[head_key] = []
head_ind2entities[head_key].append(entity)
ent_list.append(entity)
# tail link
tail_link_memory_set = set()
for sp in matrix_spots:
tag = self.id2tag[sp[2]]
rel, link_type = tag.split(self.separator)
if link_type == "ST2OT":
tail_link_memory = self.separator.join([rel, str(sp[0]), str(sp[1])])
tail_link_memory_set.add(tail_link_memory)
elif link_type == "OT2ST":
tail_link_memory = self.separator.join([rel, str(sp[1]), str(sp[0])])
tail_link_memory_set.add(tail_link_memory)
# head link
for sp in matrix_spots:
tag = self.id2tag[sp[2]]
rel, link_type = tag.split(self.separator)
if link_type == "SH2OH":
subj_head_key, obj_head_key = str(sp[0]), str(sp[1])
elif link_type == "OH2SH":
subj_head_key, obj_head_key = str(sp[1]), str(sp[0])
else:
continue
if subj_head_key not in head_ind2entities or obj_head_key not in head_ind2entities:
# no entity starts with subj_head_key or obj_head_key
continue
subj_list = head_ind2entities[subj_head_key] # all entities start with this subject head
obj_list = head_ind2entities[obj_head_key] # all entities start with this object head
# go over all subj-obj pair to check whether the tail link exists
for subj in subj_list:
for obj in obj_list:
tail_link_memory = self.separator.join([rel, str(subj["tok_span"][1] - 1), str(obj["tok_span"][1] - 1)])
if tail_link_memory not in tail_link_memory_set:
# no such relation
continue
rel_list.append({
"subject": subj["text"],
"object": obj["text"],
"subj_tok_span": [subj["tok_span"][0] + tok_offset, subj["tok_span"][1] + tok_offset],
"obj_tok_span": [obj["tok_span"][0] + tok_offset, obj["tok_span"][1] + tok_offset],
"subj_char_span": [subj["char_span"][0] + char_offset, subj["char_span"][1] + char_offset],
"obj_char_span": [obj["char_span"][0] + char_offset, obj["char_span"][1] + char_offset],
"predicate": rel,
})
# recover the positons in the original text
for ent in ent_list:
ent["char_span"] = [ent["char_span"][0] + char_offset, ent["char_span"][1] + char_offset]
ent["tok_span"] = [ent["tok_span"][0] + tok_offset, ent["tok_span"][1] + tok_offset]
return rel_list, ent_list
def trans2ee(self, rel_list, ent_list):
sepatator = "_" # \u2E80
trigger_set, arg_iden_set, arg_class_set = set(), set(), set()
trigger_offset2vote = {}
trigger_offset2trigger_text = {}
trigger_offset2trigger_char_span = {}
# get candidate trigger types from relation
for rel in rel_list:
trigger_offset = rel["obj_tok_span"]
trigger_offset_str = "{},{}".format(trigger_offset[0], trigger_offset[1])
trigger_offset2trigger_text[trigger_offset_str] = rel["object"]
trigger_offset2trigger_char_span[trigger_offset_str] = rel["obj_char_span"]
_, event_type = rel["predicate"].split(sepatator)
if trigger_offset_str not in trigger_offset2vote:
trigger_offset2vote[trigger_offset_str] = {}
trigger_offset2vote[trigger_offset_str][event_type] = trigger_offset2vote[trigger_offset_str].get(event_type, 0) + 1
# get candidate trigger types from entity types
for ent in ent_list:
t1, t2 = ent["type"].split(sepatator)
assert t1 == "Trigger" or t1 == "Argument"
if t1 == "Trigger": # trigger
event_type = t2
trigger_span = ent["tok_span"]
trigger_offset_str = "{},{}".format(trigger_span[0], trigger_span[1])
trigger_offset2trigger_text[trigger_offset_str] = ent["text"]
trigger_offset2trigger_char_span[trigger_offset_str] = ent["char_span"]
if trigger_offset_str not in trigger_offset2vote:
trigger_offset2vote[trigger_offset_str] = {}
trigger_offset2vote[trigger_offset_str][event_type] = trigger_offset2vote[trigger_offset_str].get(event_type, 0) + 1.1 # if the vote is tied, the entity type makes the call
# voting
trigger_offset2event = {}
for trigger_offset_str, event_type2score in trigger_offset2vote.items():
event_type = sorted(event_type2score.items(), key = lambda x: x[1], reverse = True)[0][0]
trigger_offset2event[trigger_offset_str] = event_type # final event type
# generate event list
trigger_offset2arguments = {}
for rel in rel_list:
trigger_offset = rel["obj_tok_span"]
argument_role, event_type = rel["predicate"].split(sepatator)
trigger_offset_str = "{},{}".format(trigger_offset[0], trigger_offset[1])
if trigger_offset2event[trigger_offset_str] != event_type: # filter false relations
# set_trace()
continue
# append arguments
if trigger_offset_str not in trigger_offset2arguments:
trigger_offset2arguments[trigger_offset_str] = []
trigger_offset2arguments[trigger_offset_str].append({
"text": rel["subject"],
"type": argument_role,
"char_span": rel["subj_char_span"],
"tok_span": rel["subj_tok_span"],
})
event_list = []
for trigger_offset_str, event_type in trigger_offset2event.items():
arguments = trigger_offset2arguments[trigger_offset_str] if trigger_offset_str in trigger_offset2arguments else []
event = {
"trigger": trigger_offset2trigger_text[trigger_offset_str],
"trigger_char_span": trigger_offset2trigger_char_span[trigger_offset_str],
"trigger_tok_span": trigger_offset_str.split(","),
"trigger_type": event_type,
"argument_list": arguments,
}
event_list.append(event)
return event_list
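# --- illustrative sketch (added; not part of the original code) ---
# The shaking sequence flattens the upper triangle of the seq_len x seq_len
# token-pair matrix row by row, e.g. for seq_len = 3:
# [(0,0), (0,1), (0,2), (1,1), (1,2), (2,2)], so matrix_idx2shaking_idx[1][2]
# is 4 and shaking_idx2matrix_idx[4] is (1, 2). Its length is therefore:
def _shaking_seq_len(matrix_size):
    # number of pairs (i, j) with 0 <= i <= j < matrix_size
    return matrix_size * (matrix_size + 1) // 2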
class DataMaker4Bert():
def __init__(self, tokenizer, shaking_tagger):
self.tokenizer = tokenizer
self.shaking_tagger = shaking_tagger
def get_indexed_data(self, data, max_seq_len, data_type = "train"):
indexed_samples = []
for ind, sample in tqdm(enumerate(data), desc = "Generate indexed train or valid data"):
text = sample["text"]
# codes for bert input
codes = self.tokenizer.encode_plus(text,
return_offsets_mapping = True,
add_special_tokens = False,
max_length = max_seq_len,
truncation = True,
pad_to_max_length = True)
# tagging
matrix_spots = None
if data_type != "test":
matrix_spots = self.shaking_tagger.get_spots(sample)
# get codes
input_ids = torch.tensor(codes["input_ids"]).long()
attention_mask = torch.tensor(codes["attention_mask"]).long()
token_type_ids = torch.tensor(codes["token_type_ids"]).long()
tok2char_span = codes["offset_mapping"]
sample_tp = (sample,
input_ids,
attention_mask,
token_type_ids,
tok2char_span,
matrix_spots,
)
indexed_samples.append(sample_tp)
return indexed_samples
def generate_batch(self, batch_data, data_type = "train"):
sample_list = []
input_ids_list = []
attention_mask_list = []
token_type_ids_list = []
tok2char_span_list = []
matrix_spots_list = []
for tp in batch_data:
sample_list.append(tp[0])
input_ids_list.append(tp[1])
attention_mask_list.append(tp[2])
token_type_ids_list.append(tp[3])
tok2char_span_list.append(tp[4])
if data_type != "test":
matrix_spots_list.append(tp[5])
# @specific: indexed by bert tokenizer
batch_input_ids = torch.stack(input_ids_list, dim = 0)
batch_attention_mask = torch.stack(attention_mask_list, dim = 0)
batch_token_type_ids = torch.stack(token_type_ids_list, dim = 0)
batch_shaking_tag = None
if data_type != "test":
batch_shaking_tag = self.shaking_tagger.spots2shaking_tag4batch(matrix_spots_list)
return sample_list, \
batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, \
batch_shaking_tag
class DataMaker4BiLSTM():
def __init__(self, text2indices, get_tok2char_span_map, shaking_tagger):
self.text2indices = text2indices
self.shaking_tagger = shaking_tagger
self.get_tok2char_span_map = get_tok2char_span_map
def get_indexed_data(self, data, max_seq_len, data_type = "train"):
indexed_samples = []
for ind, sample in tqdm(enumerate(data), desc = "Generate indexed train or valid data"):
text = sample["text"]
# tagging
matrix_spots = None
if data_type != "test":
matrix_spots = self.shaking_tagger.get_spots(sample)
tok2char_span = self.get_tok2char_span_map(text)
tok2char_span.extend([(-1, -1)] * (max_seq_len - len(tok2char_span)))
input_ids = self.text2indices(text, max_seq_len)
sample_tp = (sample,
input_ids,
tok2char_span,
matrix_spots,
)
indexed_samples.append(sample_tp)
return indexed_samples
def generate_batch(self, batch_data, data_type = "train"):
sample_list = []
input_ids_list = []
tok2char_span_list = []
matrix_spots_list = []
for tp in batch_data:
sample_list.append(tp[0])
input_ids_list.append(tp[1])
tok2char_span_list.append(tp[2])
if data_type != "test":
matrix_spots_list.append(tp[3])
batch_input_ids = torch.stack(input_ids_list, dim = 0)
batch_shaking_tag = None
if data_type != "test":
batch_shaking_tag = self.shaking_tagger.spots2shaking_tag4batch(matrix_spots_list)
return sample_list, \
batch_input_ids, tok2char_span_list, \
batch_shaking_tag
class TPLinkerPlusBert(nn.Module):
def __init__(self, encoder,
tag_size,
shaking_type,
inner_enc_type,
tok_pair_sample_rate = 1):
super().__init__()
self.encoder = encoder
self.tok_pair_sample_rate = tok_pair_sample_rate
shaking_hidden_size = encoder.config.hidden_size
self.fc = nn.Linear(shaking_hidden_size, tag_size)
# handshaking kernel
self.handshaking_kernel = HandshakingKernel(shaking_hidden_size, shaking_type, inner_enc_type)
def forward(self, input_ids,
attention_mask,
token_type_ids
):
# input_ids, attention_mask, token_type_ids: (batch_size, seq_len)
context_outputs = self.encoder(input_ids, attention_mask, token_type_ids)
# last_hidden_state: (batch_size, seq_len, hidden_size)
last_hidden_state = context_outputs[0]
seq_len = last_hidden_state.size()[1]
# shaking_hiddens: (batch_size, shaking_seq_len, hidden_size)
shaking_hiddens = self.handshaking_kernel(last_hidden_state)
sampled_tok_pair_indices = None
if self.training:
# randomly sample segments of token pairs
shaking_seq_len = shaking_hiddens.size()[1]
segment_len = int(shaking_seq_len * self.tok_pair_sample_rate)
seg_num = math.ceil(shaking_seq_len / segment_len) # true division, so ceil takes effect
start_ind = torch.randint(seg_num, []) * segment_len
end_ind = min(start_ind + segment_len, shaking_seq_len)
# sampled_tok_pair_indices: (batch_size, ~segment_len); end_ind - start_ind <= segment_len
sampled_tok_pair_indices = torch.arange(start_ind, end_ind)[None, :].repeat(shaking_hiddens.size()[0], 1)
# sampled_tok_pair_indices = torch.randint(shaking_seq_len, (shaking_hiddens.size()[0], segment_len))
sampled_tok_pair_indices = sampled_tok_pair_indices.to(shaking_hiddens.device)
# sampled_tok_pair_indices tells the model which token pairs should be fed into the fc layer
# shaking_hiddens: (batch_size, ~segment_len, hidden_size)
shaking_hiddens = shaking_hiddens.gather(1, sampled_tok_pair_indices[:,:,None].repeat(1, 1, shaking_hiddens.size()[-1]))
# outputs: (batch_size, segment_len, tag_size) or (batch_size, shaking_seq_len, tag_size)
outputs = self.fc(shaking_hiddens)
return outputs, sampled_tok_pair_indices
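# --- illustrative sketch (added; not part of the original model) ---
# During training, forward() above scores only one random contiguous segment
# of the shaking sequence per batch to save GPU memory. A minimal standalone
# version of the index sampling, relying on this module's torch/math imports:
def _sample_segment_indices(batch_size, shaking_seq_len, sample_rate):
    segment_len = int(shaking_seq_len * sample_rate)
    seg_num = math.ceil(shaking_seq_len / segment_len)
    start_ind = int(torch.randint(seg_num, ()).item()) * segment_len
    end_ind = min(start_ind + segment_len, shaking_seq_len)
    return torch.arange(start_ind, end_ind)[None, :].repeat(batch_size, 1)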
class TPLinkerPlusBiLSTM(nn.Module):
def __init__(self, init_word_embedding_matrix,
emb_dropout_rate,
enc_hidden_size,
dec_hidden_size,
rnn_dropout_rate,
tag_size,
shaking_type,
inner_enc_type,
tok_pair_sample_rate = 1
):
super().__init__()
self.word_embeds = nn.Embedding.from_pretrained(init_word_embedding_matrix, freeze = False)
self.emb_dropout = nn.Dropout(emb_dropout_rate)
self.enc_lstm = nn.LSTM(init_word_embedding_matrix.size()[-1],
enc_hidden_size // 2,
num_layers = 1,
bidirectional = True,
batch_first = True)
self.dec_lstm = nn.LSTM(enc_hidden_size,
dec_hidden_size // 2,
num_layers = 1,
bidirectional = True,
batch_first = True)
self.rnn_dropout = nn.Dropout(rnn_dropout_rate)
self.tok_pair_sample_rate = tok_pair_sample_rate
shaking_hidden_size = dec_hidden_size
self.fc = nn.Linear(shaking_hidden_size, tag_size)
# handshaking kernel
self.handshaking_kernel = HandshakingKernel(shaking_hidden_size, shaking_type, inner_enc_type)
def forward(self, input_ids):
# input_ids: (batch_size, seq_len)
# embedding: (batch_size, seq_len, emb_dim)
embedding = self.word_embeds(input_ids)
embedding = self.emb_dropout(embedding)
# lstm_outputs: (batch_size, seq_len, enc_hidden_size)
lstm_outputs, _ = self.enc_lstm(embedding)
lstm_outputs = self.rnn_dropout(lstm_outputs)
# lstm_outputs: (batch_size, seq_len, dec_hidden_size)
lstm_outputs, _ = self.dec_lstm(lstm_outputs)
lstm_outputs = self.rnn_dropout(lstm_outputs)
seq_len = lstm_outputs.size()[1]
# shaking_hiddens: (batch_size, shaking_seq_len, dec_hidden_size)
shaking_hiddens = self.handshaking_kernel(lstm_outputs)
sampled_tok_pair_indices = None
if self.training:
# randomly sample segments of token pairs
shaking_seq_len = shaking_hiddens.size()[1]
segment_len = int(shaking_seq_len * self.tok_pair_sample_rate)
seg_num = math.ceil(shaking_seq_len / segment_len) # true division, so ceil takes effect
start_ind = torch.randint(seg_num, []) * segment_len
end_ind = min(start_ind + segment_len, shaking_seq_len)
# sampled_tok_pair_indices: (batch_size, ~segment_len); end_ind - start_ind <= segment_len
sampled_tok_pair_indices = torch.arange(start_ind, end_ind)[None, :].repeat(shaking_hiddens.size()[0], 1)
# sampled_tok_pair_indices = torch.randint(shaking_hiddens, (shaking_hiddens.size()[0], segment_len))
sampled_tok_pair_indices = sampled_tok_pair_indices.to(shaking_hiddens.device)
# sampled_tok_pair_indices tells the model which token pairs should be fed into the fc layer
# shaking_hiddens: (batch_size, ~segment_len, hidden_size)
shaking_hiddens = shaking_hiddens.gather(1, sampled_tok_pair_indices[:,:,None].repeat(1, 1, shaking_hiddens.size()[-1]))
# outputs: (batch_size, segment_len, tag_size) or (batch_size, shaking_seq_len, tag_size)
outputs = self.fc(shaking_hiddens)
return outputs, sampled_tok_pair_indices
class MetricsCalculator():
def __init__(self, shaking_tagger):
self.shaking_tagger = shaking_tagger
self.last_weights = None # for exponential moving averaging
def GHM(self, gradient, bins = 10, beta = 0.9):
'''
gradient: per-example gradient (loss) values of this batch; (batch_size, shaking_seq_len)
'''
avg = torch.mean(gradient)
std = torch.std(gradient) + 1e-12
gradient_norm = torch.sigmoid((gradient - avg) / std) # normalization and pass through sigmoid to 0 ~ 1.
min_, max_ = torch.min(gradient_norm), torch.max(gradient_norm)
gradient_norm = (gradient_norm - min_) / (max_ - min_)
gradient_norm = torch.clamp(gradient_norm, 0, 0.9999999) # ensure elements in gradient_norm != 1.
example_sum = torch.flatten(gradient_norm).size()[0] # N
# calculate weights
current_weights = torch.zeros(bins).to(gradient.device)
hits_vec = torch.zeros(bins).to(gradient.device)
count_hits = 0 # count of hits
for i in range(bins):
bar = float((i + 1) / bins)
hits = torch.sum((gradient_norm <= bar)) - count_hits
count_hits += hits
hits_vec[i] = hits.item()
current_weights[i] = example_sum / bins / (hits.item() + example_sum / bins )
# EMA: exponential moving averaging
# print()
# print("hits_vec: {}".format(hits_vec))
# print("current_weights: {}".format(current_weights))
if self.last_weights is None:
self.last_weights = torch.ones(bins).to(gradient.device) # init by ones
current_weights = self.last_weights * beta + (1 - beta) * current_weights
self.last_weights = current_weights
# print("ema current_weights: {}".format(current_weights))
# weights4examples: pick weights for all examples
weight_pk_idx = (gradient_norm / (1 / bins)).long()[:, :, None]
weights_rp = current_weights[None, None, :].repeat(gradient_norm.size()[0], gradient_norm.size()[1], 1)
weights4examples = torch.gather(weights_rp, -1, weight_pk_idx).squeeze(-1)
weights4examples /= torch.sum(weights4examples)
return weights4examples * gradient # return weighted gradients
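# --- Added worked example (illustrative, not original code) ---
# Binning arithmetic for the GHM weights above: a normalized gradient g lands
# in bin floor(g * bins); each bin's weight is (N/bins) / (hits + N/bins), so
# crowded bins (many easy examples) are down-weighted. E.g. with N = 100 and
# bins = 10, a bin holding 50 examples gets weight 10 / (50 + 10) ~= 0.167,
# while a bin holding 2 examples gets 10 / (2 + 10) ~= 0.833 (before the EMA
# smoothing and the final normalization by the weight sum).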
# loss func
def _multilabel_categorical_crossentropy(self, y_pred, y_true, ghm = True):
"""
y_pred: (batch_size, shaking_seq_len, type_size)
y_true: (batch_size, shaking_seq_len, type_size)
y_true and y_pred have the same shape; elements in y_true are either 0 or 1,
1 tags positive classes, 0 tags negative classes (i.e. the token pair does not have this type of link).
"""
y_pred = (1 - 2 * y_true) * y_pred # -1 -> pos classes, 1 -> neg classes
y_pred_neg = y_pred - y_true * 1e12 # mask the pred outputs of pos classes
y_pred_pos = y_pred - (1 - y_true) * 1e12 # mask the pred outputs of neg classes
zeros = torch.zeros_like(y_pred[..., :1]) # st - st
y_pred_neg = torch.cat([y_pred_neg, zeros], dim = -1)
y_pred_pos = torch.cat([y_pred_pos, zeros], dim = -1)
neg_loss = torch.logsumexp(y_pred_neg, dim = -1)
pos_loss = torch.logsumexp(y_pred_pos, dim = -1)
if ghm:
return (self.GHM(neg_loss + pos_loss, bins = 1000)).sum()
else:
return (neg_loss + pos_loss).mean()
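# --- Added reference note (not original code) ---
# The routine above is Su Jianlin's multilabel categorical crossentropy; per
# position it computes, in closed form,
#   loss = log(1 + sum_{i in neg} exp(s_i)) + log(1 + sum_{j in pos} exp(-s_j))
# The zeros column concatenated before each logsumexp supplies the "1 +" term,
# and the 1e12 masks remove the opposite class from each sum.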
def loss_func(self, y_pred, y_true, ghm):
return self._multilabel_categorical_crossentropy(y_pred, y_true, ghm = ghm)
def get_sample_accuracy(self, pred, truth):
'''
Compute the proportion of samples in this batch whose pred exactly matches truth.
'''
# # (batch_size, ..., seq_len, tag_size) -> (batch_size, ..., seq_len)
# pred = torch.argmax(pred, dim = -1)
# (batch_size, ..., seq_len) -> (batch_size, seq_len)
pred = pred.view(pred.size()[0], -1)
truth = truth.view(truth.size()[0], -1)
# (batch_size, ), each element is the number of tags where pred and truth agree
correct_tag_num = torch.sum(torch.eq(truth, pred).float(), dim = 1)
# all tags along the seq dimension must be correct, so correct_tag_num must equal the seq length for a sample to count as correct
sample_acc_ = torch.eq(correct_tag_num, torch.ones_like(correct_tag_num) * truth.size()[-1]).float()
sample_acc = torch.mean(sample_acc_)
return sample_acc
def get_mark_sets_event(self, event_list):
trigger_iden_set, trigger_class_set, arg_iden_set, arg_class_set = set(), set(), set(), set()
for event in event_list:
event_type = event["trigger_type"]
trigger_offset = event["trigger_tok_span"]
trigger_iden_set.add("{}\u2E80{}".format(trigger_offset[0], trigger_offset[1]))
trigger_class_set.add("{}\u2E80{}\u2E80{}".format(event_type, trigger_offset[0], trigger_offset[1]))
for arg in event["argument_list"]:
argument_offset = arg["tok_span"]
argument_role = arg["type"]
arg_iden_set.add("{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(event_type, trigger_offset[0], trigger_offset[1], argument_offset[0], argument_offset[1]))
arg_class_set.add("{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(event_type, trigger_offset[0], trigger_offset[1], argument_offset[0], argument_offset[1], argument_role))
return trigger_iden_set, \
trigger_class_set, \
arg_iden_set, \
arg_class_set
# def get_mark_sets_rel(self, pred_rel_list, gold_rel_list, pred_ent_list, gold_ent_list, pattern = "only_head_text", gold_event_list = None):
# if pattern == "only_head_index":
# gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["predicate"], rel["obj_tok_span"][0]) for rel in gold_rel_list])
# pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["predicate"], rel["obj_tok_span"][0]) for rel in pred_rel_list])
# gold_ent_set = set(["{}\u2E80{}".format(ent["tok_span"][0], ent["type"]) for ent in gold_ent_list])
# pred_ent_set = set(["{}\u2E80{}".format(ent["tok_span"][0], ent["type"]) for ent in pred_ent_list])
# elif pattern == "whole_span":
# gold_rel_set = set(["{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["subj_tok_span"][1], rel["predicate"], rel["obj_tok_span"][0], rel["obj_tok_span"][1]) for rel in gold_rel_list])
# pred_rel_set = set(["{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["subj_tok_span"][1], rel["predicate"], rel["obj_tok_span"][0], rel["obj_tok_span"][1]) for rel in pred_rel_list])
# gold_ent_set = set(["{}\u2E80{}\u2E80{}".format(ent["tok_span"][0], ent["tok_span"][1], ent["type"]) for ent in gold_ent_list])
# pred_ent_set = set(["{}\u2E80{}\u2E80{}".format(ent["tok_span"][0], ent["tok_span"][1], ent["type"]) for ent in pred_ent_list])
# elif pattern == "whole_text":
# gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"], rel["predicate"], rel["object"]) for rel in gold_rel_list])
# pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"], rel["predicate"], rel["object"]) for rel in pred_rel_list])
# gold_ent_set = set(["{}\u2E80{}".format(ent["text"], ent["type"]) for ent in gold_ent_list])
# pred_ent_set = set(["{}\u2E80{}".format(ent["text"], ent["type"]) for ent in pred_ent_list])
# elif pattern == "only_head_text":
# gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"].split(" ")[0], rel["predicate"], rel["object"].split(" ")[0]) for rel in gold_rel_list])
# pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"].split(" ")[0], rel["predicate"], rel["object"].split(" ")[0]) for rel in pred_rel_list])
# gold_ent_set = set(["{}\u2E80{}".format(ent["text"].split(" ")[0], ent["type"]) for ent in gold_ent_list])
# pred_ent_set = set(["{}\u2E80{}".format(ent["text"].split(" ")[0], ent["type"]) for ent in pred_ent_list])
# return pred_rel_set, gold_rel_set, pred_ent_set, gold_ent_set
def get_mark_sets_rel(self, rel_list, ent_list, pattern = "only_head_text"):
if pattern == "only_head_index":
rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["predicate"], rel["obj_tok_span"][0]) for rel in rel_list])
ent_set = set(["{}\u2E80{}".format(ent["tok_span"][0], ent["type"]) for ent in ent_list])
elif pattern == "whole_span":
rel_set = set(["{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["subj_tok_span"][1], rel["predicate"], rel["obj_tok_span"][0], rel["obj_tok_span"][1]) for rel in rel_list])
ent_set = set(["{}\u2E80{}\u2E80{}".format(ent["tok_span"][0], ent["tok_span"][1], ent["type"]) for ent in ent_list])
elif pattern == "whole_text":
rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"], rel["predicate"], rel["object"]) for rel in rel_list])
ent_set = set(["{}\u2E80{}".format(ent["text"], ent["type"]) for ent in ent_list])
elif pattern == "only_head_text":
rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"].split(" ")[0], rel["predicate"], rel["object"].split(" ")[0]) for rel in rel_list])
ent_set = set(["{}\u2E80{}".format(ent["text"].split(" ")[0], ent["type"]) for ent in ent_list])
return rel_set, ent_set
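# --- Added illustrative example (not original code) ---
# Under pattern "only_head_text", rel = {"subject": "New York", "predicate":
# "located_in", "object": "United States"} yields the mark
# "New\u2E80located_in\u2E80United": \u2E80 is simply an unlikely separator
# character, so set intersection later compares whole triples at once.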
def _cal_cpg(self, pred_set, gold_set, cpg):
'''
cpg is a list: [correct_num, pred_num, gold_num]
'''
for mark_str in pred_set:
if mark_str in gold_set:
cpg[0] += 1
cpg[1] += len(pred_set)
cpg[2] += len(gold_set)
def cal_rel_cpg(self, pred_rel_list, pred_ent_list, gold_rel_list, gold_ent_list, ere_cpg_dict, pattern):
'''
ere_cpg_dict = {
"rel_cpg": [0, 0, 0],
"ent_cpg": [0, 0, 0],
}
pattern: metric pattern
'''
gold_rel_set, gold_ent_set = self.get_mark_sets_rel(gold_rel_list, gold_ent_list, pattern)
pred_rel_set, pred_ent_set = self.get_mark_sets_rel(pred_rel_list, pred_ent_list, pattern)
self._cal_cpg(pred_rel_set, gold_rel_set, ere_cpg_dict["rel_cpg"])
self._cal_cpg(pred_ent_set, gold_ent_set, ere_cpg_dict["ent_cpg"])
def cal_event_cpg(self, pred_event_list, gold_event_list, ee_cpg_dict):
'''
ee_cpg_dict = {
"trigger_iden_cpg": [0, 0, 0],
"trigger_class_cpg": [0, 0, 0],
"arg_iden_cpg": [0, 0, 0],
"arg_class_cpg": [0, 0, 0],
}
'''
pred_trigger_iden_set, \
pred_trigger_class_set, \
pred_arg_iden_set, \
pred_arg_class_set = self.get_mark_sets_event(pred_event_list)
gold_trigger_iden_set, \
gold_trigger_class_set, \
gold_arg_iden_set, \
gold_arg_class_set = self.get_mark_sets_event(gold_event_list)
self._cal_cpg(pred_trigger_iden_set, gold_trigger_iden_set, ee_cpg_dict["trigger_iden_cpg"])
self._cal_cpg(pred_trigger_class_set, gold_trigger_class_set, ee_cpg_dict["trigger_class_cpg"])
self._cal_cpg(pred_arg_iden_set, gold_arg_iden_set, ee_cpg_dict["arg_iden_cpg"])
self._cal_cpg(pred_arg_class_set, gold_arg_class_set, ee_cpg_dict["arg_class_cpg"])
def get_cpg(self, sample_list,
tok2char_span_list,
batch_pred_shaking_tag,
pattern = "only_head_text"):
'''
return correct number, predict number, gold number (cpg)
'''
ee_cpg_dict = {
"trigger_iden_cpg": [0, 0, 0],
"trigger_class_cpg": [0, 0, 0],
"arg_iden_cpg": [0, 0, 0],
"arg_class_cpg": [0, 0, 0],
}
ere_cpg_dict = {
"rel_cpg": [0, 0, 0],
"ent_cpg": [0, 0, 0],
}
# go through all sentences
for ind in range(len(sample_list)):
sample = sample_list[ind]
text = sample["text"]
tok2char_span = tok2char_span_list[ind]
pred_shaking_tag = batch_pred_shaking_tag[ind]
pred_rel_list, pred_ent_list = self.shaking_tagger.decode_rel(text,
pred_shaking_tag,
tok2char_span) # decoding
gold_rel_list = sample["relation_list"]
gold_ent_list = sample["entity_list"]
if pattern == "event_extraction":
pred_event_list = self.shaking_tagger.trans2ee(pred_rel_list, pred_ent_list) # transform to event list
gold_event_list = sample["event_list"]
self.cal_event_cpg(pred_event_list, gold_event_list, ee_cpg_dict)
else:
self.cal_rel_cpg(pred_rel_list, pred_ent_list, gold_rel_list, gold_ent_list, ere_cpg_dict, pattern)
if pattern == "event_extraction":
return ee_cpg_dict
else:
return ere_cpg_dict
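# --- Added usage sketch (illustrative, not original code) ---
# After a validation pass one would typically do:
#   cpg_dict = metrics.get_cpg(sample_list, tok2char_span_list, batch_pred_shaking_tag)
#   p, r, f1 = metrics.get_prf_scores(*cpg_dict["rel_cpg"])
# e.g. correct = 80, pred = 100, gold = 90 gives precision 0.8,
# recall ~= 0.889 and f1 ~= 0.842.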
def get_prf_scores(self, correct_num, pred_num, gold_num):
minimini = 1e-12
precision = correct_num / (pred_num + minimini)
recall = correct_num / (gold_num + minimini)
f1 = 2 * precision * recall / (precision + recall + minimini)
return precision, recall, f1 | 37,902 | 47.59359 | 220 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker_plus/train.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import json
import os
from tqdm import tqdm
import re
from IPython.core.debugger import set_trace
from pprint import pprint
import unicodedata
from transformers import BertModel, BertTokenizerFast
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
import glob
import time
import logging
from common.utils import Preprocessor, DefaultLogger
from tplinker_plus import (HandshakingTaggingScheme,
DataMaker4Bert,
DataMaker4BiLSTM,
TPLinkerPlusBert,
TPLinkerPlusBiLSTM,
MetricsCalculator)
import wandb
from glove import Glove
import numpy as np
import config
# In[ ]:
config = config.train_config
hyper_parameters = config["hyper_parameters"]
# In[ ]:
os.environ["TOKENIZERS_PARALLELISM"] = "true"
os.environ["CUDA_VISIBLE_DEVICES"] = str(config["device_num"])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# In[ ]:
# for reproductivity
torch.manual_seed(hyper_parameters["seed"]) # pytorch random seed
torch.backends.cudnn.deterministic = True
# In[ ]:
data_home = config["data_home"]
experiment_name = config["exp_name"]
train_data_path = os.path.join(data_home, experiment_name, config["train_data"])
valid_data_path = os.path.join(data_home, experiment_name, config["valid_data"])
rel2id_path = os.path.join(data_home, experiment_name, config["rel2id"])
ent2id_path = os.path.join(data_home, experiment_name, config["ent2id"])
# In[ ]:
if config["logger"] == "wandb":
# init wandb
wandb.init(project = experiment_name,
name = config["run_name"],
config = hyper_parameters # Initialize config
)
wandb.config.note = config["note"]
model_state_dict_dir = wandb.run.dir
logger = wandb
else:
logger = DefaultLogger(config["log_path"], experiment_name, config["run_name"], config["run_id"], hyper_parameters)
model_state_dict_dir = config["path_to_save_model"]
if not os.path.exists(model_state_dict_dir):
os.makedirs(model_state_dict_dir)
# # Load Data
# In[ ]:
train_data = json.load(open(train_data_path, "r", encoding = "utf-8"))
valid_data = json.load(open(valid_data_path, "r", encoding = "utf-8"))
# # Split
# In[ ]:
# @specific
if config["encoder"] == "BERT":
tokenizer = BertTokenizerFast.from_pretrained(config["bert_path"], add_special_tokens = False, do_lower_case = False)
tokenize = tokenizer.tokenize
get_tok2char_span_map = lambda text: tokenizer.encode_plus(text, return_offsets_mapping = True, add_special_tokens = False)["offset_mapping"]
elif config["encoder"] in {"BiLSTM", }:
tokenize = lambda text: text.split(" ")
def get_tok2char_span_map(text):
tokens = text.split(" ")
tok2char_span = []
char_num = 0
for tok in tokens:
tok2char_span.append((char_num, char_num + len(tok)))
char_num += len(tok) + 1 # +1: whitespace
return tok2char_span
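# --- Added illustrative check (not original code) ---
# For text = "New York City" the whitespace tokenizer above yields
# tok2char_span = [(0, 3), (4, 8), (9, 13)]: per-token (start, end) character
# offsets with end exclusive, the +1 accounting for the separating space.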
# In[ ]:
preprocessor = Preprocessor(tokenize_func = tokenize,
get_tok2char_span_map_func = get_tok2char_span_map)
# In[ ]:
# train and valid max token num
max_tok_num = 0
all_data = train_data + valid_data
for sample in all_data:
tokens = tokenize(sample["text"])
max_tok_num = max(max_tok_num, len(tokens))
max_tok_num
# In[ ]:
if max_tok_num > hyper_parameters["max_seq_len"]:
train_data = preprocessor.split_into_short_samples(train_data,
hyper_parameters["max_seq_len"],
sliding_len = hyper_parameters["sliding_len"],
encoder = config["encoder"]
)
valid_data = preprocessor.split_into_short_samples(valid_data,
hyper_parameters["max_seq_len"],
sliding_len = hyper_parameters["sliding_len"],
encoder = config["encoder"]
)
# In[ ]:
print("train: {}".format(len(train_data)), "valid: {}".format(len(valid_data)))
# In[ ]:
# count_neg = 0 # 74.8% are neg samples 0.7485367594575303
# for example in train_data + valid_data:
# if len(example["relation_list"]) == 0 and len(example["entity_list"]) == 0:
# count_neg += 1
# print(count_neg/len(indexed_train_data + indexed_valid_data))
# # Tagger (Decoder)
# In[ ]:
max_seq_len = min(max_tok_num, hyper_parameters["max_seq_len"])
rel2id = json.load(open(rel2id_path, "r", encoding = "utf-8"))
ent2id = json.load(open(ent2id_path, "r", encoding = "utf-8"))
handshaking_tagger = HandshakingTaggingScheme(rel2id, max_seq_len, ent2id)
tag_size = handshaking_tagger.get_tag_size()
# In[ ]:
def sample_equal_to(sample1, sample2):
assert sample1["id"] == sample2["id"]
assert sample1["text"] == sample2["text"]
memory_set = set()
for rel in sample2["relation_list"]:
memory = "{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subject"],
rel["predicate"],
rel["object"],
*rel["subj_tok_span"],
*rel["obj_tok_span"])
memory_set.add(memory)
for rel in sample1["relation_list"]:
memory = "{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subject"],
rel["predicate"],
rel["object"],
*rel["subj_tok_span"],
*rel["obj_tok_span"])
if memory not in memory_set:
set_trace()
return False
return True
# In[ ]:
# # check tagging and decoding
# batch_size = hyper_parameters["batch_size"]
# for idx in tqdm(range(0, len(train_data), batch_size), desc = "check tagging and decoding"):
# batch_matrix_spots = []
# batch_data = train_data[idx:idx + batch_size]
# for sample in batch_data:
# matrix_spots = handshaking_tagger.get_spots(sample)
# # %timeit shaking_tagger.get_spots(sample)
# batch_matrix_spots.append(matrix_spots)
# # tagging
# # batch_shaking_tag: (batch_size, rel_id, seq_len, seq_len)
# batch_shaking_tag = handshaking_tagger.spots2shaking_tag4batch(batch_matrix_spots)
# # %timeit shaking_tagger.spots2shaking_tag4batch(batch_matrix_spots) #0.3s
# for batch_idx in range(len(batch_data)):
# gold_sample = batch_data[batch_idx]
# shaking_tag = batch_shaking_tag[batch_idx]
# # decode
# text = batch_data[batch_idx]["text"]
# tok2char_span = get_tok2char_span_map(text)
# rel_list = handshaking_tagger.decode_rel(text, shaking_tag, tok2char_span)
# pred_sample = {
# "text": text,
# "id": gold_sample["id"],
# "relation_list": rel_list,
# }
# if not sample_equal_to(pred_sample, gold_sample) or not sample_equal_to(gold_sample, pred_sample):
# set_trace()
# # Dataset
# In[ ]:
if config["encoder"] == "BERT":
tokenizer = BertTokenizerFast.from_pretrained(config["bert_path"], add_special_tokens = False, do_lower_case = False)
data_maker = DataMaker4Bert(tokenizer, handshaking_tagger)
elif config["encoder"] in {"BiLSTM", }:
token2idx_path = os.path.join(data_home, experiment_name, config["token2idx"])
token2idx = json.load(open(token2idx_path, "r", encoding = "utf-8"))
idx2token = {idx:tok for tok, idx in token2idx.items()}
def text2indices(text, max_seq_len):
input_ids = []
tokens = text.split(" ")
for tok in tokens:
if tok not in token2idx:
input_ids.append(token2idx['<UNK>'])
else:
input_ids.append(token2idx[tok])
if len(input_ids) < max_seq_len:
input_ids.extend([token2idx['<PAD>']] * (max_seq_len - len(input_ids)))
input_ids = torch.tensor(input_ids[:max_seq_len])
return input_ids
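# --- Added illustrative example (not original code) ---
# With token2idx = {"<PAD>": 0, "<UNK>": 1, "good": 2} and max_seq_len = 4,
# text2indices("good movie", 4) maps the unknown token "movie" to <UNK> and
# pads on the right, returning tensor([2, 1, 0, 0]).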
data_maker = DataMaker4BiLSTM(text2indices, get_tok2char_span_map, handshaking_tagger)
# In[ ]:
class MyDataset(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
# In[ ]:
indexed_train_data = data_maker.get_indexed_data(train_data, max_seq_len)
indexed_valid_data = data_maker.get_indexed_data(valid_data, max_seq_len)
# In[ ]:
train_dataloader = DataLoader(MyDataset(indexed_train_data),
batch_size = hyper_parameters["batch_size"],
shuffle = True,
num_workers = 6,
drop_last = False,
collate_fn = data_maker.generate_batch,
)
valid_dataloader = DataLoader(MyDataset(indexed_valid_data),
batch_size = hyper_parameters["batch_size"],
shuffle = True,
num_workers = 6,
drop_last = False,
collate_fn = data_maker.generate_batch,
)
# In[ ]:
# # have a look at dataloader
# train_data_iter = iter(train_dataloader)
# batch_data = next(train_data_iter)
# text_id_list, text_list, batch_input_ids, \
# batch_attention_mask, batch_token_type_ids, \
# tok2char_span_list, batch_shaking_tag = batch_data
# print(text_list[0])
# print()
# print(tokenizer.decode(batch_input_ids[0].tolist()))
# print(batch_input_ids.size())
# print(batch_attention_mask.size())
# print(batch_token_type_ids.size())
# print(len(tok2char_span_list))
# print(batch_shaking_tag.size())
# # decode
# idx = 2
# print(text_list[idx])
# shaking_tag = batch_shaking_tag[idx]
# text = text_list[idx]
# tok2char_span = tok2char_span_list[idx]
# handshaking_tagger.decode_rel(text, shaking_tag, tok2char_span)
# # Model
# In[ ]:
if config["encoder"] == "BERT":
encoder = BertModel.from_pretrained(config["bert_path"])
hidden_size = encoder.config.hidden_size
rel_extractor = TPLinkerPlusBert(encoder,
tag_size,
hyper_parameters["shaking_type"],
hyper_parameters["inner_enc_type"],
hyper_parameters["tok_pair_sample_rate"]
)
elif config["encoder"] in {"BiLSTM", }:
glove = Glove()
glove = glove.load(config["pretrained_word_embedding_path"])
# prepare embedding matrix
word_embedding_init_matrix = np.random.normal(-1, 1, size=(len(token2idx), hyper_parameters["word_embedding_dim"]))
count_in = 0
# tokens found in the pretrained vocabulary use the pretrained vector,
# tokens not in it keep the random initialization
for ind, tok in tqdm(idx2token.items(), desc="Embedding matrix initializing..."):
if tok in glove.dictionary:
count_in += 1
word_embedding_init_matrix[ind] = glove.word_vectors[glove.dictionary[tok]]
print("{:.4f} tokens are in the pretrain word embedding matrix".format(count_in / len(idx2token))) # 命中预训练词向量的比例
word_embedding_init_matrix = torch.FloatTensor(word_embedding_init_matrix)
rel_extractor = TPLinkerPlusBiLSTM(word_embedding_init_matrix,
hyper_parameters["emb_dropout"],
hyper_parameters["enc_hidden_size"],
hyper_parameters["dec_hidden_size"],
hyper_parameters["rnn_dropout"],
tag_size,
hyper_parameters["shaking_type"],
hyper_parameters["inner_enc_type"],
hyper_parameters["tok_pair_sample_rate"],
)
rel_extractor = rel_extractor.to(device)
# In[ ]:
# # test outputs
# rel_extractor.train()
# with torch.no_grad():
# outputs, sampled_tok_pair_indices = rel_extractor(batch_input_ids.to(device),
# batch_attention_mask.to(device),
# batch_token_type_ids.to(device),
# )
# print(outputs.size())
# if rel_extractor.training:
# print(sampled_tok_pair_indices.size())
# # Metrics
# In[ ]:
metrics = MetricsCalculator(handshaking_tagger)
loss_func = lambda y_pred, y_true: metrics.loss_func(y_pred, y_true, ghm = hyper_parameters["ghm"])
# # Train
# In[ ]:
# train step
def train_step(batch_train_data, optimizer):
if config["encoder"] == "BERT":
sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, batch_shaking_tag = batch_train_data
batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_shaking_tag = (batch_input_ids.to(device),
batch_attention_mask.to(device),
batch_token_type_ids.to(device),
batch_shaking_tag.to(device)
)
elif config["encoder"] in {"BiLSTM", }:
sample_list, batch_input_ids, tok2char_span_list, batch_shaking_tag = batch_train_data
batch_input_ids, batch_shaking_tag = (batch_input_ids.to(device),
batch_shaking_tag.to(device)
)
# zero the parameter gradients
optimizer.zero_grad()
if config["encoder"] == "BERT":
pred_small_shaking_outputs, sampled_tok_pair_indices = rel_extractor(batch_input_ids,
batch_attention_mask,
batch_token_type_ids
)
elif config["encoder"] in {"BiLSTM", }:
pred_small_shaking_outputs, sampled_tok_pair_indices = rel_extractor(batch_input_ids)
# sampled_tok_pair_indices: (batch_size, ~segment_len)
# batch_small_shaking_tag: (batch_size, ~segment_len, tag_size)
batch_small_shaking_tag = batch_shaking_tag.gather(1, sampled_tok_pair_indices[:, :, None].repeat(1, 1, tag_size))
loss = loss_func(pred_small_shaking_outputs, batch_small_shaking_tag)
# set_trace()
# t1 = time.time()
loss.backward()
optimizer.step()
# print("bp: {}".format(time.time() - t1))
pred_small_shaking_tag = (pred_small_shaking_outputs > 0.).long()
sample_acc = metrics.get_sample_accuracy(pred_small_shaking_tag,
batch_small_shaking_tag)
return loss.item(), sample_acc.item()
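# --- Added note (not original code) ---
# batch_shaking_tag.gather(1, ...) above selects the same token-pair segment
# the model scored during sampling, so the loss compares predictions of shape
# (batch_size, ~segment_len, tag_size) against labels of identical shape.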
# valid step
def valid_step(batch_valid_data):
if config["encoder"] == "BERT":
sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, batch_shaking_tag = batch_valid_data
batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_shaking_tag = (batch_input_ids.to(device),
batch_attention_mask.to(device),
batch_token_type_ids.to(device),
batch_shaking_tag.to(device)
)
elif config["encoder"] in {"BiLSTM", }:
sample_list, batch_input_ids, tok2char_span_list, batch_shaking_tag = batch_valid_data
batch_input_ids, batch_shaking_tag = (batch_input_ids.to(device),
batch_shaking_tag.to(device)
)
with torch.no_grad():
if config["encoder"] == "BERT":
pred_shaking_outputs, _ = rel_extractor(batch_input_ids,
batch_attention_mask,
batch_token_type_ids,
)
elif config["encoder"] in {"BiLSTM", }:
pred_shaking_outputs, _ = rel_extractor(batch_input_ids)
pred_shaking_tag = (pred_shaking_outputs > 0.).long()
sample_acc = metrics.get_sample_accuracy(pred_shaking_tag,
batch_shaking_tag)
cpg_dict = metrics.get_cpg(sample_list,
tok2char_span_list,
pred_shaking_tag,
hyper_parameters["match_pattern"])
return sample_acc.item(), cpg_dict
# In[ ]:
max_f1 = 0.
def train_n_valid(train_dataloader, dev_dataloader, optimizer, scheduler, num_epoch):
def train(dataloader, ep):
# train
rel_extractor.train()
t_ep = time.time()
total_loss, total_sample_acc = 0., 0.
for batch_ind, batch_train_data in enumerate(dataloader):
t_batch = time.time()
loss, sample_acc = train_step(batch_train_data, optimizer)
total_loss += loss
total_sample_acc += sample_acc
avg_loss = total_loss / (batch_ind + 1)
# scheduler
if hyper_parameters["scheduler"] == "ReduceLROnPlateau":
scheduler.step(avg_loss)
else:
scheduler.step()
avg_sample_acc = total_sample_acc / (batch_ind + 1)
batch_print_format = "\rproject: {}, run_name: {}, Epoch: {}/{}, batch: {}/{}, train_loss: {}, " + "t_sample_acc: {}," + "lr: {}, batch_time: {}, total_time: {} -------------"
print(batch_print_format.format(experiment_name, config["run_name"],
ep + 1, num_epoch,
batch_ind + 1, len(dataloader),
avg_loss,
avg_sample_acc,
optimizer.param_groups[0]['lr'],
time.time() - t_batch,
time.time() - t_ep,
), end="")
if config["logger"] == "wandb" and batch_ind % hyper_parameters["log_interval"] == 0:
logger.log({
"train_loss": avg_loss,
"train_small_shaking_seq_acc": avg_sample_acc,
"learning_rate": optimizer.param_groups[0]['lr'],
"time": time.time() - t_ep,
})
if config["logger"] != "wandb": # only log once for training if logger is not wandb
logger.log({
"train_loss": avg_loss,
"train_small_shaking_seq_acc": avg_sample_acc,
"learning_rate": optimizer.param_groups[0]['lr'],
"time": time.time() - t_ep,
})
def valid(dataloader, ep):
# valid
rel_extractor.eval()
t_ep = time.time()
total_sample_acc = 0.
# total_rel_correct_num, total_rel_pred_num, total_rel_gold_num = 0, 0, 0
# total_ent_correct_num, total_ent_pred_num, total_ent_gold_num = 0, 0, 0
total_cpg_dict = {}
for batch_ind, batch_valid_data in enumerate(tqdm(dataloader, desc = "Validating")):
sample_acc, cpg_dict = valid_step(batch_valid_data)
total_sample_acc += sample_acc
# init total_cpg_dict
for k in cpg_dict.keys():
if k not in total_cpg_dict:
total_cpg_dict[k] = [0, 0, 0]
for k, cpg in cpg_dict.items():
for idx, n in enumerate(cpg):
total_cpg_dict[k][idx] += cpg[idx]
# total_rel_correct_num += rel_cpg[0]
# total_rel_pred_num += rel_cpg[1]
# total_rel_gold_num += rel_cpg[2]
# total_ent_correct_num += ent_cpg[0]
# total_ent_pred_num += ent_cpg[1]
# total_ent_gold_num += ent_cpg[2]
avg_sample_acc = total_sample_acc / len(dataloader)
if "rel_cpg" in total_cpg_dict:
rel_prf = metrics.get_prf_scores(total_cpg_dict["rel_cpg"][0], total_cpg_dict["rel_cpg"][1], total_cpg_dict["rel_cpg"][2])
ent_prf = metrics.get_prf_scores(total_cpg_dict["ent_cpg"][0], total_cpg_dict["ent_cpg"][1], total_cpg_dict["ent_cpg"][2])
final_score = rel_prf[2]
log_dict = {
"val_shaking_tag_acc": avg_sample_acc,
"val_rel_prec": rel_prf[0],
"val_rel_recall": rel_prf[1],
"val_rel_f1": rel_prf[2],
"val_ent_prec": ent_prf[0],
"val_ent_recall": ent_prf[1],
"val_ent_f1": ent_prf[2],
"time": time.time() - t_ep,
}
elif "trigger_iden_cpg" in total_cpg_dict:
trigger_iden_prf = metrics.get_prf_scores(total_cpg_dict["trigger_iden_cpg"][0],
total_cpg_dict["trigger_iden_cpg"][1],
total_cpg_dict["trigger_iden_cpg"][2])
trigger_class_prf = metrics.get_prf_scores(total_cpg_dict["trigger_class_cpg"][0],
total_cpg_dict["trigger_class_cpg"][1],
total_cpg_dict["trigger_class_cpg"][2])
arg_iden_prf = metrics.get_prf_scores(total_cpg_dict["arg_iden_cpg"][0], total_cpg_dict["arg_iden_cpg"][1], total_cpg_dict["arg_iden_cpg"][2])
arg_class_prf = metrics.get_prf_scores(total_cpg_dict["arg_class_cpg"][0], total_cpg_dict["arg_class_cpg"][1], total_cpg_dict["arg_class_cpg"][2])
final_score = arg_class_prf[2]
log_dict = {
"val_shaking_tag_acc": avg_sample_acc,
"val_trigger_iden_prec": trigger_iden_prf[0],
"val_trigger_iden_recall": trigger_iden_prf[1],
"val_trigger_iden_f1": trigger_iden_prf[2],
"val_trigger_class_prec": trigger_class_prf[0],
"val_trigger_class_recall": trigger_class_prf[1],
"val_trigger_class_f1": trigger_class_prf[2],
"val_arg_iden_prec": arg_iden_prf[0],
"val_arg_iden_recall": arg_iden_prf[1],
"val_arg_iden_f1": arg_iden_prf[2],
"val_arg_class_prec": arg_class_prf[0],
"val_arg_class_recall": arg_class_prf[1],
"val_arg_class_f1": arg_class_prf[2],
"time": time.time() - t_ep,
}
logger.log(log_dict)
pprint(log_dict)
return final_score
for ep in range(num_epoch):
train(train_dataloader, ep)
valid_f1 = valid(valid_dataloader, ep)
global max_f1
if valid_f1 >= max_f1:
max_f1 = valid_f1
if valid_f1 > config["f1_2_save"]: # save the best model
model_state_num = len(glob.glob(model_state_dict_dir + "/model_state_dict_*.pt"))
torch.save(rel_extractor.state_dict(), os.path.join(model_state_dict_dir, "model_state_dict_{}.pt".format(model_state_num)))
# scheduler_state_num = len(glob.glob(schedule_state_dict_dir + "/scheduler_state_dict_*.pt"))
# torch.save(scheduler.state_dict(), os.path.join(schedule_state_dict_dir, "scheduler_state_dict_{}.pt".format(scheduler_state_num)))
print("Current avf_f1: {}, Best f1: {}".format(valid_f1, max_f1))
# In[ ]:
# optimizer
init_learning_rate = float(hyper_parameters["lr"])
optimizer = torch.optim.Adam(rel_extractor.parameters(), lr = init_learning_rate)
# In[ ]:
if hyper_parameters["scheduler"] == "CAWR":
T_mult = hyper_parameters["T_mult"]
rewarm_epoch_num = hyper_parameters["rewarm_epoch_num"]
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, len(train_dataloader) * rewarm_epoch_num, T_mult)
elif hyper_parameters["scheduler"] == "Step":
decay_rate = hyper_parameters["decay_rate"]
decay_steps = hyper_parameters["decay_steps"]
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size = decay_steps, gamma = decay_rate)
elif hyper_parameters["scheduler"] == "ReduceLROnPlateau":
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, "min", verbose = True, patience = 6)
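# --- Added reference note (not original code) ---
# For CAWR, the first cosine cycle spans len(train_dataloader) *
# rewarm_epoch_num optimizer steps (scheduler.step() is called per batch) and
# each later cycle is T_mult times longer: e.g. 500 batches/epoch,
# rewarm_epoch_num = 2 and T_mult = 1 restarts the learning rate every 1000 steps.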
# In[ ]:
if not config["fr_scratch"]:
model_state_path = config["model_state_dict_path"]
rel_extractor.load_state_dict(torch.load(model_state_path))
print("------------model state {} loaded ----------------".format(model_state_path.split("/")[-1]))
train_n_valid(train_dataloader, valid_dataloader, optimizer, scheduler, hyper_parameters["epochs"])
| 26,441 | 37.266281 | 252 | py |
netcdf4-python | netcdf4-python-master/setup.py | import os, sys, subprocess, glob
import os.path as osp
import shutil
import configparser
from setuptools import setup, Extension
from setuptools.dist import Distribution
open_kwargs = {'encoding': 'utf-8'}
def check_hdf5version(hdf5_includedir):
try:
f = open(os.path.join(hdf5_includedir, 'H5public.h'), **open_kwargs)
except OSError:
return None
hdf5_version = None
for line in f:
if line.startswith('#define H5_VERS_INFO'):
hdf5_version = line.split('"')[1]
return hdf5_version
def get_hdf5_version(direc):
# check to see if hdf5 headers in direc, return version number or None
hdf5_version = None
sys.stdout.write('checking %s ...\n' % direc)
hdf5_version = check_hdf5version(direc)
if hdf5_version is None:
sys.stdout.write('hdf5 headers not found in %s\n' % direc)
return None
else:
sys.stdout.write('%s headers found in %s\n' %
(hdf5_version,direc))
return hdf5_version
def check_ifnetcdf4(netcdf4_includedir):
try:
f = open(os.path.join(netcdf4_includedir, 'netcdf.h'), **open_kwargs)
except OSError:
return False
isnetcdf4 = False
for line in f:
if line.startswith('nc_inq_compound'):
isnetcdf4 = True
return isnetcdf4
def check_api(inc_dirs,netcdf_lib_version):
has_rename_grp = False
has_nc_inq_path = False
has_nc_inq_format_extended = False
has_cdf5_format = False
has_nc_open_mem = False
has_nc_create_mem = False
has_parallel_support = False
has_parallel4_support = False
has_pnetcdf_support = False
has_szip_support = False
has_quantize = False
has_zstandard = False
has_bzip2 = False
has_blosc = False
has_ncfilter = False
has_set_alignment = False
has_nc_rc_set = False
for d in inc_dirs:
try:
f = open(os.path.join(d, 'netcdf.h'), **open_kwargs)
except OSError:
continue
has_nc_open_mem = os.path.exists(os.path.join(d, 'netcdf_mem.h'))
has_nc_filter = os.path.exists(os.path.join(d, 'netcdf_filter.h'))
for line in f:
if line.startswith('nc_rename_grp'):
has_rename_grp = True
if line.startswith('nc_inq_path'):
has_nc_inq_path = True
if line.startswith('nc_inq_format_extended'):
has_nc_inq_format_extended = True
if line.startswith('#define NC_FORMAT_64BIT_DATA'):
has_cdf5_format = True
if line.startswith('nc_def_var_quantize'):
has_quantize = True
if line.startswith('nc_set_alignment'):
has_set_alignment = True
if line.startswith('EXTERNL int nc_rc_set'):
has_nc_rc_set = True
if has_nc_open_mem:
try:
f = open(os.path.join(d, 'netcdf_mem.h'), **open_kwargs)
except OSError:
continue
for line in f:
if line.startswith('EXTERNL int nc_create_mem'):
has_nc_create_mem = True
if has_nc_filter:
try:
f = open(os.path.join(d, 'netcdf_filter.h'), **open_kwargs)
except OSError:
continue
for line in f:
if line.startswith('EXTERNL int nc_def_var_zstandard'):
has_zstandard = True
if line.startswith('EXTERNL int nc_def_var_bzip2'):
has_bzip2 = True
if line.startswith('EXTERNL int nc_def_var_blosc'):
has_blosc = True
if line.startswith('EXTERNL int nc_inq_filter_avail'):
has_ncfilter = True
ncmetapath = os.path.join(d,'netcdf_meta.h')
if os.path.exists(ncmetapath):
for line in open(ncmetapath):
if line.startswith('#define NC_HAS_CDF5'):
try:
has_cdf5_format = bool(int(line.split()[2]))
except ValueError:
pass # keep default False if value cannot be parsed
if line.startswith('#define NC_HAS_PARALLEL'):
try:
has_parallel_support = bool(int(line.split()[2]))
except ValueError:
pass
if line.startswith('#define NC_HAS_PARALLEL4'):
try:
has_parallel4_support = bool(int(line.split()[2]))
except ValueError:
pass
if line.startswith('#define NC_HAS_PNETCDF'):
try:
has_pnetcdf_support = bool(int(line.split()[2]))
except ValueError:
pass
if line.startswith('#define NC_HAS_SZIP_WRITE'):
try:
has_szip_support = bool(int(line.split()[2]))
except ValueError:
pass
# NC_HAS_PARALLEL4 missing in 4.6.1 (issue #964)
if not has_parallel4_support and has_parallel_support and not has_pnetcdf_support:
has_parallel4_support = True
# for 4.6.1, if NC_HAS_PARALLEL=NC_HAS_PNETCDF=1, guess that
# parallel HDF5 is enabled (must guess since there is no
# NC_HAS_PARALLEL4)
elif netcdf_lib_version == "4.6.1" and not has_parallel4_support and has_parallel_support:
has_parallel4_support = True
break
return has_rename_grp, has_nc_inq_path, has_nc_inq_format_extended, \
has_cdf5_format, has_nc_open_mem, has_nc_create_mem, \
has_parallel4_support, has_pnetcdf_support, has_szip_support, has_quantize, \
has_zstandard, has_bzip2, has_blosc, has_set_alignment, has_ncfilter, \
has_nc_rc_set
def getnetcdfvers(libdirs):
"""
Get the version string for the first netcdf lib found in libdirs.
(major.minor.release). If nothing found, return None.
"""
import os, re, sys, ctypes
if sys.platform.startswith('win'):
regexp = re.compile('^netcdf.dll$')
elif sys.platform.startswith('cygwin'):
bindirs = []
for d in libdirs:
bindirs.append(os.path.dirname(d) + '/bin')
regexp = re.compile(r'^cygnetcdf-\d.dll')
elif sys.platform.startswith('darwin'):
regexp = re.compile(r'^libnetcdf.dylib')
else:
regexp = re.compile(r'^libnetcdf.so')
if sys.platform.startswith('cygwin'):
dirs = bindirs
else:
dirs = libdirs
for d in dirs:
try:
candidates = [x for x in os.listdir(d) if regexp.match(x)]
if len(candidates) != 0:
candidates.sort(
key=lambda x: len(x)) # Prefer libfoo.so to libfoo.so.X.Y.Z
path = os.path.abspath(os.path.join(d, candidates[0]))
lib = ctypes.cdll.LoadLibrary(path)
inq_libvers = lib.nc_inq_libvers
inq_libvers.restype = ctypes.c_char_p
vers = lib.nc_inq_libvers()
return vers.split()[0]
except Exception:
pass # We skip invalid entries, because that's what the C compiler does
return None
def extract_version(CYTHON_FNAME):
version = None
with open(CYTHON_FNAME) as fi:
for line in fi:
if (line.startswith('__version__')):
_, version = line.split('=')
version = version.strip()[1:-1] # Remove quotation characters.
break
return version
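# --- Added illustrative example (not original code) ---
# Given a line such as
#   __version__ = "1.6.4"
# in the .pyx source, extract_version returns "1.6.4": split('=') takes the
# right-hand side and strip()[1:-1] drops the surrounding quotes.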
HDF5_dir = os.environ.get('HDF5_DIR')
HDF5_incdir = os.environ.get('HDF5_INCDIR')
HDF5_libdir = os.environ.get('HDF5_LIBDIR')
netCDF4_dir = os.environ.get('NETCDF4_DIR')
netCDF4_incdir = os.environ.get('NETCDF4_INCDIR')
netCDF4_libdir = os.environ.get('NETCDF4_LIBDIR')
szip_dir = os.environ.get('SZIP_DIR')
szip_libdir = os.environ.get('SZIP_LIBDIR')
szip_incdir = os.environ.get('SZIP_INCDIR')
hdf4_dir = os.environ.get('HDF4_DIR')
hdf4_libdir = os.environ.get('HDF4_LIBDIR')
hdf4_incdir = os.environ.get('HDF4_INCDIR')
jpeg_dir = os.environ.get('JPEG_DIR')
jpeg_libdir = os.environ.get('JPEG_LIBDIR')
jpeg_incdir = os.environ.get('JPEG_INCDIR')
curl_dir = os.environ.get('CURL_DIR')
curl_libdir = os.environ.get('CURL_LIBDIR')
curl_incdir = os.environ.get('CURL_INCDIR')
mpi_incdir = os.environ.get('MPI_INCDIR')
USE_NCCONFIG = os.environ.get('USE_NCCONFIG')
if USE_NCCONFIG is not None:
USE_NCCONFIG = bool(int(USE_NCCONFIG))
USE_SETUPCFG = os.environ.get('USE_SETUPCFG')
# override use of setup.cfg with env var.
if USE_SETUPCFG is not None:
USE_SETUPCFG = bool(int(USE_SETUPCFG))
else:
USE_SETUPCFG = True
setup_cfg = 'setup.cfg'
# contents of setup.cfg will override env vars, unless
# USE_SETUPCFG evaluates to False.
ncconfig = None
use_ncconfig = None
if USE_SETUPCFG and os.path.exists(setup_cfg):
sys.stdout.write('reading from setup.cfg...\n')
config = configparser.ConfigParser()
config.read(setup_cfg)
try:
HDF5_dir = config.get("directories", "HDF5_dir")
except:
pass
try:
HDF5_libdir = config.get("directories", "HDF5_libdir")
except:
pass
try:
HDF5_incdir = config.get("directories", "HDF5_incdir")
except:
pass
try:
netCDF4_dir = config.get("directories", "netCDF4_dir")
except:
pass
try:
netCDF4_libdir = config.get("directories", "netCDF4_libdir")
except:
pass
try:
netCDF4_incdir = config.get("directories", "netCDF4_incdir")
except:
pass
try:
szip_dir = config.get("directories", "szip_dir")
except:
pass
try:
szip_libdir = config.get("directories", "szip_libdir")
except:
pass
try:
szip_incdir = config.get("directories", "szip_incdir")
except:
pass
try:
hdf4_dir = config.get("directories", "hdf4_dir")
except:
pass
try:
hdf4_libdir = config.get("directories", "hdf4_libdir")
except:
pass
try:
hdf4_incdir = config.get("directories", "hdf4_incdir")
except:
pass
try:
jpeg_dir = config.get("directories", "jpeg_dir")
except:
pass
try:
jpeg_libdir = config.get("directories", "jpeg_libdir")
except:
pass
try:
jpeg_incdir = config.get("directories", "jpeg_incdir")
except:
pass
try:
curl_dir = config.get("directories", "curl_dir")
except:
pass
try:
curl_libdir = config.get("directories", "curl_libdir")
except:
pass
try:
curl_incdir = config.get("directories", "curl_incdir")
except:
pass
try:
mpi_incdir = config.get("directories","mpi_incdir")
except:
pass
try:
use_ncconfig = config.getboolean("options", "use_ncconfig")
except:
pass
try:
ncconfig = config.get("options", "ncconfig")
except:
pass
try:
if ncconfig is None:
if netCDF4_dir is not None:
ncconfig = os.path.join(netCDF4_dir, 'bin/nc-config')
else: # otherwise, just hope it's in the users PATH.
ncconfig = 'nc-config'
HAS_NCCONFIG = subprocess.call([ncconfig, '--libs'],
stdout=subprocess.PIPE) == 0
except OSError:
HAS_NCCONFIG = False
# make sure USE_NCCONFIG from environment takes
# precendence over use_ncconfig from setup.cfg (issue #341).
if USE_NCCONFIG is None and use_ncconfig is not None:
USE_NCCONFIG = use_ncconfig
elif USE_NCCONFIG is None:
# if nc-config exists, and USE_NCCONFIG not set, try to use it.
if HAS_NCCONFIG: USE_NCCONFIG=True
#elif USE_NCCONFIG is None:
# USE_NCCONFIG = False # don't try to use nc-config if USE_NCCONFIG not set
try:
HAS_PKG_CONFIG = subprocess.call(['pkg-config', '--libs', 'hdf5'],
stdout=subprocess.PIPE) == 0
except OSError:
HAS_PKG_CONFIG = False
def _populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs):
global HDF5_incdir, HDF5_dir, HDF5_libdir
nohdf5dirs = HDF5_incdir is None and HDF5_libdir is None and HDF5_dir is None
if HAS_PKG_CONFIG and nohdf5dirs:
# if HDF5 dirs not specified, and pkg-config available, use it
dep = subprocess.Popen(['pkg-config', '--cflags', 'hdf5'],
stdout=subprocess.PIPE).communicate()[0]
inc_dirs.extend([str(i[2:].decode()) for i in dep.split() if
i[0:2].decode() == '-I'])
dep = subprocess.Popen(['pkg-config', '--libs', 'hdf5'],
stdout=subprocess.PIPE).communicate()[0]
libs.extend(
[str(l[2:].decode()) for l in dep.split() if l[0:2].decode() == '-l'])
lib_dirs.extend(
[str(l[2:].decode()) for l in dep.split() if l[0:2].decode() == '-L'])
dep = subprocess.Popen(['pkg-config', '--cflags', 'hdf5'],
stdout=subprocess.PIPE).communicate()[0]
inc_dirs.extend(
[str(i[2:].decode()) for i in dep.split() if i[0:2].decode() == '-I'])
else:
if HDF5_incdir is None and HDF5_dir is None:
sys.stdout.write("""
HDF5_DIR environment variable not set, checking some standard locations ..\n""")
for direc in dirstosearch:
hdf5_version = get_hdf5_version(os.path.join(direc, 'include'))
if hdf5_version is None:
continue
else:
HDF5_dir = direc
HDF5_incdir = os.path.join(direc, 'include')
sys.stdout.write('%s found in %s\n' %
(hdf5_version,HDF5_dir))
break
if HDF5_dir is None:
raise ValueError('did not find HDF5 headers')
else:
if HDF5_incdir is None:
HDF5_incdir = os.path.join(HDF5_dir, 'include')
hdf5_version = get_hdf5_version(HDF5_incdir)
if hdf5_version is None:
raise ValueError('did not find HDF5 headers in %s' % HDF5_incdir)
else:
sys.stdout.write('%s found in %s\n' %
(hdf5_version,HDF5_dir))
if HDF5_libdir is None and HDF5_dir is not None:
HDF5_libdir = os.path.join(HDF5_dir, 'lib')
if HDF5_libdir is not None: lib_dirs.append(HDF5_libdir)
if HDF5_incdir is not None: inc_dirs.append(HDF5_incdir)
libs.extend(['hdf5_hl', 'hdf5'])
dirstosearch = []
if os.environ.get("CONDA_PREFIX"):
dirstosearch.append(os.environ["CONDA_PREFIX"]) # linux,macosx
dirstosearch.append(os.path.join(os.environ["CONDA_PREFIX"],'Library')) # windows
dirstosearch += [os.path.expanduser('~'), '/usr/local', '/sw', '/opt',
'/opt/local', '/opt/homebrew', '/usr']
# try nc-config first
if USE_NCCONFIG and HAS_NCCONFIG: # Try nc-config.
sys.stdout.write('using %s...\n' % ncconfig)
dep = subprocess.Popen([ncconfig, '--libs'],
stdout=subprocess.PIPE).communicate()[0]
libs = [str(l[2:].decode()) for l in dep.split() if l[0:2].decode() == '-l']
lib_dirs = [str(l[2:].decode()) for l in dep.split() if
l[0:2].decode() == '-L']
dep = subprocess.Popen([ncconfig, '--cflags'],
stdout=subprocess.PIPE).communicate()[0]
inc_dirs = [str(i[2:].decode()) for i in dep.split() if
i[0:2].decode() == '-I']
# check to see if hdf5 found in directories returned by nc-config
hdf5_version = None
for direc in inc_dirs:
hdf5_version = get_hdf5_version(direc)
if hdf5_version is not None:
break
# if hdf5 not found, search other standard locations (including those specified in env vars).
if hdf5_version is None:
sys.stdout.write('nc-config did not provide a path to the HDF5 headers, searching standard locations...\n')
_populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs)
# If nc-config doesn't work, fall back on brute force method.
else:
lib_dirs = []
inc_dirs = []
libs = []
# _populate_hdf5_info will use HDF5_dir, HDF5_libdir and HDF5_incdir if they are set.
# otherwise pkg-config will be tried, and if that fails, dirstosearch will be searched.
_populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs)
if netCDF4_incdir is None and netCDF4_dir is None:
sys.stdout.write("""
NETCDF4_DIR environment variable not set, checking standard locations.. \n""")
for direc in dirstosearch:
sys.stdout.write('checking %s ...\n' % direc)
isnetcdf4 = check_ifnetcdf4(os.path.join(direc, 'include'))
if not isnetcdf4:
continue
else:
netCDF4_dir = direc
netCDF4_incdir = os.path.join(direc, 'include')
sys.stdout.write('netCDF4 found in %s\n' % netCDF4_dir)
break
if netCDF4_dir is None:
raise ValueError('did not find netCDF version 4 headers')
else:
if netCDF4_incdir is None:
netCDF4_incdir = os.path.join(netCDF4_dir, 'include')
isnetcdf4 = check_ifnetcdf4(netCDF4_incdir)
if not isnetcdf4:
raise ValueError(
'did not find netCDF version 4 headers %s' % netCDF4_incdir)
if netCDF4_libdir is None and netCDF4_dir is not None:
netCDF4_libdir = os.path.join(netCDF4_dir, 'lib')
if sys.platform == 'win32':
libs.extend(['netcdf', 'zlib'])
else:
libs.extend(['netcdf', 'z'])
if netCDF4_libdir is not None: lib_dirs.append(netCDF4_libdir)
if netCDF4_incdir is not None: inc_dirs.append(netCDF4_incdir)
# add szip to link if desired.
if szip_libdir is None and szip_dir is not None:
szip_libdir = os.path.join(szip_dir, 'lib')
if szip_incdir is None and szip_dir is not None:
szip_incdir = os.path.join(szip_dir, 'include')
if szip_incdir is not None and szip_libdir is not None:
libs.append('sz')
lib_dirs.append(szip_libdir)
inc_dirs.append(szip_incdir)
# add hdf4 to link if desired.
if hdf4_libdir is None and hdf4_dir is not None:
hdf4_libdir = os.path.join(hdf4_dir, 'lib')
if hdf4_incdir is None and hdf4_dir is not None:
hdf4_incdir = os.path.join(hdf4_dir, 'include')
if hdf4_incdir is not None and hdf4_libdir is not None:
libs.append('mfhdf')
libs.append('df')
lib_dirs.append(hdf4_libdir)
inc_dirs.append(hdf4_incdir)
# add jpeg to link if desired.
if jpeg_libdir is None and jpeg_dir is not None:
jpeg_libdir = os.path.join(jpeg_dir, 'lib')
if jpeg_incdir is None and jpeg_dir is not None:
jpeg_incdir = os.path.join(jpeg_dir, 'include')
if jpeg_incdir is not None and jpeg_libdir is not None:
libs.append('jpeg')
lib_dirs.append(jpeg_libdir)
inc_dirs.append(jpeg_incdir)
# add curl to link if desired.
if curl_libdir is None and curl_dir is not None:
curl_libdir = os.path.join(curl_dir, 'lib')
if curl_incdir is None and curl_dir is not None:
curl_incdir = os.path.join(curl_dir, 'include')
if curl_incdir is not None and curl_libdir is not None:
libs.append('curl')
lib_dirs.append(curl_libdir)
inc_dirs.append(curl_incdir)
if sys.platform == 'win32':
runtime_lib_dirs = []
else:
runtime_lib_dirs = lib_dirs
# Do not require numpy for just querying the package
# Taken from the h5py setup file.
if any('--' + opt in sys.argv for opt in Distribution.display_option_names +
['help-commands', 'help']) or sys.argv[1] == 'egg_info':
pass
else:
# append numpy include dir.
import numpy
inc_dirs.append(numpy.get_include())
# get netcdf library version.
netcdf_lib_version = getnetcdfvers(lib_dirs)
if netcdf_lib_version is None:
sys.stdout.write('unable to detect netcdf library version\n')
else:
netcdf_lib_version = str(netcdf_lib_version)
sys.stdout.write('using netcdf library version %s\n' % netcdf_lib_version)
cmdclass = {}
DEFINE_MACROS = [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")]
netcdf4_src_root = osp.join(osp.join('src','netCDF4'), '_netCDF4')
netcdf4_src_c = netcdf4_src_root + '.c'
netcdf4_src_pyx = netcdf4_src_root + '.pyx'
if 'sdist' not in sys.argv[1:] and 'clean' not in sys.argv[1:] and '--version' not in sys.argv[1:]:
sys.stdout.write('using Cython to compile netCDF4.pyx...\n')
# remove _netCDF4.c file if it exists, so cython will recompile _netCDF4.pyx.
# run for build *and* install (issue #263). Otherwise 'pip install' will
# not regenerate _netCDF4.c, even if the C lib supports the new features.
if len(sys.argv) >= 2:
if os.path.exists(netcdf4_src_c):
os.remove(netcdf4_src_c)
# this determines whether renameGroup and filepath methods will work.
has_rename_grp, has_nc_inq_path, has_nc_inq_format_extended, \
has_cdf5_format, has_nc_open_mem, has_nc_create_mem, \
has_parallel4_support, has_pnetcdf_support, has_szip_support, has_quantize, \
has_zstandard, has_bzip2, has_blosc, has_set_alignment, has_ncfilter, has_nc_rc_set = \
check_api(inc_dirs,netcdf_lib_version)
# for netcdf 4.4.x CDF5 format is always enabled.
if netcdf_lib_version is not None and\
(netcdf_lib_version > "4.4" and netcdf_lib_version < "4.5"):
has_cdf5_format = True
# disable parallel support if mpi4py not available.
#try:
# import mpi4py
#except ImportError:
# f.write('disabling mpi parallel support because mpi4py not found\n')
# has_parallel4_support = False
# has_pnetcdf_support = False
f = open(osp.join('include', 'constants.pyx'), 'w')
if has_rename_grp:
sys.stdout.write('netcdf lib has group rename capability\n')
f.write('DEF HAS_RENAME_GRP = 1\n')
else:
sys.stdout.write('netcdf lib does not have group rename capability\n')
f.write('DEF HAS_RENAME_GRP = 0\n')
if has_nc_inq_path:
sys.stdout.write('netcdf lib has nc_inq_path function\n')
f.write('DEF HAS_NC_INQ_PATH = 1\n')
else:
sys.stdout.write('netcdf lib does not have nc_inq_path function\n')
f.write('DEF HAS_NC_INQ_PATH = 0\n')
if has_nc_inq_format_extended:
sys.stdout.write('netcdf lib has nc_inq_format_extended function\n')
f.write('DEF HAS_NC_INQ_FORMAT_EXTENDED = 1\n')
else:
sys.stdout.write(
'netcdf lib does not have nc_inq_format_extended function\n')
f.write('DEF HAS_NC_INQ_FORMAT_EXTENDED = 0\n')
if has_nc_open_mem:
sys.stdout.write('netcdf lib has nc_open_mem function\n')
f.write('DEF HAS_NC_OPEN_MEM = 1\n')
else:
sys.stdout.write('netcdf lib does not have nc_open_mem function\n')
f.write('DEF HAS_NC_OPEN_MEM = 0\n')
if has_nc_create_mem:
sys.stdout.write('netcdf lib has nc_create_mem function\n')
f.write('DEF HAS_NC_CREATE_MEM = 1\n')
else:
sys.stdout.write('netcdf lib does not have nc_create_mem function\n')
f.write('DEF HAS_NC_CREATE_MEM = 0\n')
if has_cdf5_format:
sys.stdout.write('netcdf lib has cdf-5 format capability\n')
f.write('DEF HAS_CDF5_FORMAT = 1\n')
else:
sys.stdout.write('netcdf lib does not have cdf-5 format capability\n')
f.write('DEF HAS_CDF5_FORMAT = 0\n')
if has_parallel4_support:
sys.stdout.write('netcdf lib has netcdf4 parallel functions\n')
f.write('DEF HAS_PARALLEL4_SUPPORT = 1\n')
else:
sys.stdout.write('netcdf lib does not have netcdf4 parallel functions\n')
f.write('DEF HAS_PARALLEL4_SUPPORT = 0\n')
if has_pnetcdf_support:
sys.stdout.write('netcdf lib has pnetcdf parallel functions\n')
f.write('DEF HAS_PNETCDF_SUPPORT = 1\n')
else:
sys.stdout.write('netcdf lib does not have pnetcdf parallel functions\n')
f.write('DEF HAS_PNETCDF_SUPPORT = 0\n')
if has_quantize:
sys.stdout.write('netcdf lib has bit-grooming/quantization functions\n')
f.write('DEF HAS_QUANTIZATION_SUPPORT = 1\n')
else:
sys.stdout.write('netcdf lib does not have bit-grooming/quantization functions\n')
f.write('DEF HAS_QUANTIZATION_SUPPORT = 0\n')
if has_zstandard:
sys.stdout.write('netcdf lib has zstandard compression functions\n')
f.write('DEF HAS_ZSTANDARD_SUPPORT = 1\n')
else:
sys.stdout.write('netcdf lib does not have zstandard compression functions\n')
f.write('DEF HAS_ZSTANDARD_SUPPORT = 0\n')
if has_bzip2:
sys.stdout.write('netcdf lib has bzip2 compression functions\n')
f.write('DEF HAS_BZIP2_SUPPORT = 1\n')
else:
sys.stdout.write('netcdf lib does not have bzip2 compression functions\n')
f.write('DEF HAS_BZIP2_SUPPORT = 0\n')
if has_blosc:
sys.stdout.write('netcdf lib has blosc compression functions\n')
f.write('DEF HAS_BLOSC_SUPPORT = 1\n')
else:
sys.stdout.write('netcdf lib does not have blosc compression functions\n')
f.write('DEF HAS_BLOSC_SUPPORT = 0\n')
if has_szip_support:
sys.stdout.write('netcdf lib has szip compression functions\n')
f.write('DEF HAS_SZIP_SUPPORT = 1\n')
else:
sys.stdout.write('netcdf lib does not have szip compression functions\n')
f.write('DEF HAS_SZIP_SUPPORT = 0\n')
if has_set_alignment:
sys.stdout.write('netcdf lib has nc_set_alignment function\n')
f.write('DEF HAS_SET_ALIGNMENT = 1\n')
else:
sys.stdout.write('netcdf lib does not have nc_set_alignment function\n')
f.write('DEF HAS_SET_ALIGNMENT = 0\n')
if has_ncfilter:
sys.stdout.write('netcdf lib has nc_inq_filter_avail function\n')
f.write('DEF HAS_NCFILTER = 1\n')
else:
sys.stdout.write('netcdf lib does not have nc_inq_filter_avail function\n')
f.write('DEF HAS_NCFILTER = 0\n')
if has_nc_rc_set:
sys.stdout.write('netcdf lib has nc_rc_set function\n')
f.write('DEF HAS_NCRCSET = 1\n')
else:
sys.stdout.write('netcdf lib does not have nc_rc_set function\n')
f.write('DEF HAS_NCRCSET = 0\n')
f.close()
if has_parallel4_support or has_pnetcdf_support:
import mpi4py
inc_dirs.append(mpi4py.get_include())
# mpi_incdir should not be needed if using nc-config
# (should be included in nc-config --cflags)
if mpi_incdir is not None: inc_dirs.append(mpi_incdir)
ext_modules = [Extension("netCDF4._netCDF4",
[netcdf4_src_pyx],
define_macros=DEFINE_MACROS,
libraries=libs,
library_dirs=lib_dirs,
include_dirs=inc_dirs + ['include'],
runtime_library_dirs=runtime_lib_dirs)]
# set language_level directive to 3
for e in ext_modules:
e.cython_directives = {'language_level': "3"}
else:
ext_modules = None
# if NETCDF_PLUGIN_DIR set, install netcdf-c compression plugins inside package
# (should point to location of lib__nc* files built by netcdf-c)
copied_plugins=False
if os.environ.get("NETCDF_PLUGIN_DIR"):
plugin_dir = os.environ.get("NETCDF_PLUGIN_DIR")
plugins = glob.glob(os.path.join(plugin_dir, "lib__nc*"))
if not plugins:
sys.stdout.write('no plugin files in NETCDF_PLUGIN_DIR, not installing..\n')
data_files = []
else:
data_files = plugins
sys.stdout.write('installing netcdf compression plugins from %s ...\n' % plugin_dir)
sofiles = [os.path.basename(sofilepath) for sofilepath in data_files]
sys.stdout.write(repr(sofiles)+'\n')
if 'sdist' not in sys.argv[1:] and 'clean' not in sys.argv[1:] and '--version' not in sys.argv[1:]:
for f in data_files:
shutil.copy(f, osp.join(os.getcwd(),osp.join(osp.join('src','netCDF4'),'plugins')))
copied_plugins=True
else:
sys.stdout.write('NETCDF_PLUGIN_DIR not set, no netcdf compression plugins installed\n')
data_files = []
# See pyproject.toml for project metadata
setup(
name="netCDF4", # need by GitHub dependency graph
version=extract_version(netcdf4_src_pyx),
ext_modules=ext_modules,
)
# remove plugin files copied from outside source tree
if copied_plugins:
for f in sofiles:
filepath = osp.join(osp.join(osp.join('src','netCDF4'),'plugins'),f)
if os.path.exists(filepath):
os.remove(filepath)
| 28,981 | 37.285337 | 107 | py |
netcdf4-python | netcdf4-python-master/checkversion.py | import netCDF4, numpy
print('netcdf4-python version: %s'%netCDF4.__version__)
print('HDF5 lib version: %s'%netCDF4.__hdf5libversion__)
print('netcdf lib version: %s'%netCDF4.__netcdf4libversion__)
print('numpy version %s' % numpy.__version__)
| 263 | 43 | 65 | py |
netcdf4-python | netcdf4-python-master/examples/bench_diskless.py | # benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
def write_netcdf(filename,zlib=False,least_significant_digit=None,format='NETCDF4',closeit=False):
file = netCDF4.Dataset(filename,'w',format=format,diskless=True,persist=True)
file.createDimension('n1', n1dim)
file.createDimension('n2', n2dim)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f8',('n1','n2','n3','n4'),zlib=zlib,least_significant_digit=least_significant_digit)
foo.testme="hi I am an attribute"
foo.testme1="hi I am an attribute"
foo.testme2="hi I am an attribute"
foo.testme3="hi I am an attribute"
foo.testme4="hi I am an attribute"
foo.testme5="hi I am an attribute"
foo[:] = array
if closeit: file.close()
return file
def read_netcdf(ncfile):
data = ncfile.variables['data'][:]
for format in ['NETCDF4','NETCDF3_CLASSIC','NETCDF3_64BIT']:
sys.stdout.write('testing file format %s ...\n' % format)
# writing, no compression.
t = Timer("write_netcdf('test1.nc',closeit=True,format='%s')" % format,"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
ncfile = write_netcdf('test1.nc',format=format)
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# test diskless=True in nc_open
format='NETCDF3_CLASSIC'
ntrials = 50 # use more trials for the cached-read comparison
sys.stdout.write('test caching of file in memory on open for %s\n' % format)
sys.stdout.write('testing file format %s ...\n' % format)
write_netcdf('test1.nc',format=format,closeit=True)
ncfile = netCDF4.Dataset('test1.nc',diskless=False)
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading (from disk) took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
ncfile.close()
ncfile = netCDF4.Dataset('test1.nc',diskless=True)
# setting diskless=True should cache the file in memory,
# resulting in faster reads.
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading (cached in memory) took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
ncfile.close()
| 2,750 | 39.455882 | 111 | py |
netcdf4-python | netcdf4-python-master/examples/bench_compress2.py | # benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n\n' % ntrials)
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
def write_netcdf(filename,complevel,lsd):
file = netCDF4.Dataset(filename,'w',format='NETCDF4')
file.createDimension('n1', n1dim)
file.createDimension('n2', n2dim)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f8',('n1','n2','n3','n4'),\
zlib=True,shuffle=True,complevel=complevel,\
least_significant_digit=lsd)
foo[:] = array
file.close()
def read_netcdf(filename):
file = netCDF4.Dataset(filename)
data = file.variables['data'][:]
file.close()
lsd = None
sys.stdout.write('using least_significant_digit %s\n\n' % lsd)
for complevel in range(0,10,2):
sys.stdout.write('testing compression with complevel %s...\n' % complevel)
# writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
complevel = 4
sys.stdout.write('\nusing complevel %s\n\n' % complevel)
for lsd in range(1,6):
sys.stdout.write('testing compression with least_significant_digit %s...\n' % lsd)
# writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
| 2,604 | 38.469697 | 106 | py |
netcdf4-python | netcdf4-python-master/examples/test_stringarr.py | from netCDF4 import Dataset, stringtochar, chartostring
import random, numpy
# test utilities for converting arrays of fixed-length strings
# to arrays of characters (with an extra dimension), and vice-versa.
# netCDF does not have a fixed-length string data-type (only characters
# and variable length strings). The convenience function chartostring
# converts an array of characters to an array of fixed-length strings.
# The array of fixed length strings has one less dimension, and the
# length of the strings is equal to the rightmost dimension of the
# array of characters. The convenience function stringtochar goes
# the other way, converting an array of fixed-length strings to an
# array of characters with an extra dimension (the number of characters
# per string) appended on the right.
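# Quick round-trip sketch of the two helpers described above (the names
# `_demo` and `_demo_chars` are illustrative only): a (2,) array of
# 5-byte strings becomes a (2, 5) array of single characters, and back.
_demo = numpy.array(['hello', 'world'], dtype='S5')
_demo_chars = stringtochar(_demo)        # shape (2, 5), dtype 'S1'
assert (chartostring(_demo_chars) == _demo).all()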
FILE_NAME = 'tst_stringarr.nc'
FILE_FORMAT = 'NETCDF4_CLASSIC'
chars = '1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
nc = Dataset(FILE_NAME,'w',format=FILE_FORMAT)
n2 = 10; nchar = 12; nrecs = 4
nc.createDimension('n1',None)
nc.createDimension('n2',n2)
nc.createDimension('nchar',nchar)
v = nc.createVariable('strings','S1',('n1','n2','nchar'))
for nrec in range(nrecs):
    data = numpy.empty((n2,),'S'+repr(nchar))
# fill data with random nchar character strings
for n in range(n2):
data[n] = ''.join([random.choice(chars) for i in range(nchar)])
print(nrec,data)
# convert data to array of characters with an extra dimension
# (the number of characters per string) added to the right.
datac = stringtochar(data)
v[nrec] = datac
nc.close()
nc = Dataset(FILE_NAME)
v = nc.variables['strings']
print(v.shape, v.dtype)
for nrec in range(nrecs):
# read character array back, convert to an array of strings
# of length equal to the rightmost dimension.
print(nrec, chartostring(v[nrec]))
nc.close()
| 1,883 | 37.44898 | 73 | py |
netcdf4-python | netcdf4-python-master/examples/tutorial.py | from netCDF4 import Dataset
# code from tutorial.
# create a file (Dataset object, also the root group).
rootgrp = Dataset('test.nc', 'w', format='NETCDF4')
print(rootgrp.file_format)
rootgrp.close()
# create some groups.
rootgrp = Dataset('test.nc', 'a')
fcstgrp = rootgrp.createGroup('forecasts')
analgrp = rootgrp.createGroup('analyses')
fcstgrp1 = rootgrp.createGroup('/forecasts/model1')
fcstgrp2 = rootgrp.createGroup('/forecasts/model2')
# walk the group tree using a Python generator.
def walktree(top):
yield top.groups.values()
for value in top.groups.values():
yield from walktree(value)
print(rootgrp)
for children in walktree(rootgrp):
for child in children:
print(child)
# dimensions.
level = rootgrp.createDimension('level', None)
time = rootgrp.createDimension('time', None)
lat = rootgrp.createDimension('lat', 73)
lon = rootgrp.createDimension('lon', 144)
print(rootgrp.dimensions)
print(len(lon))
print(lon.isunlimited())
print(time.isunlimited())
for dimobj in rootgrp.dimensions.values():
print(dimobj)
print(time)
# variables.
times = rootgrp.createVariable('time','f8',('time',))
levels = rootgrp.createVariable('level','i4',('level',))
latitudes = rootgrp.createVariable('lat','f4',('lat',))
longitudes = rootgrp.createVariable('lon','f4',('lon',))
# 2 unlimited dimensions.
#temp = rootgrp.createVariable('temp','f4',('time','level','lat','lon',))
# this makes the compression 'lossy' (preserving a precision of 1/1000)
# try it and see how much smaller the file gets.
temp = rootgrp.createVariable('temp','f4',('time','level','lat','lon',),least_significant_digit=3)
print(temp)
# create variable in a group using a path.
temp = rootgrp.createVariable('/forecasts/model1/temp','f4',('time','level','lat','lon',))
print(rootgrp['/forecasts/model1']) # print the Group instance
print(rootgrp['/forecasts/model1/temp']) # print the Variable instance
# attributes.
import time
rootgrp.description = 'bogus example script'
rootgrp.history = 'Created ' + time.ctime(time.time())
rootgrp.source = 'netCDF4 python module tutorial'
latitudes.units = 'degrees north'
longitudes.units = 'degrees east'
levels.units = 'hPa'
temp.units = 'K'
times.units = 'hours since 0001-01-01 00:00:00.0'
times.calendar = 'gregorian'
for name in rootgrp.ncattrs():
print('Global attr', name, '=', getattr(rootgrp,name))
print(rootgrp)
print(rootgrp.__dict__)
print(rootgrp.variables)
import numpy as np
# no unlimited dimension, just assign to slice.
lats = np.arange(-90,91,2.5)
lons = np.arange(-180,180,2.5)
latitudes[:] = lats
longitudes[:] = lons
print('latitudes =\n',latitudes[:])
print('longitudes =\n',longitudes[:])
# append along two unlimited dimensions by assigning to slice.
nlats = len(rootgrp.dimensions['lat'])
nlons = len(rootgrp.dimensions['lon'])
print('temp shape before adding data = ',temp.shape)
from numpy.random.mtrand import uniform # random number generator.
temp[0:5,0:10,:,:] = uniform(size=(5,10,nlats,nlons))
print('temp shape after adding data = ',temp.shape)
# levels have grown, but no values yet assigned.
print('levels shape after adding pressure data = ',levels.shape)
# assign values to levels dimension variable.
levels[:] = [1000.,850.,700.,500.,300.,250.,200.,150.,100.,50.]
# fancy slicing
tempdat = temp[::2, [1,3,6], lats>0, lons>0]
print('shape of fancy temp slice = ',tempdat.shape)
print(temp[0, 0, [0,1,2,3], [0,1,2,3]].shape)
# fill in times.
from datetime import datetime, timedelta
from netCDF4 import num2date, date2num, date2index
dates = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(temp.shape[0])]
times[:] = date2num(dates,units=times.units,calendar=times.calendar)
print("time values (in units {}):\n{}".format(times.units, times[:]))
dates = num2date(times[:],units=times.units,calendar=times.calendar)
print("dates corresponding to time values:\n{}".format(dates))
rootgrp.close()
# create a series of netCDF files with a variable sharing
# the same unlimited dimension.
for nfile in range(10):
f = Dataset('mftest'+repr(nfile)+'.nc','w',format='NETCDF4_CLASSIC')
f.createDimension('x',None)
x = f.createVariable('x','i',('x',))
x[0:10] = np.arange(nfile*10,10*(nfile+1))
f.close()
# now read all those files in at once, in one Dataset.
from netCDF4 import MFDataset
f = MFDataset('mftest*nc')
print(f.variables['x'][:])
# example showing how to save numpy complex arrays using compound types.
f = Dataset('complex.nc','w')
size = 3 # length of 1-d complex array
# create sample complex data.
datac = np.exp(1j*(1.+np.linspace(0, np.pi, size)))
print(datac.dtype)
# create complex128 compound data type.
complex128 = np.dtype([('real',np.float64),('imag',np.float64)])
complex128_t = f.createCompoundType(complex128,'complex128')
# create a variable with this data type, write some data to it.
f.createDimension('x_dim',None)
v = f.createVariable('cmplx_var',complex128_t,'x_dim')
data = np.empty(size,complex128) # numpy structured array
data['real'] = datac.real; data['imag'] = datac.imag
v[:] = data
# close and reopen the file, check the contents.
f.close()
f = Dataset('complex.nc')
print(f)
print(f.variables['cmplx_var'])
print(f.cmptypes)
print(f.cmptypes['complex128'])
v = f.variables['cmplx_var']
print(v.shape)
datain = v[:] # read in all the data into a numpy structured array
# create an empty numpy complex array
datac2 = np.empty(datain.shape,np.complex128)
# .. fill it with contents of structured array.
datac2.real = datain['real']
datac2.imag = datain['imag']
print(datac.dtype,datac)
print(datac2.dtype,datac2)
# more complex compound type example.
f = Dataset('compound_example.nc','w') # create a new dataset.
# create an unlimited dimension called 'station'
f.createDimension('station',None)
# define a compound data type (can contain arrays, or nested compound types).
winddtype = np.dtype([('speed','f4'),('direction','i4')])
statdtype = np.dtype([('latitude', 'f4'), ('longitude', 'f4'),
('surface_wind',winddtype),
('temp_sounding','f4',10),('press_sounding','i4',10),
('location_name','S12')])
# use these data type definitions to create compound data types
# via the createCompoundType Dataset method.
# create a compound type for vector wind which will be nested inside
# the station data type. This must be done first!
wind_data_t = f.createCompoundType(winddtype,'wind_data')
# now that wind_data_t is defined, create the station data type.
station_data_t = f.createCompoundType(statdtype,'station_data')
# create nested compound data types to hold the units variable attribute.
winddtype_units = np.dtype([('speed','S12'),('direction','S12')])
statdtype_units = np.dtype([('latitude', 'S12'), ('longitude', 'S12'),
('surface_wind',winddtype_units),
('temp_sounding','S12'),
('location_name','S12'),
('press_sounding','S12')])
# create the wind_data_units type first, since it will be nested inside
# the station_data_units data type.
wind_data_units_t = f.createCompoundType(winddtype_units,'wind_data_units')
station_data_units_t =\
f.createCompoundType(statdtype_units,'station_data_units')
# create a variable of type 'station_data_t'
statdat = f.createVariable('station_obs', station_data_t, ('station',))
# create a numpy structured array, assign data to it.
data = np.empty(1,statdtype)
data['latitude'] = 40.
data['longitude'] = -105.
data['surface_wind']['speed'] = 12.5
data['surface_wind']['direction'] = 270
data['temp_sounding'] = (280.3,272.,270.,269.,266.,258.,254.1,250.,245.5,240.)
data['press_sounding'] = range(800,300,-50)
data['location_name'] = 'Boulder, CO'
# assign structured array to variable slice.
statdat[0] = data
# or just assign a tuple of values to variable slice
# (will automatically be converted to a structured array).
statdat[1] = np.array((40.78,-73.99,(-12.5,90),
(290.2,282.5,279.,277.9,276.,266.,264.1,260.,255.5,243.),
range(900,400,-50),'New York, NY'),data.dtype)
print(f.cmptypes)
windunits = np.empty(1,winddtype_units)
stationobs_units = np.empty(1,statdtype_units)
windunits['speed'] = 'm/s'
windunits['direction'] = 'degrees'
stationobs_units['latitude'] = 'degrees N'
stationobs_units['longitude'] = 'degrees W'
stationobs_units['surface_wind'] = windunits
stationobs_units['location_name'] = 'None'
stationobs_units['temp_sounding'] = 'Kelvin'
stationobs_units['press_sounding'] = 'hPa'
print(stationobs_units.dtype)
statdat.units = stationobs_units
# close and reopen the file.
f.close()
f = Dataset('compound_example.nc')
print(f)
statdat = f.variables['station_obs']
print(statdat)
# print out data in variable.
print('data in a variable of compound type:')
print(statdat[:])
f.close()
f = Dataset('tst_vlen.nc','w')
vlen_t = f.createVLType(np.int32, 'phony_vlen')
x = f.createDimension('x',3)
y = f.createDimension('y',4)
vlvar = f.createVariable('phony_vlen_var', vlen_t, ('y','x'))
import random
data = np.empty(len(y)*len(x),object)
for n in range(len(y)*len(x)):
data[n] = np.arange(random.randint(1,10),dtype='int32')+1
data = np.reshape(data,(len(y),len(x)))
vlvar[:] = data
print(vlvar)
print('vlen variable =\n',vlvar[:])
print(f)
print(f.variables['phony_vlen_var'])
print(f.vltypes['phony_vlen'])
z = f.createDimension('z', 10)
strvar = f.createVariable('strvar',str,'z')
chars = '1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = np.empty(10,object)
for n in range(10):
stringlen = random.randint(2,12)
data[n] = ''.join([random.choice(chars) for i in range(stringlen)])
strvar[:] = data
print('variable-length string variable:\n',strvar[:])
print(f)
print(f.variables['strvar'])
f.close()
# Enum type example.
f = Dataset('clouds.nc','w')
# python dict describing the allowed values and their names.
enum_dict = {'Altocumulus': 7, 'Missing': 255, 'Stratus': 2, 'Clear': 0,
'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1,
'Stratocumulus': 3}
# create the Enum type called 'cloud_t'.
cloud_type = f.createEnumType(np.uint8,'cloud_t',enum_dict)
print(cloud_type)
time = f.createDimension('time',None)
# create a 1d variable of type 'cloud_type' called 'primary_clouds'.
# The fill_value is set to the 'Missing' named value.
cloud_var = f.createVariable('primary_cloud',cloud_type,'time',\
fill_value=enum_dict['Missing'])
# write some data to the variable.
cloud_var[:] = [enum_dict['Clear'],enum_dict['Stratus'],enum_dict['Cumulus'],\
enum_dict['Missing'],enum_dict['Cumulonimbus']]
# close file, reopen it.
f.close()
f = Dataset('clouds.nc')
cloud_var = f.variables['primary_cloud']
print(cloud_var)
print(cloud_var.datatype.enum_dict)
print(cloud_var[:])
f.close()
# dealing with strings
from netCDF4 import stringtochar
nc = Dataset('stringtest.nc','w',format='NETCDF4_CLASSIC')
nc.createDimension('nchars',3)
nc.createDimension('nstrings',None)
v = nc.createVariable('strings','S1',('nstrings','nchars'))
datain = np.array(['foo','bar'],dtype='S3')
v[:] = stringtochar(datain) # manual conversion to char array
print(v[:]) # data returned as char array
v._Encoding = 'ascii' # this enables automatic conversion
v[:] = datain # conversion to char array done internally
print(v[:]) # data returned in numpy string array
nc.close()
# strings in compound types
nc = Dataset('compoundstring_example.nc','w')
dtype = np.dtype([('observation', 'f4'),
('station_name','S12')])
station_data_t = nc.createCompoundType(dtype,'station_data')
nc.createDimension('station',None)
statdat = nc.createVariable('station_obs', station_data_t, ('station',))
data = np.empty(2,station_data_t.dtype_view)
data['observation'][:] = (123.,3.14)
data['station_name'][:] = ('Boulder','New York')
print(statdat.dtype) # strings actually stored as character arrays
statdat[:] = data # strings converted to character arrays internally
print(statdat[:]) # character arrays converted back to strings
print(statdat[:].dtype)
statdat.set_auto_chartostring(False) # turn off auto-conversion
statdat[:] = data.view(station_data_t.dtype)
print(statdat[:]) # now structured array with char array subtype is returned
nc.close()
# create a diskless (in-memory) Dataset, and persist the file
# to disk when it is closed.
nc = Dataset('diskless_example.nc','w',diskless=True,persist=True)
d = nc.createDimension('x',None)
v = nc.createVariable('v',np.int32,'x')
v[0:5] = np.arange(5)
print(nc)
print(nc['v'][:])
nc.close() # file saved to disk
# create an in-memory dataset from an existing python memory
# buffer.
# read the newly created netcdf file into a python bytes object.
f = open('diskless_example.nc', 'rb')
nc_bytes = f.read(); f.close()
# create a netCDF in-memory dataset from the bytes object.
nc = Dataset('inmemory.nc', memory=nc_bytes)
print(nc)
print(nc['v'][:])
nc.close()
# create an in-memory Dataset and retrieve memory buffer
# estimated size is 1028 bytes - this is actually only
# used if format is NETCDF3 (ignored for NETCDF4/HDF5 files).
nc = Dataset('inmemory.nc', mode='w',memory=1028)
d = nc.createDimension('x',None)
v = nc.createVariable('v',np.int32,'x')
v[0:5] = np.arange(5)
nc_buf = nc.close() # close returns memoryview
print(type(nc_buf))
# save nc_buf to disk, read it back in and check.
f = open('inmemory.nc', 'wb')
f.write(nc_buf); f.close()
nc = Dataset('inmemory.nc')
print(nc)
print(nc['v'][:])
nc.close()
| 13,476 | 36.645251 | 98 | py |
netcdf4-python | netcdf4-python-master/examples/json_att.py | from netCDF4 import Dataset
import json
# example showing how python objects (lists, dicts, None, True)
# can be serialized as strings, saved as netCDF attributes,
# and then converted back to python objects using json.
ds = Dataset('json.nc', 'w')
ds.pythonatt1 = json.dumps(['foo', {'bar': ['baz', None, 1.0, 2]}])
ds.pythonatt2 = "true" # converted to bool
ds.pythonatt3 = "null" # converted to None
print(ds)
ds.close()
ds = Dataset('json.nc')
def convert_json(s):
    # return the decoded python object if s is valid JSON,
    # otherwise return the string unchanged.
    try:
        return json.loads(s)
    except ValueError:
        return s
x = convert_json(ds.pythonatt1)
print(type(x))
print(x)
print(convert_json(ds.pythonatt2))
print(convert_json(ds.pythonatt3))
ds.close()
| 688 | 26.56 | 68 | py |
netcdf4-python | netcdf4-python-master/examples/threaded_read.py | from netCDF4 import Dataset
from numpy.testing import assert_array_equal, assert_array_almost_equal
import numpy as np
import threading
import queue
import time
# demonstrate reading of different files from different threads.
# Releasing the Global Interpreter Lock (GIL) when calling the
# netcdf C library for read operations speeds up the reads
# when threads are used (issue 369).
# Test script contributed by Ryan May of Unidata.
# Make some files
nfiles = 4
fnames = []; datal = []
for i in range(nfiles):
fname = 'test%d.nc' % i
fnames.append(fname)
nc = Dataset(fname, 'w')
data = np.random.randn(500, 500, 500)
datal.append(data)
nc.createDimension('x', 500)
nc.createDimension('y', 500)
nc.createDimension('z', 500)
var = nc.createVariable('grid', 'f', ('x', 'y', 'z'))
var[:] = data
nc.close()
# Queue them up
items = queue.Queue()
for data,fname in zip(datal,fnames):
items.put(fname)
# Function for threads to use
def get_data(serial=None):
if serial is None: # if not called from a thread
fname = items.get()
else:
fname = fnames[serial]
nc = Dataset(fname, 'r')
data2 = nc.variables['grid'][:]
# make sure the data is correct
#assert_array_almost_equal(data2,datal[int(fname[4])])
nc.close()
if serial is None:
items.task_done()
# Time it (no threading).
start = time.time()
for i in range(nfiles):
get_data(serial=i)
end = time.time()
print('no threads, time = ',end - start)
# with threading.
start = time.time()
for i in range(nfiles):
threading.Thread(target=get_data).start()
items.join()
end = time.time()
print('with threading, time = ',end - start)
| 1,690 | 25.84127 | 71 | py |
netcdf4-python | netcdf4-python-master/examples/mpi_example_compressed.py | # to run: mpirun -np 4 python mpi_example_compressed.py
import sys
from mpi4py import MPI
import numpy as np
from netCDF4 import Dataset
rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run)
nc = Dataset('parallel_test_compressed.nc', 'w', parallel=True)
d = nc.createDimension('dim',4)
v = nc.createVariable('var', np.int32, 'dim', zlib=True)
v[:] = np.arange(4)
nc.close()
# read compressed files in parallel, check the data, try to rewrite some data
nc = Dataset('parallel_test_compressed.nc', 'a', parallel=True)
v = nc['var']
assert rank==v[rank]
v.set_collective(True) # issue #1108 (var must be in collective mode or write will fail)
v[rank]=2*rank
nc.close()
| 690 | 35.368421 | 88 | py |
netcdf4-python | netcdf4-python-master/examples/bench.py | # benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
def write_netcdf(filename,zlib=False,least_significant_digit=None,format='NETCDF4'):
file = netCDF4.Dataset(filename,'w',format=format)
file.createDimension('n1', n1dim)
file.createDimension('n2', n2dim)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data', 'f8',('n1','n2','n3','n4'),zlib=zlib,least_significant_digit=least_significant_digit)
foo[:] = array
file.close()
def read_netcdf(filename):
file = netCDF4.Dataset(filename)
data = file.variables['data'][:]
file.close()
for format in ['NETCDF3_CLASSIC','NETCDF3_64BIT','NETCDF4_CLASSIC','NETCDF4']:
sys.stdout.write('testing file format %s ...\n' % format)
# writing, no compression.
t = Timer("write_netcdf('test1.nc',format='%s')" % format,"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test1.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
| 1,607 | 37.285714 | 123 | py |
netcdf4-python | netcdf4-python-master/examples/bench_compress3.py | # benchmark reads and writes of real 500 hPa geopotential height data.
# tests zlib compression at a range of levels and least_significant_digit values (NETCDF4 format).
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# use real data.
URL="http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/pressure/hgt.1990.nc"
nc = netCDF4.Dataset(URL)
# use real 500 hPa geopotential height data.
n1dim = 100
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s random array ..\n'%(n1dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n\n' % ntrials)
print(nc)
print(nc.variables['hgt'])
array = nc.variables['hgt'][0:n1dim,5,:,:]
print(array.min(), array.max(), array.shape, array.dtype)
def write_netcdf(filename,complevel,lsd):
file = netCDF4.Dataset(filename,'w',format='NETCDF4')
file.createDimension('n1', None)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f4',('n1','n3','n4'),\
zlib=True,shuffle=True,complevel=complevel,\
least_significant_digit=lsd)
foo[:] = array
file.close()
def read_netcdf(filename):
file = netCDF4.Dataset(filename)
data = file.variables['data'][:]
file.close()
lsd = None
sys.stdout.write('using least_significant_digit %s\n\n' % lsd)
for complevel in range(0,10,2):
sys.stdout.write('testing compression with complevel %s...\n' % complevel)
# writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
complevel = 4
sys.stdout.write('\nusing complevel %s\n\n' % complevel)
for lsd in range(0,6):
sys.stdout.write('testing compression with least_significant_digit %s..\n'\
% lsd)
# writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
| 2,788 | 37.205479 | 100 | py |
netcdf4-python | netcdf4-python-master/examples/bench_compress.py | # benchmark reads and writes with and without zlib compression.
# tests combinations of the zlib and shuffle filters at different compression levels (NETCDF4 format).
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n' % ntrials)
array = netCDF4.utils._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4)
def write_netcdf(filename,zlib=False,shuffle=False,complevel=6):
file = netCDF4.Dataset(filename,'w',format='NETCDF4')
file.createDimension('n1', n1dim)
file.createDimension('n2', n2dim)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f8',('n1','n2','n3','n4'),zlib=zlib,shuffle=shuffle,complevel=complevel)
foo[:] = array
file.close()
def read_netcdf(filename):
file = netCDF4.Dataset(filename)
data = file.variables['data'][:]
file.close()
for compress_kwargs in ["zlib=False,shuffle=False","zlib=True,shuffle=False",
"zlib=True,shuffle=True","zlib=True,shuffle=True,complevel=2"]:
sys.stdout.write('testing compression %s...\n' % repr(compress_kwargs))
# writing.
t = Timer("write_netcdf('test.nc',%s)" % compress_kwargs,"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
| 1,896 | 38.520833 | 106 | py |
netcdf4-python | netcdf4-python-master/examples/subset.py | # use 'orthogonal indexing' feature to subselect data over CONUS.
import netCDF4
import numpy as np
import matplotlib.pyplot as plt
# use real data from CFS reanalysis.
# note: we're reading GRIB2 data!
URL="http://nomads.ncdc.noaa.gov/thredds/dodsC/modeldata/cmd_flxf/2010/201007/20100701/flxf00.gdas.2010070100.grb2"
nc = netCDF4.Dataset(URL)
lats = nc.variables['lat'][:]; lons = nc.variables['lon'][:]
latselect = np.logical_and(lats>25,lats<50)
lonselect = np.logical_and(lons>230,lons<305)
data = nc.variables['Soil_moisture_content'][0,0,latselect,lonselect]
plt.contourf(data[::-1]) # flip latitudes so they go south -> north
plt.show()
| 647 | 39.5 | 115 | py |
netcdf4-python | netcdf4-python-master/examples/mpi_example.py | # to run: mpirun -np 4 python mpi_example.py
import sys
from mpi4py import MPI
import numpy as np
from netCDF4 import Dataset
if len(sys.argv) == 2:
format = sys.argv[1]
else:
format = 'NETCDF4_CLASSIC'
rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run)
if rank == 0:
print('Creating file with format {}'.format(format))
nc = Dataset('parallel_test.nc', 'w', parallel=True, comm=MPI.COMM_WORLD,
info=MPI.Info(),format=format)
# below should work also - MPI_COMM_WORLD and MPI_INFO_NULL will be used.
#nc = Dataset('parallel_test.nc', 'w', parallel=True)
d = nc.createDimension('dim',4)
v = nc.createVariable('var', np.int32, 'dim')
v[rank] = rank
# switch to collective mode, rewrite the data.
v.set_collective(True)
v[rank] = rank
nc.close()
# reopen the file read-only, check the data
nc = Dataset('parallel_test.nc', parallel=True, comm=MPI.COMM_WORLD,
info=MPI.Info())
assert rank==nc['var'][rank]
nc.close()
# reopen the file in append mode, modify the data on the last rank.
nc = Dataset('parallel_test.nc', 'a',parallel=True, comm=MPI.COMM_WORLD,
info=MPI.Info())
v = nc['var'] # rebind v to the variable in the reopened Dataset
if rank == 3: v[rank] = 2*rank
nc.close()
# reopen the file read-only again, check the data.
# leave out the comm and info kwargs to check that the defaults
# (MPI_COMM_WORLD and MPI_INFO_NULL) work.
nc = Dataset('parallel_test.nc', parallel=True)
if rank == 3:
assert 2*rank==nc['var'][rank]
else:
assert rank==nc['var'][rank]
nc.close()
| 1,471 | 33.232558 | 76 | py |
netcdf4-python | netcdf4-python-master/examples/bench_compress4.py | # benchmark reads and writes of real 500 hPa geopotential height data.
# tests quantization via significant_digits with the 'BitGroom' and 'GranularBitRound' modes (NETCDF4 format).
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# use real data.
URL="http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/pressure/hgt.1990.nc"
nc = netCDF4.Dataset(URL)
# use real 500 hPa geopotential height data.
n1dim = 100
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s random array ..\n'%(n1dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n\n' % ntrials)
array = nc.variables['hgt'][0:n1dim,5,:,:]
def write_netcdf(filename,nsd,quantize_mode='BitGroom'):
file = netCDF4.Dataset(filename,'w',format='NETCDF4')
file.createDimension('n1', None)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f4',('n1','n3','n4'),\
zlib=True,shuffle=True,\
quantize_mode=quantize_mode,\
significant_digits=nsd)
foo[:] = array
file.close()
def read_netcdf(filename):
file = netCDF4.Dataset(filename)
data = file.variables['data'][:]
file.close()
for sigdigits in range(1,5,1):
sys.stdout.write('testing compression with significant_digits=%s...\n' %\
sigdigits)
write_netcdf('test.nc',sigdigits)
read_netcdf('test.nc')
# print out size of resulting files with standard quantization.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
sys.stdout.write("testing compression with significant_digits=%s and 'GranularBitRound'...\n" %\
sigdigits)
write_netcdf('test.nc',sigdigits,quantize_mode='GranularBitRound')
read_netcdf('test.nc')
# print out size of resulting files with alternate quantization.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
| 1,988 | 36.528302 | 100 | py |
netcdf4-python | netcdf4-python-master/src/netCDF4/utils.py | import sys
import numpy as np
from numpy import ma
from numpy.lib.stride_tricks import as_strided
import warnings
import getopt
import os
def _safecast(a,b):
# check to see if array a can be safely cast
# to array b. A little less picky than numpy.can_cast.
try:
is_safe = ((a == b) | (np.isnan(a) & np.isnan(b))).all()
#is_safe = np.allclose(a, b, equal_nan=True) # numpy 1.10.0
except:
try:
is_safe = (a == b).all() # string arrays.
except:
is_safe = False
return is_safe
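# Minimal illustration of _safecast semantics (not executed at import
# time): arrays that agree everywhere, with NaNs in matching positions,
# are considered safe to cast:
#   _safecast(np.array([1.0, np.nan]), np.array([1.0, np.nan])) -> True
#   _safecast(np.array([1.0, 2.0]), np.array([1.0, 3.0]))       -> False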
def _sortbylist(A,B):
# sort one list (A) using the values from another list (B)
return [A[i] for i in sorted(range(len(A)), key=B.__getitem__)]
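# Example: _sortbylist(['a', 'b', 'c'], [3, 1, 2]) returns ['b', 'c', 'a']
# (the elements of A ordered by the corresponding values in B).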
def _find_dim(grp, dimname):
# find Dimension instance given group and name.
# look in current group, and parents.
group = grp
dim = None
while 1:
try:
dim = group.dimensions[dimname]
break
except:
try:
group = group.parent
except:
raise ValueError("cannot find dimension %s in this group or parent groups" % dimname)
if dim is None:
raise KeyError("dimension %s not defined in group %s or any group in it's family tree" % (dimname, grp.path))
else:
return dim
def _walk_grps(topgrp):
"""Iterate through all (sub-) groups of topgrp, similar to os.walktree.
"""
yield topgrp.groups.values()
for grp in topgrp.groups.values():
yield from _walk_grps(grp)
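# Usage sketch (assuming an open netCDF4 Dataset `rootgrp`): print the
# path of every (sub-)group, depth-first:
#   for children in _walk_grps(rootgrp):
#       for grp in children:
#           print(grp.path)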
def _quantize(data,least_significant_digit):
"""
quantize data to improve compression. data is quantized using
around(scale*data)/scale, where scale is 2**bits, and bits is determined
from the least_significant_digit. For example, if
least_significant_digit=1, bits will be 4.
"""
precision = pow(10.,-least_significant_digit)
exp = np.log10(precision)
if exp < 0:
exp = int(np.floor(exp))
else:
exp = int(np.ceil(exp))
bits = np.ceil(np.log2(pow(10.,-exp)))
scale = pow(2.,bits)
datout = np.around(scale*data)/scale
if ma.isMA(datout):
datout.set_fill_value(data.fill_value)
return datout
else:
return datout
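# Worked example of the quantization above (values computed by hand):
# with least_significant_digit=1, precision=0.1, so bits=ceil(log2(10))=4
# and scale=2**4=16; data is rounded to multiples of 1/16=0.0625, which
# preserves 0.1 precision, e.g.
#   _quantize(np.array([0.123, 4.567]), 1) -> array([0.125 , 4.5625])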
def _StartCountStride(elem, shape, dimensions=None, grp=None, datashape=None,\
put=False, use_get_vars = True):
"""Return start, count, stride and indices needed to store/extract data
into/from a netCDF variable.
This function is used to convert a slicing expression into a form that is
compatible with the nc_get_vars function. Specifically, it needs
to interpret integers, slices, Ellipses, and 1-d sequences of integers
and booleans.
Numpy uses "broadcasting indexing" to handle array-valued indices.
"Broadcasting indexing" (a.k.a "fancy indexing") treats all multi-valued
indices together to allow arbitrary points to be extracted. The index
arrays can be multidimensional, and more than one can be specified in a
slice, as long as they can be "broadcast" against each other.
This style of indexing can be very powerful, but it is very hard
to understand, explain, and implement (and can lead to hard to find bugs).
Most other python packages and array processing
languages (such as netcdf4-python, xray, biggus, matlab and fortran)
use "orthogonal indexing" which only allows for 1-d index arrays and
treats these arrays of indices independently along each dimension.
The implementation of "orthogonal indexing" used here requires that
index arrays be 1-d boolean or integer. If integer arrays are used,
the index values must be sorted and contain no duplicates.
In summary, slicing netcdf4-python variable objects with 1-d integer or
boolean arrays is allowed, but may give a different result than slicing a
numpy array.
Numpy also supports slicing an array with a boolean array of the same
shape. For example x[x>0] returns a 1-d array with all the positive values of x.
This is also not supported in netcdf4-python, if x.ndim > 1.
    Orthogonal indexing can be used to select netcdf variable slices
    using the dimension variables. For example, you can use v[lat>60,lon<180]
    to fetch the elements of v obeying conditions on latitude and longitude.
    Allowing for this sort of simple variable subsetting is the reason we
    decided to deviate from numpy's slicing rules.
This function is used both by the __setitem__ and __getitem__ method of
the Variable class.
Parameters
----------
elem : tuple of integer, slice, ellipsis or 1-d boolean or integer
sequences used to slice the netCDF Variable (Variable[elem]).
shape : tuple containing the current shape of the netCDF variable.
    dimensions : sequence
      The names of the dimensions. Only needed by __setitem__.
grp : netCDF Group
The netCDF group to which the variable being set belongs to.
datashape : sequence
The shape of the data that is being stored. Only needed by __setitem__
put : True|False (default False). If called from __setitem__, put is True.
Returns
-------
start : ndarray (..., n)
A starting indices array of dimension n+1. The first n
dimensions identify different independent data chunks. The last dimension
can be read as the starting indices.
count : ndarray (..., n)
An array of dimension (n+1) storing the number of elements to get.
stride : ndarray (..., n)
An array of dimension (n+1) storing the steps between each datum.
indices : ndarray (..., n)
An array storing the indices describing the location of the
data chunk in the target/source array (__getitem__/__setitem__).
Notes:
netCDF data is accessed via the function:
nc_get_vars(grpid, varid, start, count, stride, data)
Assume that the variable has dimension n, then
start is a n-tuple that contains the indices at the beginning of data chunk.
count is a n-tuple that contains the number of elements to be accessed.
stride is a n-tuple that contains the step length between each element.
"""
# Adapted from pycdf (http://pysclint.sourceforge.net/pycdf)
    # by Andre Gosselin.
# Modified by David Huard to handle efficiently fancy indexing with
# sequences of integers or booleans.
nDims = len(shape)
if nDims == 0:
nDims = 1
shape = (1,)
# is there an unlimited dimension? (only defined for __setitem__)
if put:
hasunlim = False
unlimd={}
if dimensions:
for i in range(nDims):
dimname = dimensions[i]
# is this dimension unlimited?
# look in current group, and parents for dim.
dim = _find_dim(grp, dimname)
unlimd[dimname]=dim.isunlimited()
if unlimd[dimname]:
hasunlim = True
else:
hasunlim = False
# When a single array or (non-tuple) sequence of integers is given
# as a slice, assume it applies to the first dimension,
# and use ellipsis for remaining dimensions.
if np.iterable(elem):
if type(elem) == np.ndarray or (type(elem) != tuple and \
np.array([_is_int(e) for e in elem]).all()):
elem = [elem]
for n in range(len(elem)+1,nDims+1):
elem.append(slice(None,None,None))
else: # Convert single index to sequence
elem = [elem]
    # ensure there is at most 1 ellipsis
    # we cannot use elem.count(Ellipsis), since with fancy indexing
    # np.array() == Ellipsis would be evaluated, which gives "ValueError:
    # The truth value of an array with more than one element is ambiguous.
    # Use a.any() or a.all()".
if sum(1 for e in elem if e is Ellipsis) > 1:
raise IndexError("At most one ellipsis allowed in a slicing expression")
# replace boolean arrays with sequences of integers.
newElem = []
IndexErrorMsg=\
"only integers, slices (`:`), ellipsis (`...`), and 1-d integer or boolean arrays are valid indices"
i=0
for e in elem:
# string-like object try to cast to int
# needs to be done first, since strings are iterable and
# hard to distinguish from something castable to an iterable numpy array.
if type(e) in [str, bytes]:
try:
e = int(e)
except:
raise IndexError(IndexErrorMsg)
ea = np.asarray(e)
# Raise error if multidimensional indexing is used.
if ea.ndim > 1:
raise IndexError("Index cannot be multidimensional")
# set unlim to True if dimension is unlimited and put==True
# (called from __setitem__)
if hasunlim and put and dimensions:
try:
dimname = dimensions[i]
unlim = unlimd[dimname]
except IndexError: # more slices than dimensions (issue 371)
unlim = False
else:
unlim = False
# convert boolean index to integer array.
if np.iterable(ea) and ea.dtype.kind =='b':
# check that boolean array not too long
if not unlim and shape[i] != len(ea):
msg="""
Boolean array must have the same shape as the data along this dimension."""
raise IndexError(msg)
ea = np.flatnonzero(ea)
# an iterable (non-scalar) integer array.
if np.iterable(ea) and ea.dtype.kind == 'i':
# convert negative indices in 1d array to positive ones.
ea = np.where(ea < 0, ea + shape[i], ea)
if np.any(ea < 0):
raise IndexError("integer index out of range")
# if unlim, let integer index be longer than current dimension
# length.
if ea.shape != (0,):
elen = shape[i]
if unlim:
elen = max(ea.max()+1,elen)
if ea.max()+1 > elen:
msg="integer index exceeds dimension size"
raise IndexError(msg)
newElem.append(ea)
# integer scalar
elif ea.dtype.kind == 'i':
newElem.append(e)
# slice or ellipsis object
elif type(e) == slice or type(e) == type(Ellipsis):
if not use_get_vars and type(e) == slice and e.step not in [None,-1,1] and\
dimensions is not None and grp is not None:
# convert strided slice to integer sequence if possible
# (this will avoid nc_get_vars, which is slow - issue #680).
start = e.start if e.start is not None else 0
step = e.step
if e.stop is None and dimensions is not None and grp is not None:
stop = len(_find_dim(grp, dimensions[i]))
else:
stop = e.stop
if stop < 0:
stop = len(_find_dim(grp, dimensions[i])) + stop
try:
                    ee = np.arange(start,stop,step)
if len(ee) > 0:
e = ee
except:
pass
newElem.append(e)
else: # castable to a scalar int, otherwise invalid
try:
e = int(e)
newElem.append(e)
except:
raise IndexError(IndexErrorMsg)
if type(e)==type(Ellipsis):
i+=1+nDims-len(elem)
else:
i+=1
elem = newElem
# replace Ellipsis and integer arrays with slice objects, if possible.
newElem = []
for e in elem:
ea = np.asarray(e)
# Replace ellipsis with slices.
if type(e) == type(Ellipsis):
# The ellipsis stands for the missing dimensions.
newElem.extend((slice(None, None, None),) * (nDims - len(elem) + 1))
# Replace sequence of indices with slice object if possible.
elif np.iterable(e) and len(e) > 1:
start = e[0]
stop = e[-1]+1
step = e[1]-e[0]
try:
ee = range(start,stop,step)
except ValueError: # start, stop or step is not valid for a range
ee = False
if ee and len(e) == len(ee) and (e == np.arange(start,stop,step)).all():
# don't convert to slice unless abs(stride) == 1
# (nc_get_vars is very slow, issue #680)
if not use_get_vars and step not in [1,-1]:
newElem.append(e)
else:
newElem.append(slice(start,stop,step))
else:
newElem.append(e)
elif np.iterable(e) and len(e) == 1:
newElem.append(slice(e[0], e[0] + 1, 1))
else:
newElem.append(e)
elem = newElem
# If slice doesn't cover all dims, assume ellipsis for rest of dims.
if len(elem) < nDims:
for n in range(len(elem)+1,nDims+1):
elem.append(slice(None,None,None))
# make sure there are not too many dimensions in slice.
if len(elem) > nDims:
raise ValueError("slicing expression exceeds the number of dimensions of the variable")
# Compute the dimensions of the start, count, stride and indices arrays.
# The number of elements in the first n dimensions corresponds to the
# number of times the _get method will be called.
sdim = []
for i, e in enumerate(elem):
# at this stage e is a slice, a scalar integer, or a 1d integer array.
# integer array: _get call for each True value
if np.iterable(e):
sdim.append(len(e))
# Scalar int or slice, just a single _get call
else:
sdim.append(1)
# broadcast data shape when assigned to full variable (issue #919)
try:
fullslice = elem.count(slice(None,None,None)) == len(elem)
except: # fails if elem contains a numpy array.
fullslice = False
if fullslice and datashape and put and not hasunlim:
datashape = broadcasted_shape(shape, datashape)
# pad datashape with zeros for dimensions not being sliced (issue #906)
# only used when data covers slice over subset of dimensions
if datashape and len(datashape) != len(elem) and\
len(datashape) == sum(1 for e in elem if type(e) == slice):
datashapenew = (); i=0
for e in elem:
if type(e) != slice and not np.iterable(e): # scalar integer slice
datashapenew = datashapenew + (0,)
else: # slice object
datashapenew = datashapenew + (datashape[i],)
i+=1
datashape = datashapenew
# Create the start, count, stride and indices arrays.
sdim.append(max(nDims, 1))
start = np.empty(sdim, dtype=np.intp)
count = np.empty(sdim, dtype=np.intp)
stride = np.empty(sdim, dtype=np.intp)
indices = np.empty(sdim, dtype=object)
for i, e in enumerate(elem):
ea = np.asarray(e)
# set unlim to True if dimension is unlimited and put==True
# (called from __setitem__). Note: grp and dimensions must be set.
if hasunlim and put and dimensions:
dimname = dimensions[i]
unlim = unlimd[dimname]
else:
unlim = False
# SLICE #
if type(e) == slice:
# determine length parameter for slice.indices.
# shape[i] can be zero for unlim dim that hasn't been written to
# yet.
# length of slice may be longer than current shape
# if dimension is unlimited (and we are writing, not reading).
if unlim and e.stop is not None and e.stop > shape[i]:
length = e.stop
elif unlim and e.stop is None and datashape != ():
try:
if e.start is None:
length = datashape[i]
else:
length = e.start+datashape[i]
except IndexError:
raise IndexError("shape of data does not conform to slice")
else:
if unlim and datashape == () and len(dim) == 0:
# writing scalar along unlimited dimension using slicing
# syntax (var[:] = 1, when var.shape = ())
length = 1
else:
length = shape[i]
beg, end, inc = e.indices(length)
n = len(range(beg,end,inc))
start[...,i] = beg
count[...,i] = n
stride[...,i] = inc
indices[...,i] = slice(None)
# ITERABLE #
elif np.iterable(e) and np.array(e).dtype.kind in 'i': # Sequence of integers
start[...,i] = np.apply_along_axis(lambda x: e*x, i, np.ones(sdim[:-1]))
indices[...,i] = np.apply_along_axis(lambda x: np.arange(sdim[i])*x, i, np.ones(sdim[:-1], int))
count[...,i] = 1
stride[...,i] = 1
# all that's left is SCALAR INTEGER #
else:
if e >= 0:
start[...,i] = e
elif e < 0 and (-e <= shape[i]) :
start[...,i] = e+shape[i]
else:
raise IndexError("Index out of range")
count[...,i] = 1
stride[...,i] = 1
indices[...,i] = -1 # Use -1 instead of 0 to indicate that
# this dimension shall be squeezed.
return start, count, stride, indices#, out_shape
def _out_array_shape(count):
"""Return the output array shape given the count array created by getStartCountStride"""
s = list(count.shape[:-1])
out = []
for i, n in enumerate(s):
if n == 1 and count.size > 0:
c = count[..., i].ravel()[0] # All elements should be identical.
out.append(c)
else:
out.append(n)
return out
def _is_container(a):
# is object container-like? (can test for
# membership with "is in", but not a string)
try: 1 in a
except: return False
    if isinstance(a, (str, bytes)): return False
return True
def _is_int(a):
try:
return int(a) == a
except:
return False
def _tostr(s):
try:
ss = str(s)
except:
ss = s
return ss
def _getgrp(g,p):
grps = p.split("/")
for gname in grps:
if gname == "": continue
g = g.groups[gname]
return g
def ncinfo():
from netCDF4 import Dataset
usage = """
Print summary information about a netCDF file.
usage: %s [-h/--help] [-g grp or --group=grp] [-v var or --variable=var] [-d dim or --dimension=dim] filename
-h/--help -- Print usage message.
-g <group name> or --group=<group name> -- Print info for this group
(default is root group). Nested groups specified
using posix paths ("group1/group2/group3").
-v <variable name> or --variable=<variable name> -- Print info for this variable.
-d <dimension name> or --dimension=<dimension name> -- Print info for this dimension.
netcdf filename must be last argument.
\n""" % os.path.basename(sys.argv[0])
try:
opts, pargs = getopt.getopt(sys.argv[1:],'hv:g:d:',
['group=',
'variable=',
'dimension='])
except:
(type, value, traceback) = sys.exc_info()
sys.stdout.write("Error parsing the options. The error was: %s\n" % value)
sys.stderr.write(usage)
sys.exit(0)
# Get the options
group = None; var = None; dim=None
for option in opts:
if option[0] == '-h' or option[0] == '--help':
sys.stderr.write(usage)
sys.exit(0)
elif option[0] == '--group' or option[0] == '-g':
group = option[1]
elif option[0] == '--variable' or option[0] == '-v':
var = option[1]
elif option[0] == '--dimension' or option[0] == '-d':
dim = option[1]
else:
sys.stdout.write("%s: Unrecognized option\n" % option[0])
sys.stderr.write(usage)
sys.exit(0)
# filename passed as last argument
try:
filename = pargs[-1]
except IndexError:
sys.stdout.write("You need to pass netcdf filename!\n.")
sys.stderr.write(usage)
sys.exit(0)
f = Dataset(filename)
if group is None:
if var is None and dim is None:
print(f)
else:
if var is not None:
print(f.variables[var])
if dim is not None:
print(f.dimensions[dim])
else:
if var is None and dim is None:
print(_getgrp(f,group))
else:
g = _getgrp(f,group)
if var is not None:
print(g.variables[var])
if dim is not None:
                print(g.dimensions[dim])
f.close()
def _nc4tonc3(filename4,filename3,clobber=False,nchunk=10,quiet=False,format='NETCDF3_64BIT'):
"""convert a netcdf 4 file (filename4) in NETCDF4_CLASSIC format
to a netcdf 3 file (filename3) in NETCDF3_64BIT format."""
from netCDF4 import Dataset
ncfile4 = Dataset(filename4,'r')
if ncfile4.file_format != 'NETCDF4_CLASSIC':
raise OSError('input file must be in NETCDF4_CLASSIC format')
ncfile3 = Dataset(filename3,'w',clobber=clobber,format=format)
# create dimensions. Check for unlimited dim.
unlimdimname = False
unlimdim = None
# create global attributes.
if not quiet: sys.stdout.write('copying global attributes ..\n')
#for attname in ncfile4.ncattrs():
# setattr(ncfile3,attname,getattr(ncfile4,attname))
ncfile3.setncatts(ncfile4.__dict__)
if not quiet: sys.stdout.write('copying dimensions ..\n')
for dimname,dim in ncfile4.dimensions.items():
if dim.isunlimited():
unlimdimname = dimname
unlimdim = dim
ncfile3.createDimension(dimname,None)
else:
ncfile3.createDimension(dimname,len(dim))
# create variables.
for varname,ncvar in ncfile4.variables.items():
if not quiet:
sys.stdout.write('copying variable %s\n' % varname)
# is there an unlimited dimension?
if unlimdimname and unlimdimname in ncvar.dimensions:
hasunlimdim = True
else:
hasunlimdim = False
if hasattr(ncvar, '_FillValue'):
FillValue = ncvar._FillValue
else:
FillValue = None
var = ncfile3.createVariable(varname,ncvar.dtype,ncvar.dimensions,fill_value=FillValue)
# fill variable attributes.
attdict = ncvar.__dict__
if '_FillValue' in attdict:
del attdict['_FillValue']
var.setncatts(attdict)
#for attname in ncvar.ncattrs():
# if attname == '_FillValue': continue
# setattr(var,attname,getattr(ncvar,attname))
# fill variables with data.
if hasunlimdim: # has an unlim dim, loop over unlim dim index.
# range to copy
if nchunk:
start = 0; stop = len(unlimdim); step = nchunk
if step < 1:
step = 1
for n in range(start, stop, step):
nmax = n+nchunk
if nmax > len(unlimdim):
nmax=len(unlimdim)
var[n:nmax] = ncvar[n:nmax]
else:
var[0:len(unlimdim)] = ncvar[:]
else: # no unlim dim or 1-d variable, just copy all data at once.
var[:] = ncvar[:]
ncfile3.sync() # flush data to disk
# close files.
ncfile3.close()
ncfile4.close()
def nc4tonc3():
usage = """
Convert a netCDF 4 file (in NETCDF4_CLASSIC format) to netCDF 3 format.
usage: %s [-h/--help] [-o] [--chunk] netcdf4filename netcdf3filename
-h/--help -- Print usage message.
-o -- Overwrite destination file (default is to raise an error if output file already exists).
--quiet=(0|1) -- if 1, don't print diagnostic information.
--format -- netcdf3 format to use (NETCDF3_64BIT by default, can be set to NETCDF3_CLASSIC)
--chunk=(integer) -- number of records along unlimited dimension to
        write at once. Default 1000. Ignored if there is no unlimited
dimension. chunk=0 means write all the data at once.
\n""" % os.path.basename(sys.argv[0])
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'ho',
['format=','chunk=','quiet='])
except:
(type, value, traceback) = sys.exc_info()
sys.stdout.write("Error parsing the options. The error was: %s\n" % value)
sys.stderr.write(usage)
sys.exit(0)
# default options
quiet = 0
chunk = 1000
format = 'NETCDF3_64BIT'
overwritefile = 0
# Get the options
for option in opts:
if option[0] == '-h' or option[0] == '--help':
sys.stderr.write(usage)
sys.exit(0)
elif option[0] == '-o':
overwritefile = 1
elif option[0] == '--quiet':
quiet = int(option[1])
elif option[0] == '--format':
format = option[1]
elif option[0] == '--chunk':
chunk = int(option[1])
else:
sys.stdout.write("%s : Unrecognized option\n" % options[0])
sys.stderr.write(usage)
sys.exit(0)
# if we pass a number of files different from 2, abort
if len(pargs) < 2 or len(pargs) > 2:
sys.stdout.write("You need to pass both source and destination!\n.")
sys.stderr.write(usage)
sys.exit(0)
# Catch the files passed as the last arguments
filename4 = pargs[0]
filename3 = pargs[1]
# copy the data from filename4 to filename3.
_nc4tonc3(filename4,filename3,clobber=overwritefile,quiet=quiet,format=format)
def _nc3tonc4(filename3,filename4,unpackshort=True,
zlib=True,complevel=6,shuffle=True,fletcher32=False,
clobber=False,lsd_dict=None,nchunk=10,quiet=False,classic=0,
vars=None,istart=0,istop=-1):
"""convert a netcdf 3 file (filename3) to a netcdf 4 file
The default format is 'NETCDF4', but can be set
to NETCDF4_CLASSIC if classic=1.
If unpackshort=True, variables stored as short
    integers with a scale and offset are unpacked to floats
    in the netcdf 4 file. If the lsd_dict is not None, variable names
corresponding to the keys of the dict will be truncated to the decimal place
specified by the values of the dict. This improves compression by
    making it 'lossy'.
If vars is not None, only variable names in the list
will be copied (plus all the dimension variables).
The zlib, complevel and shuffle keywords control
how the compression is done."""
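    # Example call (a sketch; filenames and the choice of variable are
    # hypothetical): copy only the variable 'temp', truncating it to 2
    # decimal places to improve compression:
    #   _nc3tonc4('in3.nc', 'out4.nc', vars=['temp'], lsd_dict={'temp': 2})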
from netCDF4 import Dataset
ncfile3 = Dataset(filename3,'r')
if classic:
ncfile4 = Dataset(filename4,'w',clobber=clobber,format='NETCDF4_CLASSIC')
else:
ncfile4 = Dataset(filename4,'w',clobber=clobber,format='NETCDF4')
mval = 1.e30 # missing value if unpackshort=True
# create dimensions. Check for unlimited dim.
unlimdimname = False
unlimdim = None
# create global attributes.
if not quiet: sys.stdout.write('copying global attributes ..\n')
#for attname in ncfile3.ncattrs():
# setattr(ncfile4,attname,getattr(ncfile3,attname))
ncfile4.setncatts(ncfile3.__dict__)
if not quiet: sys.stdout.write('copying dimensions ..\n')
for dimname,dim in ncfile3.dimensions.items():
if dim.isunlimited():
unlimdimname = dimname
unlimdim = dim
ncfile4.createDimension(dimname,None)
if istop == -1: istop=len(unlimdim)
else:
ncfile4.createDimension(dimname,len(dim))
# create variables.
if vars is None:
varnames = ncfile3.variables.keys()
else:
# variables to copy specified
varnames = vars
# add dimension variables
for dimname in ncfile3.dimensions.keys():
if dimname in ncfile3.variables.keys() and\
dimname not in varnames:
varnames.append(dimname)
for varname in varnames:
ncvar = ncfile3.variables[varname]
if not quiet: sys.stdout.write('copying variable %s\n' % varname)
# quantize data?
if lsd_dict is not None and varname in lsd_dict:
lsd = lsd_dict[varname]
if not quiet: sys.stdout.write('truncating to least_significant_digit = %d\n'%lsd)
else:
lsd = None # no quantization.
# unpack short integers to floats?
if unpackshort and hasattr(ncvar,'scale_factor') and hasattr(ncvar,'add_offset'):
dounpackshort = True
datatype = 'f4'
else:
dounpackshort = False
datatype = ncvar.dtype
# is there an unlimited dimension?
if unlimdimname and unlimdimname in ncvar.dimensions:
hasunlimdim = True
else:
hasunlimdim = False
        if dounpackshort and not quiet:
            sys.stdout.write('unpacking short integers to floats ...\n')
# is there missing value?
if hasattr(ncvar, '_FillValue'):
fillvalue3 = ncvar._FillValue
elif hasattr(ncvar, 'missing_value'):
fillvalue3 = ncvar.missing_value
else:
fillvalue3 = None
if fillvalue3 is not None:
fillvalue4 = fillvalue3 if not dounpackshort else mval
else:
fillvalue4 = None
var = ncfile4.createVariable(varname,datatype,ncvar.dimensions, fill_value=fillvalue4, least_significant_digit=lsd,zlib=zlib,complevel=complevel,shuffle=shuffle,fletcher32=fletcher32)
# fill variable attributes.
attdict = ncvar.__dict__
if '_FillValue' in attdict: del attdict['_FillValue']
if dounpackshort and 'add_offset' in attdict:
del attdict['add_offset']
if dounpackshort and 'scale_factor' in attdict:
del attdict['scale_factor']
if dounpackshort and 'missing_value' in attdict:
attdict['missing_value'] = fillvalue4
var.setncatts(attdict)
# fill variables with data.
if hasunlimdim: # has an unlim dim, loop over unlim dim index.
# range to copy
if nchunk:
start = istart; stop = istop; step = nchunk
if step < 1: step = 1
for n in range(start, stop, step):
nmax = n+nchunk
if nmax > istop: nmax=istop
var[n-istart:nmax-istart] = ncvar[n:nmax]
else:
var[0:len(unlimdim)] = ncvar[:]
else: # no unlim dim or 1-d variable, just copy all data at once.
var[:] = ncvar[:]
ncfile4.sync() # flush data to disk
# close files.
ncfile3.close()
ncfile4.close()
def nc3tonc4():
usage = """
Convert a netCDF 3 file to netCDF 4 format, optionally
unpacking variables packed as short integers (with scale_factor and add_offset)
to floats, and adding zlib compression (with the HDF5 shuffle filter and fletcher32 checksum).
Data may also be quantized (truncated) to a specified precision to improve compression.
usage: %s [-h/--help] [-o] [--vars=var1,var2,..] [--zlib=(0|1)] [--complevel=(1-9)] [--shuffle=(0|1)] [--fletcher32=(0|1)] [--unpackshort=(0|1)] [--quantize=var1=n1,var2=n2,..] netcdf3filename netcdf4filename
-h/--help -- Print usage message.
-o -- Overwrite destination file (default is to raise an error if output file already exists).
--vars -- comma separated list of variable names to copy (default is to copy
all variables)
--classic=(0|1) -- use NETCDF4_CLASSIC format instead of NETCDF4 (default 1)
--zlib=(0|1) -- Activate (or disable) zlib compression (default is activate).
--complevel=(1-9) -- Set zlib compression level (6 is default).
--shuffle=(0|1) -- Activate (or disable) the shuffle filter (active by default).
--fletcher32=(0|1) -- Activate (or disable) the fletcher32 checksum (not
active by default).
--unpackshort=(0|1) -- Unpack short integer variables to float variables
using scale_factor and add_offset netCDF variable attributes (active by default).
--quantize=(comma separated list of "variable name=integer" pairs) --
Truncate the data in the specified variables to a given decimal precision.
For example, 'speed=2, height=-2, temp=0' will cause the variable
'speed' to be truncated to a precision of 0.01, 'height' to a precision of 100
and 'temp' to 1. This can significantly improve compression. The default
is not to quantize any of the variables.
--quiet=(0|1) -- if 1, don't print diagnostic information.
--chunk=(integer) -- number of records along unlimited dimension to
write at once. Default 10. Ignored if there is no unlimited
dimension. chunk=0 means write all the data at once.
--istart=(integer) -- number of record to start at along unlimited dimension.
Default 0. Ignored if there is no unlimited dimension.
--istop=(integer) -- number of record to stop at along unlimited dimension.
Default -1. Ignored if there is no unlimited dimension.
\n""" % os.path.basename(sys.argv[0])
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'ho',
['classic=',
'vars=',
'zlib=',
'quiet=',
'complevel=',
'shuffle=',
'fletcher32=',
'unpackshort=',
'quantize=',
'chunk=',
'istart=',
'istop='])
    except getopt.GetoptError as e:
        sys.stdout.write("Error parsing the options. The error was: %s\n" % e)
        sys.stderr.write(usage)
        sys.exit(0)
# default options
overwritefile = 0
complevel = 6
classic = 1
zlib = 1
shuffle = 1
fletcher32 = 0
unpackshort = 1
vars = None
quantize = None
quiet = 0
chunk = 1000
istart = 0
istop = -1
# Get the options
for option in opts:
if option[0] == '-h' or option[0] == '--help':
sys.stderr.write(usage)
sys.exit(0)
elif option[0] == '-o':
overwritefile = 1
elif option[0] == '--classic':
classic = int(option[1])
elif option[0] == '--zlib':
zlib = int(option[1])
elif option[0] == '--quiet':
quiet = int(option[1])
elif option[0] == '--complevel':
complevel = int(option[1])
elif option[0] == '--shuffle':
shuffle = int(option[1])
elif option[0] == '--fletcher32':
fletcher32 = int(option[1])
elif option[0] == '--unpackshort':
unpackshort = int(option[1])
elif option[0] == '--chunk':
chunk = int(option[1])
elif option[0] == '--vars':
vars = option[1]
elif option[0] == '--quantize':
quantize = option[1]
elif option[0] == '--istart':
istart = int(option[1])
elif option[0] == '--istop':
istop = int(option[1])
else:
sys.stdout.write("%s: Unrecognized option\n" % option[0])
sys.stderr.write(usage)
sys.exit(0)
# if we pass a number of files different from 2, abort
    if len(pargs) != 2:
        sys.stdout.write("You need to pass both source and destination!\n")
sys.stderr.write(usage)
sys.exit(0)
# Catch the files passed as the last arguments
filename3 = pargs[0]
filename4 = pargs[1]
# Parse the quantize option, create a dictionary from key/value pairs.
if quantize is not None:
lsd_dict = {}
for p in quantize.split(','):
kv = p.split('=')
            lsd_dict[kv[0].strip()] = int(kv[1])
else:
lsd_dict=None
# Parse the vars option, create a list of variable names.
if vars is not None:
vars = vars.split(',')
# copy the data from filename3 to filename4.
_nc3tonc4(filename3,filename4,unpackshort=unpackshort,
zlib=zlib,complevel=complevel,shuffle=shuffle,
fletcher32=fletcher32,clobber=overwritefile,lsd_dict=lsd_dict,
nchunk=chunk,quiet=quiet,vars=vars,classic=classic,
istart=istart,istop=istop)
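# Example invocation (hypothetical file names; assumes this module is exposed
# as the nc3tonc4 console script):
#   nc3tonc4 -o --zlib=1 --complevel=6 --quantize=speed=2 input3.nc output4.nc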
def broadcasted_shape(shp1, shp2):
# determine shape of array of shp1 and shp2 broadcast against one another.
x = np.array([1])
# trick to define array with certain shape that doesn't allocate all the
# memory.
a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
return np.broadcast(a, b).shape
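# Behavior sketch for broadcasted_shape, following numpy's broadcasting rules
# (values illustrative, not taken from the test suite):
#   broadcasted_shape((3, 1), (1, 4))  ->  (3, 4)
#   broadcasted_shape((5,), (2, 5))    ->  (2, 5)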
| 37,383 | 37.699793 | 209 | py |
netcdf4-python | netcdf4-python-master/src/netCDF4/__init__.py | # init for netCDF4. package
# Docstring comes from extension module _netCDF4.
from ._netCDF4 import *
# Need explicit imports for names beginning with underscores
from ._netCDF4 import __doc__
from ._netCDF4 import (__version__, __netcdf4libversion__, __hdf5libversion__,
__has_rename_grp__, __has_nc_inq_path__,
__has_nc_inq_format_extended__, __has_nc_open_mem__,
__has_nc_create_mem__, __has_cdf5_format__,
__has_parallel4_support__, __has_pnetcdf_support__,
__has_quantization_support__, __has_zstandard_support__,
__has_bzip2_support__, __has_blosc_support__, __has_szip_support__,
__has_set_alignment__)
import os
__all__ =\
['Dataset','Variable','Dimension','Group','MFDataset','MFTime','CompoundType','VLType','date2num','num2date','date2index','stringtochar','chartostring','stringtoarr','getlibversion','EnumType','get_chunk_cache','set_chunk_cache','set_alignment','get_alignment']
__pdoc__ = {
'utils': False,
}
# if HDF5_PLUGIN_PATH not set, point to package path if plugins live there
pluginpath = os.path.join(__path__[0],'plugins')
if 'HDF5_PLUGIN_PATH' not in os.environ and\
(os.path.exists(os.path.join(pluginpath,'lib__nczhdf5filters.so')) or\
os.path.exists(os.path.join(pluginpath,'lib__nczhdf5filters.dylib'))):
os.environ['HDF5_PLUGIN_PATH']=pluginpath
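# Users can still point HDF5 at their own filter plugins by exporting
# HDF5_PLUGIN_PATH before importing netCDF4 (the path below is hypothetical):
#   export HDF5_PLUGIN_PATH=/opt/hdf5/plugins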
| 1,455 | 55 | 261 | py |
netcdf4-python | netcdf4-python-master/test/tst_masked.py | import sys
import unittest
import os
import tempfile
import numpy as np
from numpy import ma
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.random.mtrand import uniform
import netCDF4
from numpy.ma import masked_all
# test automatic conversion of masked arrays, and
# packing/unpacking of short ints.
# create 1-d random arrays of length ndim.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
ndim = 10
ranarr = 100.*uniform(size=(ndim))
ranarr2 = 100.*uniform(size=(ndim))
# used for checking vector missing_values
arr3 = np.linspace(0,9,ndim)
mask = np.zeros(ndim,np.bool_); mask[-1]=True; mask[-2]=True
marr3 = np.ma.array(arr3, mask=mask, dtype=np.int32)
packeddata = 10.*uniform(size=(ndim))
missing_value = -9999.
missing_value2 = np.nan
missing_value3 = [8,9]
ranarr[::2] = missing_value
ranarr2[::2] = missing_value2
np.seterr(invalid='ignore') # silence warnings from ma.masked_values
maskedarr = ma.masked_values(ranarr,missing_value)
#maskedarr2 = ma.masked_values(ranarr2,missing_value2)
maskedarr2 = ma.masked_invalid(ranarr2)
scale_factor = (packeddata.max()-packeddata.min())/(2.*32766.)
add_offset = 0.5*(packeddata.max()+packeddata.min())
packeddata2 = np.around((packeddata-add_offset)/scale_factor).astype('i2')
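# Packing sketch (standard netCDF scale/offset convention): the packed int16
# values are round((data - add_offset)/scale_factor), and unpacking computes
# packed*scale_factor + add_offset, so packeddata2 spans roughly -32766..32766.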
class PrimitiveTypesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
self.file2 = FILE_NAME2
file = netCDF4.Dataset(self.file,'w')
file.createDimension('n', ndim)
foo = file.createVariable('maskeddata', 'f8', ('n',))
foo2 = file.createVariable('maskeddata2', 'f8', ('n',))
foo3 = file.createVariable('maskeddata3', 'i4', ('n',))
foo.missing_value = missing_value
foo.set_auto_maskandscale(True)
foo2.missing_value = missing_value2
foo2.set_auto_maskandscale(True)
foo3.missing_value = missing_value3
foo3.set_auto_maskandscale(True)
bar = file.createVariable('packeddata', 'i2', ('n',))
bar.set_auto_maskandscale(True)
bar.scale_factor = scale_factor
bar.add_offset = add_offset
foo[:] = maskedarr
foo2[:] = maskedarr2
foo3[:] = arr3
bar[:] = packeddata
# added to test fix to issue 46
doh = file.createVariable('packeddata2','i2','n')
doh.scale_factor = 0.1
doh.add_offset = 0.
doh[0] = 1.1
# added to test fix to issue 381
doh2 = file.createVariable('packeddata3','i2','n')
doh2.add_offset = 1.
doh2[0] = 1.
# added test for issue 515
file.createDimension('x',1)
v = file.createVariable('v',np.float64,'x',fill_value=-9999)
file.close()
# issue #972: when auto_fill off byte arrays (u1,i1) should
# not be masked, but other datatypes should.
dataset = netCDF4.Dataset(self.file2, "w")
dataset.set_fill_off()
dim = dataset.createDimension("dim", 10)
var1 = dataset.createVariable("var1", "f8", (dim.name,))
var1[:] = masked_all((10,), "f8")
var2 = dataset.createVariable("var2", "u1", (dim.name,))
var2[:] = masked_all((10,), "u1")
dataset.close()
        # issue #1152: if missing_value is a string that can't
        # be cast to the variable type, issue a warning instead
        # of raising an exception when a slice is auto-converted
        # to a masked array
dataset = netCDF4.Dataset('issue1152.nc')
data = dataset['v'][:]
dataset.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
os.remove(self.file2)
def runTest(self):
"""testing auto-conversion of masked arrays and packed integers"""
file = netCDF4.Dataset(self.file)
datamasked = file.variables['maskeddata']
datamasked2 = file.variables['maskeddata2']
datamasked3 = file.variables['maskeddata3']
datapacked = file.variables['packeddata']
datapacked2 = file.variables['packeddata2']
datapacked3 = file.variables['packeddata3']
# check missing_value, scale_factor and add_offset attributes.
assert datamasked.missing_value == missing_value
assert datapacked.scale_factor == scale_factor
assert datapacked.add_offset == add_offset
# no auto-conversion.
datamasked.set_auto_maskandscale(False)
datamasked2.set_auto_maskandscale(False)
datapacked.set_auto_maskandscale(False)
assert_array_equal(datapacked[:],packeddata2)
assert_array_equal(datamasked3[:],marr3)
assert_array_almost_equal(datamasked[:],ranarr)
assert_array_almost_equal(datamasked2[:],ranarr2)
# auto-conversion
datamasked.set_auto_maskandscale(True)
datamasked2.set_auto_maskandscale(True)
datapacked.set_auto_maskandscale(True)
datapacked2.set_auto_maskandscale(False)
assert_array_almost_equal(datamasked[:].filled(),ranarr)
assert_array_almost_equal(datamasked2[:].filled(),ranarr2)
assert_array_almost_equal(datapacked[:],packeddata,decimal=4)
assert(datapacked3[:].dtype == np.float64)
# added to test fix to issue 46 (result before r865 was 10)
assert_array_equal(datapacked2[0],11)
# added test for issue 515
assert(file['v'][0] is np.ma.masked)
file.close()
# issue 766
np.seterr(invalid='raise')
f = netCDF4.Dataset(self.file, 'w')
f.createDimension('dimension', 2)
f.createVariable('variable', np.float32, dimensions=('dimension',))
f['variable'][:] = np.nan
data = f['variable'][:] # should not raise an error
f.close()
# issue #972
dataset = netCDF4.Dataset(self.file2, "r")
var1 = dataset.variables["var1"]
var2 = dataset.variables["var2"]
assert var1[:].mask.all()
        assert not var2[:].mask.any()
dataset.close()
if __name__ == '__main__':
unittest.main()
| 6,127 | 38.792208 | 75 | py |
netcdf4-python | netcdf4-python-master/test/tst_cdf5.py | from netCDF4 import Dataset
import numpy as np
import sys, os, unittest, tempfile
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
dimsize = np.iinfo(np.int32).max*2 # only allowed in CDF5
ndim = 100
arrdata = np.random.randint(np.iinfo(np.uint8).min,np.iinfo(np.uint8).max,size=ndim)
class test_cdf5(unittest.TestCase):
def setUp(self):
self.netcdf_file = FILE_NAME
nc = Dataset(self.netcdf_file,'w',format='NETCDF3_64BIT_DATA')
# create a 64-bit dimension
d = nc.createDimension('dim',dimsize) # 64-bit dimension
# create an 8-bit unsigned integer variable
v = nc.createVariable('var',np.uint8,'dim')
v[:ndim] = arrdata
# create a 64-bit integer attribute (issue #878)
nc.setncattr('int64_attr', np.int64(-9223372036854775806))
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.netcdf_file)
def runTest(self):
"""testing NETCDF3_64BIT_DATA format (CDF-5)"""
f = Dataset(self.netcdf_file, 'r')
assert f.dimensions['dim'].size == dimsize
assert_array_equal(arrdata, f.variables['var'][:ndim])
assert (type(f.int64_attr) == np.int64)
f.close()
if __name__ == '__main__':
unittest.main()
| 1,347 | 33.564103 | 84 | py |
netcdf4-python | netcdf4-python-master/test/tst_shape.py | from netCDF4 import Dataset
import tempfile, unittest, os
import numpy as np
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
xdim=None; ydim=121; zdim=169
datashape = (ydim,zdim)
data = np.ones(datashape,dtype=np.float64)
class ShapeTestCase(unittest.TestCase):
def setUp(self):
self.file = file_name
f = Dataset(file_name,'w')
f.createDimension('x',xdim)
f.createDimension('y',ydim)
f.createDimension('z',zdim)
v = f.createVariable('data',np.float64,('x','y','z'))
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""test for issue 90 (array shape should not be modified by
assignment to netCDF variable)"""
f = Dataset(self.file, 'a')
v = f.variables['data']
v[0] = data
# make sure shape of data array
# is not changed by assigning it
# to a netcdf var with one more dimension (issue 90)
assert(data.shape == datashape)
f.close()
if __name__ == '__main__':
unittest.main()
| 1,126 | 27.897436 | 72 | py |
netcdf4-python | netcdf4-python-master/test/tst_dap.py | import unittest
import netCDF4
import numpy as np
from datetime import datetime, timedelta
from numpy.testing import assert_array_almost_equal
# test accessing data over http with opendap.
yesterday = datetime.utcnow() - timedelta(days=1)
URL = f'http://nomads.ncep.noaa.gov/dods/gfs_1p00/gfs{yesterday:%Y%m%d}/gfs_1p00_00z'
URL_https = 'https://www.neracoos.org/erddap/griddap/WW3_EastCoast_latest'
varname = 'hgtsfc'
data_min = -40; data_max = 5900
varshape = (181, 360)
class DapTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def runTest(self):
"""testing access of data over http using opendap"""
ncfile = netCDF4.Dataset(URL)
assert varname in ncfile.variables.keys()
var = ncfile.variables[varname]
data = var[0,...]
assert data.shape == varshape
assert(np.abs(data.min()-data_min) < 10)
assert(np.abs(data.max()-data_max) < 100)
ncfile.close()
# test https support (linked curl lib must built with openssl support)
ncfile = netCDF4.Dataset(URL_https)
assert(ncfile['hs'].long_name=='Significant Wave Height')
ncfile.close()
if __name__ == '__main__':
unittest.main()
| 1,246 | 29.414634 | 85 | py |
netcdf4-python | netcdf4-python-master/test/tst_masked2.py | import sys
import unittest
import os
import tempfile
import numpy as np
from numpy import ma, seterr
from numpy.testing import assert_array_equal, assert_array_almost_equal
from netCDF4 import Dataset, default_fillvals
seterr(over='ignore') # don't print warning for overflow errors
# test automatic conversion of masked arrays, and
# packing/unpacking of short ints.
FILE_NAME1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME3 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
datacheck1 =\
ma.array([0,5000.0,4000.0,0],dtype=np.float64,mask=[True,False,False,True])
datacheck2 =\
ma.array([3000.0,5000.0,4000.0,0],dtype=np.float64,mask=[False,False,False,True])
datacheck3 =\
ma.array([3000.0,5000.0,0,2000.0],dtype=np.float64,mask=[False,False,True,False])
mask = [False,True,False,False]
datacheck4 = ma.array([1.5625,0,3.75,4.125],mask=mask,dtype=np.float32)
fillval = default_fillvals[datacheck4.dtype.str[1:]]
datacheck5 = np.array([1.5625,fillval,3.75,4.125],dtype=np.float32)
class PrimitiveTypesTestCase(unittest.TestCase):
def setUp(self):
self.files = [FILE_NAME1]
f = Dataset(FILE_NAME1,'w')
x = f.createDimension('x',None)
v = f.createVariable('v',np.int16,'x')
v.scale_factor = np.array(1,np.float32)
v.add_offset = np.array(32066,np.float32)
v.missing_value = np.array(-9999,v.dtype)
#v[0] not set, will be equal to _FillValue
v[1]=5000
v[2]=4000
v[3]=v.missing_value
f.close()
self.files.append(FILE_NAME2)
f = Dataset(FILE_NAME1,'r')
# create a new file, copy data, but change missing value and
# scale factor offset.
f2 = Dataset(FILE_NAME2,'w')
a = f2.createDimension('a',None)
b = f2.createVariable('b',np.int16,'a')
b.scale_factor = np.array(10.,np.float32)
b.add_offset = np.array(0,np.float32)
b.missing_value = np.array(9999,v.dtype)
b[:] = f.variables['v'][:]
f.close()
f2.close()
self.files.append(FILE_NAME3)
f = Dataset(FILE_NAME3,'w')
x = f.createDimension('x',None)
# create variable with lossy compression
v = f.createVariable('v',np.float32,'x',zlib=True,least_significant_digit=1)
# assign masked array to that variable with one missing value.
data =\
ma.array([1.5678,99.99,3.75145,4.127654],mask=np.array([False,True,False,False],np.bool_))
data.mask[1]=True
v[:] = data
f.close()
def tearDown(self):
# Remove the temporary files
for f in self.files:
os.remove(f)
def runTest(self):
"""testing auto-conversion of masked arrays and packed integers"""
f = Dataset(self.files[0])
data = f.variables['v'][:]
assert_array_almost_equal(data,datacheck1)
f.close()
f = Dataset(self.files[1])
data = f.variables['b'][:]
assert_array_almost_equal(data,datacheck1)
f.close()
f = Dataset(self.files[0],'a')
# change first element from _FillValue to actual data.
v = f.variables['v']
v[0]=3000
f.close()
f = Dataset(self.files[0],'r')
# read data back in, check.
data = f.variables['v'][:]
assert_array_almost_equal(data,datacheck2)
f.close()
f = Dataset(self.files[0],'a')
# change 3rd element to missing, 4 element to valid data.
v = f.variables['v']
data = v[:]
v[2]=-9999
v[3]=2000
f.close()
f = Dataset(self.files[0],'r')
# read data back in, check.
data = f.variables['v'][:]
assert_array_almost_equal(data,datacheck3)
f.close()
# check that masked arrays are handled correctly when lossy compression
# is used.
f = Dataset(self.files[2],'r')
data = f.variables['v'][:]
assert_array_almost_equal(data,datacheck4)
assert_array_almost_equal(data.filled(),datacheck5)
f.close()
if __name__ == '__main__':
unittest.main()
| 4,211 | 32.967742 | 98 | py |
netcdf4-python | netcdf4-python-master/test/tst_scaled.py | import unittest
import os
import tempfile
import numpy as np
from numpy import ma
from numpy.testing import assert_array_almost_equal
from netCDF4 import Dataset, default_fillvals
# Test automatic scaling of variables (set_auto_scale())
class SetAutoScaleTestBase(unittest.TestCase):
"""Base object for tests checking the functionality of set_auto_scale()"""
def setUp(self):
self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
self.fillval = default_fillvals["i2"]
self.missing_value = -9999
self.v = np.array([0, 5, 4, self.missing_value], dtype = "i2")
self.v_ma = ma.array([0, 5, 4, self.missing_value], dtype = "i2",
mask = [True, False, False, True], fill_value = self.fillval)
self.scale_factor = 10.
self.add_offset = 5.
self.v_scaled = self.v * self.scale_factor + self.add_offset
self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset
f = Dataset(self.testfile, 'w')
x = f.createDimension('x', None)
xx = f.createDimension('xx', 10)
v = f.createVariable('v', "i2", 'x')
vv = f.createVariable('vv', "i2", 'xx')
vv.add_offset=0; vv.scale_factor=np.float32(1.0)
v[:] = self.v
vv[:] = np.ones(10)
# Note: Scale factors are only added after writing, so that no auto-scaling takes place!
v.scale_factor = self.scale_factor
v.add_offset = self.add_offset
f.close()
def tearDown(self):
os.remove(self.testfile)
class SetAutoScaleFalse(SetAutoScaleTestBase):
def test_unmasked(self):
"""Testing (not) auto-scaling of variables for set_auto_scale(False)"""
f = Dataset(self.testfile, "r")
f.variables["v"].set_auto_scale(False)
v = f.variables["v"][:]
self.assertEqual(v.dtype, "i2")
self.assertTrue(isinstance(v, np.ndarray))
# issue 785: always return masked array by default
self.assertTrue(isinstance(v, ma.core.MaskedArray))
assert_array_almost_equal(v, self.v)
f.close()
def test_masked(self):
"""Testing auto-conversion of masked arrays for set_auto_mask(False) with masking"""
# Update test data file
f = Dataset(self.testfile, "a")
f.variables["v"].missing_value = self.missing_value
f.close()
# Note: Converting arrays to masked arrays is default if missing_value is present
f = Dataset(self.testfile, "r")
f.variables["v"].set_auto_scale(False)
v_ma = f.variables["v"][:]
self.assertEqual(v_ma.dtype, "i2")
self.assertTrue(isinstance(v_ma, np.ndarray))
self.assertTrue(isinstance(v_ma, ma.core.MaskedArray))
assert_array_almost_equal(v_ma, self.v_ma)
f.close()
class SetAutoScaleTrue(SetAutoScaleTestBase):
def test_unmasked(self):
"""Testing auto-scaling of variables for set_auto_scale(True)"""
f = Dataset(self.testfile)
f.variables["v"].set_auto_scale(True) # The default anyway...
v_scaled = f.variables['v'][:]
# issue 913
vv_scaled = f.variables['vv'][:]
self.assertEqual(vv_scaled.dtype,f.variables['vv'].scale_factor.dtype)
assert_array_almost_equal(vv_scaled, np.ones(10))
self.assertEqual(v_scaled.dtype, "f8")
self.assertTrue(isinstance(v_scaled, np.ndarray))
# issue 785: always return masked array by default
self.assertTrue(isinstance(v_scaled, ma.core.MaskedArray))
assert_array_almost_equal(v_scaled, self.v_scaled)
f.close()
def test_masked(self):
"""Testing auto-scaling of variables for set_auto_scale(True) with masking"""
# Update test data file
f = Dataset(self.testfile, "a")
f.variables["v"].missing_value = self.missing_value
f.close()
# Note: Converting arrays to masked arrays is default if missing_value is present
f = Dataset(self.testfile)
f.variables["v"].set_auto_scale(True) # The default anyway...
v_ma_scaled = f.variables['v'][:]
self.assertEqual(v_ma_scaled.dtype, "f8")
self.assertTrue(isinstance(v_ma_scaled, np.ndarray))
self.assertTrue(isinstance(v_ma_scaled, ma.core.MaskedArray))
assert_array_almost_equal(v_ma_scaled, self.v_ma_scaled)
f.close()
class WriteAutoScaleTest(SetAutoScaleTestBase):
def test_auto_scale_write(self):
"""Testing automatic packing to all kinds of integer types"""
def packparams(dmax, dmin, dtyp):
kind = dtyp[0]
n = int(dtyp[1]) * 8
scale_factor = (dmax - dmin) / (2**n - 1)
if kind == 'i':
add_offset = dmin + 2**(n-1) * scale_factor
elif kind == 'u':
add_offset = dmin
else:
                raise ValueError("unsupported dtype kind: %s" % kind)
return((add_offset, scale_factor))
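        # packparams sketch (illustrative): for dtyp='u1' and data spanning
        # [0.0, 1.0], scale_factor is 1/255 and add_offset is 0.0, so the
        # packed bytes use the full 0..255 range of the unsigned type.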
for dtyp in ['i1', 'i2', 'i4', 'u1', 'u2', 'u4']:
np.random.seed(456)
data = np.random.uniform(size=100)
f = Dataset(self.testfile, 'w')
f.createDimension('x')
#
# save auto_scaled
v = f.createVariable('v', dtyp, ('x',))
v.set_auto_scale(True) # redundant
v.add_offset, v.scale_factor = packparams(
np.max(data), np.min(data), dtyp)
v[:] = data
f.close()
#
# read back
f = Dataset(self.testfile, 'r')
v = f.variables['v']
v.set_auto_mask(False)
v.set_auto_scale(True) # redundant
vdata = v[:]
# error normalized by scale factor
maxerrnorm = np.max(np.abs((vdata - data) / v.scale_factor))
# 1e-5 accounts for floating point errors
assert(maxerrnorm < 0.5 + 1e-5)
f.close()
class GlobalSetAutoScaleTest(unittest.TestCase):
def setUp(self):
self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
f = Dataset(self.testfile, 'w')
grp1 = f.createGroup('Group1')
grp2 = f.createGroup('Group2')
f.createGroup('Group3') # empty group
f.createVariable('var0', "i2", ())
grp1.createVariable('var1', 'f8', ())
grp2.createVariable('var2', 'f4', ())
f.close()
def tearDown(self):
os.remove(self.testfile)
def runTest(self):
f = Dataset(self.testfile, "r")
# Default is both scaling and masking enabled
v0 = f.variables['var0']
v1 = f.groups['Group1'].variables['var1']
v2 = f.groups['Group2'].variables['var2']
self.assertTrue(v0.scale)
self.assertTrue(v0.mask)
self.assertTrue(v1.scale)
self.assertTrue(v1.mask)
self.assertTrue(v2.scale)
self.assertTrue(v2.mask)
# No auto-scaling
f.set_auto_scale(False)
self.assertFalse(v0.scale)
self.assertTrue(v0.mask)
self.assertFalse(v1.scale)
self.assertTrue(v1.mask)
self.assertFalse(v2.scale)
self.assertTrue(v2.mask)
f.close()
if __name__ == '__main__':
unittest.main()
| 7,326 | 27.960474 | 96 | py |
netcdf4-python | netcdf4-python-master/test/tst_refcount.py | import unittest, netCDF4, tempfile, os
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
class RefCountTestCase(unittest.TestCase):
def setUp(self):
nc = netCDF4.Dataset(file_name, mode='w', keepweakref=True, format='NETCDF4')
d = nc.createDimension('fred', 2000)
v = nc.createVariable('frank','f',('fred',))
self.file = file_name
self.nc = nc
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing garbage collection (issue 218)"""
# this should trigger garbage collection (__dealloc__ method)
del self.nc
# if __dealloc__ not called to close file, then this
# will fail with "Permission denied" error (since you can't
# open a file 'w' that is already open for writing).
        nc = netCDF4.Dataset(self.file, mode='w', format='NETCDF4')
        nc.close()
if __name__ == '__main__':
unittest.main()
| 978 | 32.758621 | 85 | py |
netcdf4-python | netcdf4-python-master/test/tst_unicodeatt.py | from netCDF4 import Dataset
import sys, unittest, os, tempfile
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
class UnicodeAttTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
nc = Dataset(self.file,'w')
# write as a utf-8 string
nc.stratt = b'\xe6\xb7\xb1\xe5\x85\xa5 Python'.decode('utf-8')
# write as raw bytes (decoded string is same as above with 'big5' encoding)
nc.stratt2 = b'\xb2`\xa4J Python'
# same as above, but attribute forced to be of type NC_STRING
nc.setncattr_string('stratt3',b'\xb2`\xa4J Python')
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing unicode attributes"""
nc = Dataset(self.file, 'r')
assert(nc.stratt.encode('utf-8') == b'\xe6\xb7\xb1\xe5\x85\xa5 Python')
stratt2 = nc.getncattr('stratt2',encoding='big5') # decodes using big5
stratt3 = nc.getncattr('stratt3',encoding='big5') # same as above
assert(stratt2.encode('big5') == b'\xb2`\xa4J Python')
assert(nc.stratt == stratt2) # decoded strings are the same
assert(nc.stratt == stratt3) # decoded strings are the same
nc.close()
if __name__ == '__main__':
unittest.main()
| 1,344 | 36.361111 | 83 | py |
netcdf4-python | netcdf4-python-master/test/tst_enum.py | import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
ENUM_NAME = 'cloud_t'
ENUM_BASETYPE = np.int8
VAR_NAME = 'primary_cloud'
ENUM_DICT = {'Altocumulus': 7, 'Missing': 127, 'Stratus': 2, 'Clear': 0,
'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1,
'Stratocumulus': 3}
datain = np.array([ENUM_DICT['Clear'],ENUM_DICT['Stratus'],ENUM_DICT['Cumulus'],\
ENUM_DICT['Missing'],ENUM_DICT['Cumulonimbus']],dtype=ENUM_BASETYPE)
datain_masked = np.ma.masked_values(datain,ENUM_DICT['Missing'])
class EnumTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
cloud_type = f.createEnumType(ENUM_BASETYPE,ENUM_NAME,ENUM_DICT)
# make sure KeyError raised if non-integer basetype used.
try:
            cloud_type2 = f.createEnumType(np.float32,ENUM_NAME,ENUM_DICT)
except KeyError:
pass
f.createDimension('time',None)
cloud_var =\
f.createVariable(VAR_NAME,cloud_type,'time',\
fill_value=ENUM_DICT['Missing'])
cloud_var[:] = datain_masked
# make sure ValueError raised if illegal value assigned to Enum var.
try:
cloud_var[cloud_var.shape[0]] = 99
except ValueError:
pass
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing enum data type"""
f = Dataset(self.file, 'r')
v = f.variables[VAR_NAME]
assert v.datatype.enum_dict == ENUM_DICT
assert list(f.enumtypes.keys()) == [ENUM_NAME]
assert f.enumtypes[ENUM_NAME].name == ENUM_NAME # issue 775
assert f.enumtypes[ENUM_NAME].dtype == ENUM_BASETYPE
assert v._FillValue == ENUM_DICT['Missing']
v.set_auto_mask(False)
data = v[:]
assert_array_equal(data, datain)
v.set_auto_mask(True) # check to see if auto masking works
data = v[:]
assert_array_equal(data, datain_masked)
assert_array_equal(data.mask, datain_masked.mask)
f.close()
class EnumDictTestCase(unittest.TestCase):
# issue 1128
def setUp(self):
DT = np.int16; BITS = 8
self.STORED_VAL = DT(2**BITS)
self.VAL_MAP = {f'bits_{n}': DT(2**n) for n in range(1,BITS+1)}
self.VAL_MAP['invalid'] = 0
self.file = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
        with Dataset(self.file, 'w') as nc:
# The enum is created with dtype=int16, so it will allow BITS values up to 15
et = nc.createEnumType(DT, 'etype', self.VAL_MAP)
ev = nc.createVariable('evar', et)
# Succeeds because the created EnumType does keep the correct dict
ev[...] = self.STORED_VAL
def tearDown(self):
os.remove(self.file)
def runTest(self):
        with Dataset(self.file, 'r') as nc:
            read_var = nc['evar']
            read_et = nc.enumtypes['etype']
            assert(read_var[...] == self.STORED_VAL)
            assert(read_et.enum_dict == self.VAL_MAP)
if __name__ == '__main__':
unittest.main()
| 3,348 | 36.211111 | 89 | py |
netcdf4-python | netcdf4-python-master/test/tst_open_mem.py | import os
import unittest
import netCDF4
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
class TestOpenMem(unittest.TestCase):
def test_mem_open(self):
fpath = os.path.join(CURRENT_DIR, "netcdf_dummy_file.nc")
with open(fpath, 'rb') as f:
nc_bytes = f.read()
if not netCDF4.__has_nc_open_mem__:
with self.assertRaises(ValueError):
netCDF4.Dataset('foo_bar', memory=nc_bytes)
return
# Needs: https://github.com/Unidata/netcdf-c/pull/400
if netCDF4.__netcdf4libversion__ < '4.4.1.2':
with self.assertRaises(OSError):
netCDF4.Dataset('foo_bar', memory=nc_bytes)
return
with netCDF4.Dataset('foo_bar', memory=nc_bytes) as nc:
assert nc.filepath() == 'foo_bar'
assert nc.project_summary == 'Dummy netCDF file'
if __name__ == '__main__':
unittest.main()
| 986 | 29.84375 | 67 | py |
netcdf4-python | netcdf4-python-master/test/tst_compoundvar.py | import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset, CompoundType
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
# test compound data types.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
DIM_NAME = 'phony_dim'
GROUP_NAME = 'phony_group'
VAR_NAME = 'phony_compound_var'
TYPE_NAME1 = 'cmp1'
TYPE_NAME2 = 'cmp2'
TYPE_NAME3 = 'cmp3'
TYPE_NAME4 = 'cmp4'
TYPE_NAME5 = 'cmp5'
DIM_SIZE=3
# unaligned data types (note they are nested)
dtype1=np.dtype([('i', 'i2'), ('j', 'i8')])
dtype2=np.dtype([('x', 'f4',), ('y', 'f8',(3,2))])
dtype3=np.dtype([('xx', dtype1), ('yy', dtype2)])
dtype4=np.dtype([('xxx',dtype3),('yyy','f8', (4,))])
dtype5=np.dtype([('x1', dtype1), ('y1', dtype2)])
# aligned data types
dtype1a = np.dtype({'names':['i','j'],'formats':['<i2','<i8']},align=True)
dtype2a = np.dtype({'names':['x','y'],'formats':['<f4',('<f8', (3, 2))]},align=True)
dtype3a = np.dtype({'names':['xx','yy'],'formats':[dtype1a,dtype2a]},align=True)
dtype4a = np.dtype({'names':['xxx','yyy'],'formats':[dtype3a,('f8', (4,))]},align=True)
dtype5a = np.dtype({'names':['x1','y1'],'formats':[dtype1a,dtype2a]},align=True)
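# Alignment sketch (sizes assume typical x86-64 padding rules): the packed
# dtype1 has itemsize 10 (2 + 8 bytes), while the aligned dtype1a has itemsize
# 16, because the int64 field is padded to start on an 8-byte boundary.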
data = np.zeros(DIM_SIZE,dtype4)
data['xxx']['xx']['i']=1
data['xxx']['xx']['j']=2
data['xxx']['yy']['x']=3
data['xxx']['yy']['y']=4
data['yyy'] = 5
datag = np.zeros(DIM_SIZE,dtype5)
datag['x1']['i']=10
datag['x1']['j']=20
datag['y1']['x']=30
datag['y1']['y']=40
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file, 'w')
d = f.createDimension(DIM_NAME,DIM_SIZE)
g = f.createGroup(GROUP_NAME)
# simple compound types.
cmptype1 = f.createCompoundType(dtype1, TYPE_NAME1)
cmptype2 = f.createCompoundType(dtype2, TYPE_NAME2)
# close and reopen the file to make sure compound
# type info read back in correctly.
f.close()
f = Dataset(self.file,'r+')
g = f.groups[GROUP_NAME]
# multiply nested compound types
cmptype3 = f.createCompoundType(dtype3, TYPE_NAME3)
cmptype4 = f.createCompoundType(dtype4, TYPE_NAME4)
cmptype5 = f.createCompoundType(dtype5, TYPE_NAME5)
v = f.createVariable(VAR_NAME,cmptype4, DIM_NAME)
vv = g.createVariable(VAR_NAME,cmptype5, DIM_NAME)
v[:] = data
vv[:] = datag
# try reading the data back before the file is closed
dataout = v[:]
dataoutg = vv[:]
assert (cmptype4 == dtype4a) # data type should be aligned
assert (dataout.dtype == dtype4a) # data type should be aligned
assert(list(f.cmptypes.keys()) ==\
[TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5])
assert_array_equal(dataout['xxx']['xx']['i'],data['xxx']['xx']['i'])
assert_array_equal(dataout['xxx']['xx']['j'],data['xxx']['xx']['j'])
assert_array_almost_equal(dataout['xxx']['yy']['x'],data['xxx']['yy']['x'])
assert_array_almost_equal(dataout['xxx']['yy']['y'],data['xxx']['yy']['y'])
assert_array_almost_equal(dataout['yyy'],data['yyy'])
assert_array_equal(dataoutg['x1']['i'],datag['x1']['i'])
assert_array_equal(dataoutg['x1']['j'],datag['x1']['j'])
assert_array_almost_equal(dataoutg['y1']['x'],datag['y1']['x'])
assert_array_almost_equal(dataoutg['y1']['y'],datag['y1']['y'])
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
#pass
def runTest(self):
"""testing compound variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR_NAME]
g = f.groups[GROUP_NAME]
vv = g.variables[VAR_NAME]
dataout = v[:]
dataoutg = vv[:]
# make sure data type is aligned
assert (f.cmptypes['cmp4'] == dtype4a)
assert(list(f.cmptypes.keys()) ==\
[TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5])
assert_array_equal(dataout['xxx']['xx']['i'],data['xxx']['xx']['i'])
assert_array_equal(dataout['xxx']['xx']['j'],data['xxx']['xx']['j'])
assert_array_almost_equal(dataout['xxx']['yy']['x'],data['xxx']['yy']['x'])
assert_array_almost_equal(dataout['xxx']['yy']['y'],data['xxx']['yy']['y'])
assert_array_almost_equal(dataout['yyy'],data['yyy'])
assert_array_equal(dataoutg['x1']['i'],datag['x1']['i'])
assert_array_equal(dataoutg['x1']['j'],datag['x1']['j'])
assert_array_almost_equal(dataoutg['y1']['x'],datag['y1']['x'])
assert_array_almost_equal(dataoutg['y1']['y'],datag['y1']['y'])
f.close()
# issue 773
f = Dataset(self.file,'w')
dtype = np.dtype([('observation', 'i4'),
('station_name','S80')])
dtype_nest = np.dtype([('observation', 'i4'),
('station_name','S80'),
('nested_observation',dtype)])
station_data_t1 = f.createCompoundType(dtype,'station_data1')
station_data_t2 = f.createCompoundType(dtype_nest,'station_data')
f.createDimension('station',None)
statdat = f.createVariable('station_obs', station_data_t2, ('station',))
assert(statdat.dtype == station_data_t2.dtype)
datain = np.empty(2,station_data_t2.dtype_view)
datain['observation'][:] = (123,314)
datain['station_name'][:] = ('Boulder','New York')
datain['nested_observation']['observation'][:] = (-999,999)
datain['nested_observation']['station_name'][:] = ('Boston','Chicago')
statdat[:] = datain
f.close()
f = Dataset(self.file)
dataout = f['station_obs'][:]
assert(dataout.dtype == station_data_t2.dtype_view)
assert_array_equal(datain, dataout)
f.close()
if __name__ == '__main__':
from netCDF4 import getlibversion
version = getlibversion().split()[0]
unittest.main()
| 6,011 | 41.041958 | 87 | py |
netcdf4-python | netcdf4-python-master/test/tst_multifile.py | from netCDF4 import Dataset, MFDataset, MFTime
import numpy as np
from numpy.random import seed, randint
from numpy.testing import assert_array_equal, assert_equal
from numpy import ma
import tempfile, unittest, os, datetime
import cftime
from pkg_resources import parse_version
nx=100; ydim=5; zdim=10
nfiles = 10
ninc = nx/nfiles
files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)]
data = randint(0,10,size=(nx,ydim,zdim))
missval = 99
data[::10] = missval
data = ma.masked_values(data,missval)
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.files = files
for nfile,file in enumerate(self.files):
f = Dataset(file,'w',format='NETCDF4_CLASSIC')
f.createDimension('x',None)
f.createDimension('y',ydim)
f.createDimension('z',zdim)
f.history = 'created today'
x = f.createVariable('x','i',('x',))
x.units = 'zlotys'
dat = f.createVariable('data','i',('x','y','z',))
dat.long_name = 'phony data'
dat.missing_value = missval
nx1 = int(nfile*ninc); nx2 = int(ninc*(nfile+1))
#x[0:ninc] = np.arange(nfile*ninc,ninc*(nfile+1))
x[:] = np.arange(nfile*ninc,ninc*(nfile+1))
#dat[0:ninc] = data[nx1:nx2]
dat[:] = data[nx1:nx2]
f.close()
def tearDown(self):
# Remove the temporary files
for file in self.files:
os.remove(file)
def runTest(self):
"""testing multi-file dataset access"""
f = MFDataset(self.files,check=True)
f.set_auto_maskandscale(True) # issue570
f.set_always_mask(False)
assert f.history == 'created today'
assert_array_equal(np.arange(0,nx),f.variables['x'][:])
varin = f.variables['data']
datin = varin[:]
assert_array_equal(datin.mask,data.mask)
varin.set_auto_maskandscale(False)
data2 = data.filled()
assert varin.long_name == 'phony data'
assert len(varin) == nx
assert varin.shape == (nx,ydim,zdim)
assert varin.dimensions == ('x','y','z')
assert_array_equal(varin[4:-4:4,3:5,2:8],data2[4:-4:4,3:5,2:8])
assert varin[0,0,0] == data2[0,0,0]
assert_array_equal(varin[:],data2)
assert getattr(varin,'nonexistantatt',None) == None
f.close()
# test master_file kwarg (issue #835).
f = MFDataset(self.files,master_file=self.files[-1],check=True)
assert_array_equal(np.arange(0,nx),f.variables['x'][:])
varin = f.variables['data']
assert_array_equal(varin[4:-4:4,3:5,2:8],data2[4:-4:4,3:5,2:8])
f.close()
# testing multi-file get_variables_by_attributes
f = MFDataset(self.files,check=True)
assert f.get_variables_by_attributes(axis='T') == []
        assert f.get_variables_by_attributes(units='zlotys')[0] == f['x']
assert f.isopen()
f.close()
assert not f.isopen()
class NonuniformTimeTestCase(unittest.TestCase):
ninc = 365
def setUp(self):
self.files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(2)]
for nfile,file in enumerate(self.files):
f = Dataset(file,'w',format='NETCDF4_CLASSIC')
f.createDimension('time',None)
f.createDimension('y',ydim)
f.createDimension('z',zdim)
f.history = 'created today'
time = f.createVariable('time', 'f', ('time', ))
#time.units = 'days since {0}-01-01'.format(1979+nfile)
yr = 1979+nfile
time.units = 'days since %s-01-01' % yr
# Do not set the calendar attribute on the created files to test calendar
# overload.
# time.calendar = 'standard'
x = f.createVariable('x','f',('time', 'y', 'z'))
x.units = 'potatoes per square mile'
nx1 = self.ninc*nfile;
nx2 = self.ninc*(nfile+1)
time[:] = np.arange(self.ninc)
x[:] = np.arange(nx1, nx2).reshape(self.ninc,1,1) * np.ones((1, ydim, zdim))
f.close()
def tearDown(self):
# Remove the temporary files
for file in self.files:
os.remove(file)
def runTest(self):
# The test files have no calendar attribute on the time variable.
calendar = 'standard'
# Get the real dates
dates = []
for file in self.files:
f = Dataset(file)
t = f.variables['time']
dates.extend(cftime.num2date(t[:], t.units, calendar))
f.close()
# Compare with the MF dates
f = MFDataset(self.files,check=True)
t = f.variables['time']
T = MFTime(t, calendar=calendar)
assert_equal(T.calendar, calendar)
assert_equal(len(T), len(t))
assert_equal(T.shape, t.shape)
assert_equal(T.dimensions, t.dimensions)
assert_equal(T.typecode(), t.typecode())
# skip this until cftime pull request #55 is in a released
# version (1.0.1?). Otherwise, fix for issue #808 breaks this
if parse_version(cftime.__version__) >= parse_version('1.0.1'):
assert_array_equal(cftime.num2date(T[:], T.units, T.calendar), dates)
assert_equal(cftime.date2index(datetime.datetime(1980, 1, 2), T), 366)
f.close()
# Test exception is raised when no calendar attribute is available on the
# time variable.
with MFDataset(self.files, check=True) as ds:
with self.assertRaises(ValueError):
MFTime(ds.variables['time'])
# Test exception is raised when the calendar attribute is different on the
# variables. First, add calendar attributes to file. Note this will modify
# the files inplace.
calendars = ['standard', 'gregorian']
for idx, f in enumerate(self.files):
with Dataset(f, 'a') as ds:
ds.variables['time'].calendar = calendars[idx]
with MFDataset(self.files, check=True) as ds:
with self.assertRaises(ValueError):
MFTime(ds.variables['time'])
if __name__ == '__main__':
unittest.main()
| 6,295 | 36.927711 | 105 | py |
netcdf4-python | netcdf4-python-master/test/tst_create_mem.py | import unittest
import netCDF4
import numpy as np
from numpy.testing import assert_array_equal
class TestCreateMem(unittest.TestCase):
def test_mem_create(self):
def check_inmemory(format):
# memory is 'advisory size' - not needed for NETCDF4/HDF5
# but is used for NETCDF3.
nc = netCDF4.Dataset('test.nc','w',memory=1028,format=format)
d = nc.createDimension('x',None)
v = nc.createVariable('v',np.int32,'x')
data = np.arange(5)
v[0:5] = data
# retrieve memory buffer
b = nc.close()
# open a new file using this memory buffer
nc2 = netCDF4.Dataset('test2.nc','r',memory=b)
assert_array_equal(nc2['v'][:],data)
nc2.close()
check_inmemory('NETCDF3_CLASSIC')
check_inmemory('NETCDF4_CLASSIC')
if __name__ == '__main__':
unittest.main()
| 924 | 33.259259 | 73 | py |
netcdf4-python | netcdf4-python-master/test/tst_scalarvar.py | import sys
import unittest
import os
import tempfile
from numpy.testing import assert_almost_equal
import netCDF4
import math
VAR_NAME='temp'
VAR_TYPE='f4'
VAR_VAL=math.pi
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
GROUP_NAME = 'subgroup'
# test scalar variable creation and retrieval.
class ScalarVariableTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
rootgrp = netCDF4.Dataset(self.file, 'w')
# scalar variable.
temp = rootgrp.createVariable(VAR_NAME,VAR_TYPE)
#temp[:] = VAR_VAL
temp.assignValue(VAR_VAL)
subgroup = rootgrp.createGroup(GROUP_NAME)
tempg = subgroup.createVariable(VAR_NAME,VAR_TYPE)
tempg[:] = VAR_VAL
#tempg.assignValue(VAR_VAL)
rootgrp.close()
def tearDown(self):
# Remove the temporary file
os.remove(self.file)
def runTest(self):
"""testing scalar variables"""
# check dimensions in root group.
f = netCDF4.Dataset(self.file, 'r+')
v = f.variables[VAR_NAME]
# dimensions and shape should be empty tuples
self.assertTrue(v.dimensions == ())
self.assertTrue(v.shape == ())
# check result of getValue and slice
assert_almost_equal(v.getValue(), VAR_VAL, decimal=6)
assert_almost_equal(v[:], VAR_VAL, decimal=6)
g = f.groups[GROUP_NAME]
vg = g.variables[VAR_NAME]
# dimensions and shape should be empty tuples
self.assertTrue(vg.dimensions == ())
self.assertTrue(vg.shape == ())
# check result of getValue and slice
assert_almost_equal(vg.getValue(), VAR_VAL, decimal=6)
assert_almost_equal(vg[:], VAR_VAL, decimal=6)
f.close()
if __name__ == '__main__':
unittest.main()
| 1,821 | 29.881356 | 72 | py |
netcdf4-python | netcdf4-python-master/test/tst_vars.py | import sys
import unittest
import os
import tempfile
import numpy as np
from numpy.random.mtrand import uniform
from numpy.testing import assert_array_equal, assert_array_almost_equal
import netCDF4
# test variable creation.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VAR_DOUBLE_NAME="dummy_var"
VAR_SHORT_NAME='dummy_var_short'
VARNAMES = sorted([VAR_DOUBLE_NAME,VAR_SHORT_NAME])
GROUP_NAME = "dummy_group"
DIM1_NAME="x"
DIM1_LEN=2
DIM2_NAME="y"
DIM2_LEN=3
DIM3_NAME="z"
DIM3_LEN=25
randomdata = uniform(size=(DIM1_LEN,DIM2_LEN,DIM3_LEN))
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = netCDF4.Dataset(self.file, 'w')
f.createDimension(DIM1_NAME, DIM1_LEN)
f.createDimension(DIM2_NAME, DIM2_LEN)
f.createDimension(DIM3_NAME, DIM3_LEN)
v1 = f.createVariable(VAR_DOUBLE_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME))
v2 = f.createVariable(VAR_SHORT_NAME, 'i2',(DIM2_NAME,DIM3_NAME))
v1.long_name = 'dummy data root'
g = f.createGroup(GROUP_NAME)
g.createDimension(DIM1_NAME, DIM1_LEN)
g.createDimension(DIM2_NAME, DIM2_LEN)
g.createDimension(DIM3_NAME, DIM3_LEN)
v1g = g.createVariable(VAR_DOUBLE_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME))
v2g = g.createVariable(VAR_SHORT_NAME, 'i2',(DIM2_NAME,DIM3_NAME))
v1g.long_name = 'dummy data subgroup'
v1[:] = randomdata
v1g[:] = randomdata
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing primitive variables"""
f = netCDF4.Dataset(self.file, 'r')
# check variables in root group.
varnames = sorted(f.variables.keys())
v1 = f.variables[VAR_DOUBLE_NAME]
v2 = f.variables[VAR_SHORT_NAME]
assert varnames == VARNAMES
assert v1.dtype.str[1:] == 'f8'
assert v2.dtype.str[1:] == 'i2'
assert v1.long_name == 'dummy data root'
assert v1.dimensions == (DIM1_NAME,DIM2_NAME,DIM3_NAME)
assert v2.dimensions == (DIM2_NAME,DIM3_NAME)
assert v1.shape == (DIM1_LEN,DIM2_LEN,DIM3_LEN)
assert v2.shape == (DIM2_LEN,DIM3_LEN)
assert v1.size == DIM1_LEN * DIM2_LEN * DIM3_LEN
assert len(v1) == DIM1_LEN
#assert np.allclose(v1[:],randomdata)
assert_array_almost_equal(v1[:],randomdata)
# check variables in sub group.
g = f.groups[GROUP_NAME]
varnames = sorted(g.variables.keys())
v1 = g.variables[VAR_DOUBLE_NAME]
# test iterating over variable (should stop when
# it gets to the end and raises IndexError, issue 121)
for v in v1:
pass
v2 = g.variables[VAR_SHORT_NAME]
assert varnames == VARNAMES
assert v1.dtype.str[1:] == 'f8'
assert v2.dtype.str[1:] == 'i2'
assert v1.long_name == 'dummy data subgroup'
assert v1.dimensions == (DIM1_NAME,DIM2_NAME,DIM3_NAME)
assert v2.dimensions == (DIM2_NAME,DIM3_NAME)
assert v1.shape == (DIM1_LEN,DIM2_LEN,DIM3_LEN)
assert v2.shape == (DIM2_LEN,DIM3_LEN)
#assert np.allclose(v1[:],randomdata)
assert_array_almost_equal(v1[:],randomdata)
f.close()
if __name__ == '__main__':
unittest.main()
| 3,378 | 34.568421 | 85 | py |
netcdf4-python | netcdf4-python-master/test/tst_compoundatt.py | import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset, CompoundType
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
# test compound attributes.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
DIM_NAME = 'time'
VAR_NAME = 'wind'
VAR_NAME2 = 'forecast_wind'
GROUP_NAME = 'forecasts'
dtype=np.dtype([('speed', 'f4'), ('direction', 'f4')])
TYPE_NAME = 'wind_vector_type'
TYPE_NAMEC = 'wind_vectorunits_type'
dtypec=np.dtype([('speed', 'S8'), ('direction', 'S8')])
missvals = np.empty(1,dtype)
missvals['direction']=1.e20
missvals['speed']=-999.
windunits = np.empty(1,dtypec)
windunits['speed'] = 'm/s'
windunits['direction'] = 'degrees'
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file, 'w')
d = f.createDimension(DIM_NAME,None)
g = f.createGroup(GROUP_NAME)
wind_vector_type = f.createCompoundType(dtype, TYPE_NAME)
wind_vectorunits_type = f.createCompoundType(dtypec, TYPE_NAMEC)
v = f.createVariable(VAR_NAME,wind_vector_type, DIM_NAME)
vv = g.createVariable(VAR_NAME2,wind_vector_type,DIM_NAME)
v.missing_values = missvals
v.units = windunits
vv.missing_values = missvals
vv.units = windunits
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing compound attributes"""
f = Dataset(self.file, 'r')
v = f.variables[VAR_NAME]
g = f.groups[GROUP_NAME]
vv = g.variables[VAR_NAME2]
assert_array_almost_equal(v.missing_values['speed'], missvals['speed'])
assert_array_almost_equal(v.missing_values['direction'],\
missvals['direction'])
assert_array_almost_equal(vv.missing_values['speed'], missvals['speed'])
assert_array_almost_equal(vv.missing_values['direction'],\
missvals['direction'])
assert_array_equal(v.units['speed'], windunits['speed'].squeeze())
assert_array_equal(v.units['direction'],\
windunits['direction'].squeeze())
assert_array_equal(vv.units['speed'], windunits['speed'].squeeze())
assert_array_equal(vv.units['direction'],\
windunits['direction'].squeeze())
assert(v.units['speed'] == b'm/s')
assert(v.units['direction'] == b'degrees')
assert(vv.units['speed'] == b'm/s')
assert(vv.units['direction'] == b'degrees')
f.close()
if __name__ == '__main__':
unittest.main()
| 2,644 | 34.743243 | 80 | py |
netcdf4-python | netcdf4-python-master/test/tst_diskless.py | import unittest, os, tempfile
import numpy as np
from numpy.random.mtrand import uniform
from numpy.testing import assert_array_equal, assert_array_almost_equal
import netCDF4
# rudimentary test of diskless file capability.
# create an n1dim by n2dim by n3dim random array
n1dim = 10
n2dim = 73
n3dim = 144
ranarr = 100.*uniform(size=(n1dim,n2dim,n3dim))
ranarr2 = 100.*uniform(size=(n1dim,n2dim,n3dim))
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=True).name
FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
class DisklessTestCase(unittest.TestCase):
def setUp(self):
# in memory file, does not exist on disk (closing it
# makes data disappear from memory)
self.file = FILE_NAME
f = netCDF4.Dataset(self.file,'w',diskless=True, persist=False)
self.f = f
# foo has a single unlimited dimension
f.createDimension('n1', n1dim)
f.createDimension('n2', n2dim)
f.createDimension('n3', n3dim)
foo = f.createVariable('data1', ranarr.dtype.str[1:], ('n1','n2','n3'))
# write some data to it.
foo[0:n1dim-1] = ranarr[:-1,:,:]
foo[n1dim-1] = ranarr[-1,:,:]
# bar has 2 unlimited dimensions
f.createDimension('n4', None)
# write some data to it.
bar = f.createVariable('data2', ranarr.dtype.str[1:], ('n1','n2','n4'))
bar[0:n1dim,:, 0:n3dim] = ranarr2
# in memory file, that is persisted to disk when close method called.
self.file2 = FILE_NAME2
f2 = netCDF4.Dataset(self.file2,'w',diskless=True, persist=True)
f2.createDimension('n1', n1dim)
f2.createDimension('n2', n2dim)
f2.createDimension('n3', n3dim)
foo = f2.createVariable('data1', ranarr.dtype.str[1:], ('n1','n2','n3'))
# write some data to it.
foo[0:n1dim-1] = ranarr[:-1,:,:]
foo[n1dim-1] = ranarr[-1,:,:]
f2.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file2)
self.f.close()
def runTest(self):
"""testing diskless file capability"""
foo = self.f.variables['data1']
bar = self.f.variables['data2']
# check shape.
self.assertTrue(foo.shape == (n1dim,n2dim,n3dim))
self.assertTrue(bar.shape == (n1dim,n2dim,n3dim))
# check data.
assert_array_almost_equal(foo[:], ranarr)
assert_array_almost_equal(bar[:], ranarr2)
# file does not actually exist on disk
assert(os.path.isfile(self.file)==False)
# open persisted file.
# first, check that file does actually exist on disk
assert(os.path.isfile(self.file2)==True)
f = netCDF4.Dataset(self.file2)
foo = f.variables['data1']
# check shape.
self.assertTrue(foo.shape == (n1dim,n2dim,n3dim))
# check data.
assert_array_almost_equal(foo[:], ranarr)
f.close()
if __name__ == '__main__':
unittest.main()
| 3,009 | 35.707317 | 80 | py |
netcdf4-python | netcdf4-python-master/test/tst_compound_alignment.py | """ This illustrates a bug when a structured array is extracted from a netCDF4.Variable using the slicing operation.
Bug is observed with EPD 7.3-1 and 7.3-2 (64-bit)
"""
import netCDF4, numpy, tempfile, sys, os, unittest
from numpy.testing import assert_array_equal, assert_array_almost_equal
def string_to_bytes(xstring, size=-1, pad="\0"):
nbytes = len(xstring)
if (size >= 0):
xsize = size
else:
xsize = nbytes
xbytes = numpy.empty(xsize, dtype=numpy.uint8)
xbytes[:] = ord(pad)
if (nbytes > xsize):
nbytes = xsize
for i in range(nbytes):
xbytes[i] = ord(xstring[i])
return xbytes
cells = numpy.array([ (387, 289, 65.64321899414062, -167.90093994140625, 3555, -10158, 8934, -16608, 19, 34199, 2, 0, 218, 619, 534, 314, 234, 65528, 39, 1524, 2429, 3137, 2795, 3092, 6431, 12949, 6780, 18099, 8248, 9331, 972, 553, 721, 2874, 2488, 3087, 3072, 2537, 3295, 334, 334, 9888, 10552, 7175, 6981, 7250, 8133, 14349, 16565, 17097, 20945, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 10, 11, 15, 7, 14, 4, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 12210, 16433, 45, 241, 243, 71, 131, [87, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(387, 290, 65.64067077636719, -167.93258666992188, 3546, -10161, 8934, -16611, 13, 34165, 1, 0, 215, 582, 534, 317, 204, 65528, 34, 1533, 2428, 3161, 2803, 3107, 6336, 12721, 6670, 17775, 7973, 8770, 933, 554, 714, 2904, 2480, 3102, 3087, 2560, 3323, 359, 359, 9934, 10585, 7235, 7007, 7315, 8209, 14421, 16538, 17046, 20924, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 11, 15, 6, 15, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 12235, 16433, 45, 241, 243, 71, 131, [-43, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(388, 287, 65.65902709960938, -167.84213256835938, 3574, -10167, 8936, -16602, 15, 34269, 1, 0, 213, 626, 521, 313, 230, 64, 35, 1519, 2391, 3091, 2719, 3011, 6313, 12685, 6657, 17785, 8169, 9420, 960, 541, 705, 2881, 2488, 3084, 3065, 2500, 3328, 357, 357, 10023, 10578, 7250, 6986, 7285, 8149, 14469, 16671, 17188, 20849, 13, 4, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 6, 15, 4, 4, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12241, 16432, 25, 241, 243, 71, 131, [-41, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(388, 288, 65.65646362304688, -167.8740692138672, 3565, -10171, 8936, -16605, 17, 34234, 1, 0, 214, 618, 523, 310, 226, 70, 36, 1528, 2408, 3107, 2751, 3026, 6320, 12708, 6673, 17824, 8138, 9309, 960, 541, 712, 2881, 2496, 3084, 3079, 2477, 3259, 349, 349, 10023, 10528, 7281, 7011, 7285, 8149, 14416, 16503, 17057, 20928, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 6, 13, 4, 4, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12239, 16433, 45, 241, 243, 71, 131, [-43, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(388, 289, 65.65390014648438, -167.9058380126953, 3555, -10174, 8935, -16608, 15, 34200, 2, 0, 212, 582, 526, 307, 208, 60, 40, 1519, 2408, 3107, 2751, 3042, 6226, 12504, 6548, 17477, 7880, 8732, 929, 541, 689, 2911, 2496, 3129, 3094, 2500, 3300, 342, 342, 10001, 10595, 7413, 7086, 7396, 8292, 14486, 16601, 16949, 21066, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 5, 13, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12272, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(388, 290, 65.6513442993164, -167.9374542236328, 3546, -10177, 8935, -16611, 6, 34166, 2, 0, 213, 568, 531, 315, 198, 64, 34, 1537, 2424, 3147, 2782, 3081, 6242, 12534, 6571, 17524, 7833, 8550, 921, 541, 689, 2926, 2496, 3144, 3102, 2546, 3341, 358, 358, 10045, 10629, 7421, 7078, 7448, 8326, 14485, 16572, 16984, 21085, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 5, 13, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12307, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(388, 291, 65.6487808227539, -167.96910095214844, 3536, -10180, 8934, -16614, 5, 34131, 1, 0, 218, 586, 538, 321, 211, 74, 40, 1546, 2424, 3171, 2806, 3113, 6368, 12821, 6704, 17895, 8029, 8835, 937, 549, 705, 2926, 2496, 3152, 3117, 2476, 3286, 350, 350, 9978, 10612, 7468, 7128, 7474, 8360, 14547, 16572, 17019, 20766, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 5, 13, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(389, 287, 65.66973876953125, -167.84698486328125, 3574, -10183, 8937, -16603, 8, 34270, 2, 0, 211, 598, 526, 304, 206, 65528, 35, 1516, 2378, 3069, 2697, 2984, 6168, 12394, 6515, 17382, 7931, 9011, 935, 530, 694, 2923, 2495, 3147, 3106, 2530, 3413, 334, 334, 9999, 10723, 7479, 7160, 7494, 8378, 14631, 16670, 17111, 21141, 12, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 11, 13, 15, 6, 11, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 12325, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(389, 288, 65.66716003417969, -167.87890625, 3565, -10186, 8937, -16606, 9, 34235, 2, 0, 212, 602, 528, 309, 218, 65528, 38, 1525, 2387, 3101, 2736, 3016, 6240, 12542, 6585, 17587, 7994, 9050, 943, 530, 701, 2938, 2503, 3170, 3128, 2552, 3371, 333, 333, 9930, 10706, 7533, 7176, 7546, 8412, 14595, 16697, 17010, 20876, 12, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 11, 13, 15, 6, 10, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 12360, 16433, 45, 241, 243, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(389, 289, 65.66458892822266, -167.91065979003906, 3555, -10190, 8936, -16609, 5, 34201, 2, 0, 212, 561, 527, 311, 202, 65528, 34, 1524, 2412, 3117, 2744, 3032, 6137, 12342, 6461, 17241, 7721, 8408, 897, 530, 678, 2967, 2495, 3185, 3158, 2552, 3344, 335, 335, 9953, 10757, 7586, 7219, 7598, 8474, 14622, 16711, 17085, 20855, 12, 7, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 12, 13, 15, 6, 11, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(389, 290, 65.6620101928711, -167.94241333007812, 3546, -10193, 8936, -16611, 5, 34166, 2, 0, 213, 558, 533, 315, 190, 65528, 35, 1533, 2420, 3141, 2767, 3071, 6168, 12424, 6500, 17312, 7721, 8360, 905, 530, 678, 2952, 2495, 3177, 3128, 2507, 3371, 334, 334, 9975, 10689, 7517, 7176, 7546, 8426, 14577, 16559, 17109, 21037, 12, 7, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 12, 13, 15, 6, 11, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(389, 291, 65.65943145751953, -167.97409057617188, 3536, -10196, 8935, -16614, 5, 34132, 0, 0, 217, 578, 536, 324, 206, 65528, 36, 1542, 2420, 3165, 2799, 3095, 6303, 12683, 6640, 17713, 7924, 8654, 920, 546, 694, 2938, 2495, 3170, 3143, 2530, 3358, 327, 327, 9952, 10672, 7517, 7184, 7539, 8419, 14550, 16627, 17046, 20934, 12, 7, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 11, 12, 15, 6, 11, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(389, 292, 65.65685272216797, -168.0056915283203, 3527, -10199, 8934, -16617, 5, 34097, 1, 0, 226, 625, 545, 329, 232, 65528, 56, 1542, 2428, 3189, 2845, 3165, 6580, 13244, 6943, 18555, 8375, 9328, 973, 569, 732, 2952, 2503, 3155, 3106, 2507, 3289, 341, 341, 9861, 10552, 7494, 7176, 7513, 8405, 14460, 16489, 16983, 20873, 11, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 10, 11, 15, 6, 10, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [81, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(390, 287, 65.66167449951172, -167.85145568847656, 3573, -10045, 8937, -16603, 14, 34267, 1, 0, 213, 624, 529, 315, 216, 68, 44, 1533, 2414, 3105, 2719, 3022, 6294, 12637, 6630, 17704, 8134, 9315, 969, 542, 712, 2888, 2500, 3097, 3042, 2456, 3268, 334, 334, 10122, 10624, 7274, 7110, 7307, 8181, 14498, 16617, 17137, 21090, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 9, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12243, 16433, 45, 241, 243, 71, 131, [-41, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(390, 288, 65.65919494628906, -167.88340759277344, 3564, -10048, 8936, -16606, 15, 34233, 2, 0, 215, 610, 526, 316, 213, 72, 38, 1533, 2414, 3105, 2735, 3038, 6278, 12621, 6607, 17641, 8055, 9125, 954, 542, 704, 2910, 2506, 3113, 3071, 2501, 3254, 342, 342, 10032, 10624, 7358, 7181, 7353, 8243, 14453, 16522, 17075, 20905, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 8, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12255, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(390, 289, 65.65672302246094, -167.91519165039062, 3554, -10050, 8935, -16608, 10, 34198, 2, 0, 211, 570, 533, 318, 196, 64, 34, 1524, 2414, 3128, 2751, 3038, 6177, 12399, 6491, 17304, 7774, 8523, 914, 542, 688, 2940, 2500, 3143, 3086, 2523, 3310, 335, 335, 10054, 10691, 7456, 7199, 7470, 8347, 14560, 16656, 17000, 20986, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 9, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12318, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(390, 290, 65.65425109863281, -167.9468994140625, 3545, -10053, 8935, -16611, 5, 34164, 2, 0, 213, 572, 538, 319, 196, 64, 41, 1533, 2438, 3160, 2789, 3077, 6231, 12534, 6561, 17477, 7829, 8538, 922, 542, 696, 2933, 2500, 3151, 3086, 2456, 3324, 343, 343, 10054, 10674, 7441, 7199, 7470, 8340, 14533, 16562, 16987, 20985, 13, 7, 6, 15, 15, 15, 15, 0, 9, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 9, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(390, 291, 65.65176391601562, -167.97862243652344, 3535, -10056, 8934, -16614, 5, 34129, 2, 0, 220, 600, 544, 324, 209, 78, 52, 1532, 2446, 3175, 2821, 3124, 6426, 12898, 6754, 18017, 8110, 8951, 961, 557, 712, 2948, 2500, 3143, 3094, 2456, 3268, 342, 342, 10054, 10624, 7433, 7181, 7490, 8361, 14524, 16615, 17011, 21005, 13, 6, 6, 15, 15, 15, 15, 0, 9, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 12, 15, 5, 9, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(391, 286, 65.67485809326172, -167.82440185546875, 3583, -10058, 8938, -16600, 13, 34302, 2, 0, 209, 603, 516, 306, 206, 69, 42, 1500, 2373, 3048, 2663, 2961, 6145, 12346, 6479, 17279, 7924, 9049, 930, 526, 697, 2902, 2500, 3127, 3090, 2513, 3361, 338, 338, 10063, 10809, 7433, 7131, 7427, 8311, 14635, 16809, 17275, 20874, 13, 5, 7, 15, 15, 15, 15, 0, 10, 6, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 6, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 12294, 16433, 45, 241, 243, 72, 131, [85, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(391, 287, 65.67237854003906, -167.8563232421875, 3573, -10061, 8938, -16603, 7, 34268, 2, 0, 212, 608, 525, 309, 220, 66, 42, 1517, 2389, 3080, 2701, 2985, 6200, 12440, 6557, 17474, 7995, 9121, 946, 534, 697, 2932, 2500, 3165, 3113, 2490, 3320, 329, 329, 10085, 10776, 7527, 7186, 7538, 8414, 14652, 16698, 17108, 20833, 13, 5, 7, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 6, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 12350, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(391, 288, 65.66989135742188, -167.8882598876953, 3564, -10064, 8937, -16606, 6, 34233, 2, 0, 213, 598, 528, 311, 225, 77, 39, 1534, 2405, 3111, 2724, 3016, 6239, 12527, 6572, 17545, 7971, 8954, 946, 542, 704, 2955, 2507, 3172, 3128, 2467, 3320, 353, 353, 9952, 10725, 7534, 7241, 7571, 8441, 14617, 16615, 17081, 20772, 13, 5, 7, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 6, 11, 3, 2, 5, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 12369, 16433, 45, 241, 243, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(391, 289, 65.66740417480469, -167.9201202392578, 3554, -10066, 8936, -16609, 5, 34198, 2, 0, 209, 552, 534, 314, 188, 62, 40, 1525, 2413, 3119, 2740, 3032, 6114, 12267, 6409, 17084, 7635, 8240, 891, 534, 681, 2940, 2500, 3180, 3135, 2536, 3333, 330, 330, 10018, 10775, 7597, 7214, 7610, 8476, 14660, 16683, 17158, 20892, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 6, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(391, 290, 65.66490936279297, -167.951904296875, 3545, -10069, 8936, -16612, 5, 34164, 2, 0, 212, 560, 530, 319, 192, 61, 38, 1525, 2421, 3143, 2763, 3055, 6184, 12425, 6502, 17326, 7729, 8391, 907, 542, 697, 2962, 2500, 3157, 3135, 2490, 3306, 330, 330, 9952, 10758, 7542, 7214, 7564, 8441, 14582, 16669, 17132, 20831, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(391, 291, 65.66241455078125, -167.98362731933594, 3535, -10072, 8935, -16615, 5, 34129, 0, 0, 219, 586, 545, 320, 207, 70, 47, 1543, 2438, 3166, 2802, 3110, 6357, 12780, 6697, 17880, 8011, 8788, 938, 557, 704, 2962, 2500, 3172, 3128, 2490, 3319, 353, 353, 10018, 10691, 7542, 7186, 7557, 8441, 14590, 16614, 17131, 20971, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 12, 15, 5, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(391, 292, 65.65991973876953, -168.01527404785156, 3526, -10075, 8935, -16618, 5, 34095, 0, 0, 228, 635, 548, 330, 234, 91, 63, 1542, 2446, 3198, 2848, 3165, 6639, 13364, 6985, 18685, 8480, 9494, 993, 573, 744, 2947, 2500, 3165, 3113, 2467, 3250, 353, 353, 9929, 10606, 7518, 7204, 7531, 8407, 14461, 16475, 16938, 20849, 13, 4, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 10, 11, 15, 6, 12, 3, 2, 5, 4, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(392, 286, 65.68557739257812, -167.8292694091797, 3583, -10074, 8939, -16601, 6, 34303, 2, 0, 205, 586, 519, 302, 195, 65528, 36, 1511, 2365, 3033, 2638, 2931, 6076, 12187, 6396, 17037, 7800, 8827, 916, 521, 675, 2950, 2505, 3202, 3155, 2456, 3417, 343, 343, 9789, 10958, 7655, 7207, 7655, 8550, 14842, 16863, 17290, 20901, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 11, 2, 2, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 12393, 16433, 45, 241, 243, 72, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(392, 287, 65.6830825805664, -167.8612823486328, 3573, -10077, 8939, -16604, 5, 34268, 2, 0, 211, 609, 527, 308, 217, 65528, 38, 1519, 2399, 3073, 2700, 2994, 6226, 12518, 6568, 17499, 8037, 9190, 948, 529, 699, 2957, 2497, 3195, 3140, 2525, 3362, 342, 342, 9765, 10857, 7609, 7207, 7635, 8508, 14686, 16766, 17165, 20820, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 12, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(392, 288, 65.68058013916016, -167.8932342529297, 3564, -10079, 8938, -16606, 5, 34233, 2, 0, 209, 575, 527, 312, 206, 65528, 29, 1528, 2407, 3105, 2716, 3002, 6163, 12369, 6474, 17264, 7808, 8653, 908, 537, 691, 2950, 2497, 3187, 3132, 2502, 3361, 351, 351, 9601, 10724, 7571, 7188, 7596, 8460, 14572, 16655, 17127, 20820, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 12, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(392, 289, 65.6780776977539, -167.92510986328125, 3554, -10082, 8937, -16609, 5, 34198, 2, 0, 208, 538, 531, 314, 182, 65528, 37, 1519, 2415, 3105, 2732, 3017, 6061, 12148, 6357, 16927, 7539, 8101, 885, 529, 675, 2957, 2505, 3195, 3147, 2502, 3334, 352, 352, 9624, 10740, 7609, 7151, 7609, 8487, 14624, 16737, 17089, 20859, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 11, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(392, 290, 65.67557525634766, -167.9569091796875, 3545, -10085, 8937, -16612, 5, 34163, 0, 0, 212, 550, 530, 320, 185, 65528, 43, 1528, 2423, 3136, 2755, 3056, 6147, 12345, 6459, 17225, 7673, 8283, 885, 529, 683, 2965, 2497, 3195, 3147, 2479, 3334, 344, 344, 9600, 10774, 7608, 7179, 7628, 8508, 14659, 16737, 17113, 20818, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 12, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(392, 291, 65.67306518554688, -167.9886474609375, 3535, -10088, 8936, -16615, 5, 34129, 0, 0, 214, 574, 533, 322, 199, 65528, 49, 1527, 2423, 3160, 2786, 3088, 6297, 12668, 6623, 17664, 7910, 8630, 924, 545, 699, 2972, 2505, 3187, 3139, 2525, 3334, 351, 351, 9647, 10757, 7601, 7169, 7622, 8494, 14633, 16709, 17113, 21018, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 7, 4, 5, 4, 7, 0, 0, 11, 12, 15, 5, 11, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(392, 292, 65.67056274414062, -168.02029418945312, 3526, -10091, 8936, -16618, 5, 34095, 2, 0, 225, 627, 539, 327, 220, 65528, 56, 1527, 2431, 3176, 2833, 3150, 6604, 13291, 6951, 18566, 8416, 9411, 979, 568, 738, 2965, 2497, 3187, 3139, 2479, 3292, 350, 350, 9624, 10723, 7585, 7197, 7615, 8487, 14606, 16571, 17037, 20836, 13, 4, 6, 15, 15, 15, 15, 0, 10, 5, 7, 7, 4, 5, 4, 7, 0, 0, 10, 11, 15, 5, 12, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(393, 286, 65.6962890625, -167.834228515625, 3583, -10089, 8940, -16601, 5, 34303, 1, 0, 203, 586, 515, 301, 208, 62, 39, 1504, 2362, 3033, 2647, 2933, 6076, 12190, 6396, 17048, 7825, 8887, 931, 518, 673, 2985, 2506, 3242, 3177, 2499, 3353, 341, 341, 10146, 11031, 7764, 7255, 7766, 8653, 14901, 16912, 17203, 21074, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 14, 15, 5, 9, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(393, 287, 65.69377899169922, -167.8662567138672, 3573, -10092, 8939, -16604, 5, 34268, 0, 0, 205, 592, 521, 303, 211, 61, 38, 1503, 2387, 3057, 2671, 2956, 6139, 12323, 6467, 17236, 7873, 8958, 931, 525, 681, 3000, 2499, 3235, 3185, 2476, 3339, 333, 333, 10101, 11014, 7749, 7264, 7779, 8674, 14910, 16840, 17229, 20975, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 14, 15, 5, 10, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(393, 288, 65.6912612915039, -167.89822387695312, 3564, -10095, 8939, -16607, 5, 34233, 0, 0, 204, 542, 529, 304, 189, 59, 35, 1512, 2403, 3081, 2702, 2987, 6013, 12080, 6318, 16828, 7540, 8173, 892, 518, 666, 2978, 2499, 3227, 3163, 2476, 3353, 350, 350, 9966, 10849, 7679, 7236, 7707, 8585, 14724, 16670, 17097, 21013, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 14, 15, 5, 10, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(393, 289, 65.68875122070312, -167.93011474609375, 3554, -10098, 8938, -16610, 5, 34198, 2, 0, 203, 531, 529, 305, 169, 58, 35, 1521, 2403, 3097, 2726, 3003, 6013, 12064, 6310, 16812, 7460, 7990, 876, 525, 658, 2985, 2499, 3227, 3155, 2499, 3339, 343, 343, 9988, 10799, 7664, 7199, 7680, 8564, 14671, 16726, 17136, 21032, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 14, 15, 5, 10, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(393, 290, 65.68623352050781, -167.9619140625, 3545, -10101, 8938, -16613, 5, 34163, 2, 0, 206, 546, 529, 311, 188, 64, 43, 1520, 2411, 3120, 2741, 3050, 6115, 12300, 6436, 17158, 7643, 8228, 900, 533, 673, 2985, 2506, 3227, 3148, 2453, 3339, 342, 342, 10011, 10832, 7679, 7255, 7680, 8585, 14724, 16698, 17162, 20953, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 9, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(393, 291, 65.6837158203125, -167.99365234375, 3535, -10104, 8937, -16616, 5, 34129, 2, 0, 211, 567, 536, 318, 199, 70, 45, 1520, 2420, 3128, 2773, 3074, 6265, 12590, 6585, 17559, 7850, 8530, 923, 549, 689, 3000, 2499, 3242, 3140, 2499, 3325, 342, 342, 9988, 10782, 7664, 7217, 7674, 8557, 14635, 16655, 17083, 20893, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 7, 7, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 10, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [81, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(393, 292, 65.68119812011719, -168.0253143310547, 3526, -10107, 8937, -16619, 5, 34095, 2, 0, 221, 628, 535, 324, 219, 78, 51, 1529, 2419, 3152, 2804, 3128, 6573, 13234, 6930, 18526, 8430, 9490, 986, 557, 728, 3007, 2491, 3242, 3170, 2453, 3339, 357, 357, 9988, 10832, 7702, 7245, 7713, 8605, 14688, 16711, 17161, 20951, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 7, 4, 5, 4, 7, 0, 0, 10, 13, 15, 5, 12, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(394, 286, 65.70700073242188, -167.83920288085938, 3583, -10105, 8941, -16601, 5, 34303, 0, 0, 200, 581, 516, 295, 202, 55, 28, 1489, 2346, 3018, 2636, 2906, 6020, 12097, 6352, 16932, 7760, 8872, 929, 517, 685, 3003, 2495, 3255, 3202, 2549, 3444, 330, 330, 10008, 11099, 7876, 7303, 7860, 8761, 14994, 16898, 17317, 20986, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 14, 15, 5, 11, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(394, 287, 65.70447540283203, -167.87124633789062, 3573, -10108, 8940, -16604, 5, 34268, 2, 0, 200, 568, 515, 297, 200, 59, 31, 1497, 2354, 3042, 2651, 2929, 6012, 12065, 6336, 16885, 7681, 8659, 921, 525, 677, 3010, 2503, 3277, 3217, 2548, 3444, 330, 330, 10054, 11082, 7883, 7321, 7873, 8774, 14967, 16911, 17264, 20995, 14, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 9, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(394, 288, 65.70195007324219, -167.90321350097656, 3564, -10111, 8940, -16607, 5, 34233, 2, 0, 197, 514, 520, 297, 172, 49, 32, 1506, 2370, 3058, 2667, 2937, 5869, 11783, 6161, 16411, 7286, 7798, 866, 517, 646, 3003, 2503, 3247, 3188, 2525, 3389, 339, 339, 9985, 10951, 7791, 7257, 7801, 8692, 14845, 16843, 17250, 20935, 14, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 14, 15, 5, 9, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(394, 289, 65.69942474365234, -167.9351043701172, 3554, -10114, 8939, -16610, 5, 34198, 2, 0, 197, 513, 527, 301, 166, 51, 31, 1506, 2387, 3090, 2683, 2969, 5909, 11854, 6201, 16514, 7310, 7790, 866, 517, 654, 2980, 2495, 3232, 3180, 2525, 3389, 339, 339, 9962, 10918, 7738, 7239, 7748, 8623, 14792, 16720, 17093, 20954, 14, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 14, 15, 5, 11, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(394, 290, 65.6968994140625, -167.96693420410156, 3545, -10117, 8939, -16613, 5, 34164, 2, 0, 204, 538, 529, 306, 185, 61, 37, 1514, 2395, 3105, 2730, 3008, 6067, 12183, 6376, 16988, 7563, 8146, 889, 525, 670, 3003, 2503, 3255, 3202, 2502, 3389, 339, 339, 9939, 10934, 7791, 7266, 7761, 8664, 14792, 16760, 17171, 21051, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 9, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(394, 291, 65.69436645507812, -167.99867248535156, 3535, -10120, 8938, -16616, 5, 34129, 2, 0, 209, 569, 526, 314, 201, 67, 48, 1514, 2395, 3129, 2761, 3055, 6266, 12591, 6582, 17565, 7879, 8635, 921, 540, 693, 3003, 2488, 3255, 3180, 2502, 3375, 338, 338, 9962, 10835, 7745, 7248, 7748, 8643, 14739, 16637, 17145, 20923, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 7, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 14, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [81, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(394, 292, 65.69184112548828, -168.0303497314453, 3526, -10123, 8938, -16619, 6, 34095, 2, 0, 220, 651, 526, 318, 234, 85, 48, 1505, 2386, 3121, 2785, 3094, 6590, 13265, 6963, 18615, 8567, 9875, 1015, 564, 740, 3010, 2503, 3255, 3188, 2456, 3361, 345, 345, 9870, 10868, 7783, 7302, 7761, 8650, 14739, 16678, 17144, 20990, 13, 4, 6, 15, 15, 15, 15, 0, 10, 5, 8, 7, 4, 5, 4, 7, 0, 0, 10, 12, 15, 5, 9, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 12383, 49, 43, 250, 248, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(395, 287, 65.71517181396484, -167.87623596191406, 3573, -10124, 8941, -16605, 5, 34268, 0, 0, 198, 542, 513, 300, 183, 60, 29, 1496, 2343, 3027, 2635, 2904, 5886, 11824, 6182, 16478, 7447, 8243, 888, 511, 654, 3031, 2498, 3290, 3230, 2519, 3464, 340, 340, 10093, 11046, 7932, 7323, 7919, 8816, 14994, 16941, 17302, 21015, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 10, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(395, 288, 65.71263885498047, -167.908203125, 3564, -10127, 8941, -16608, 5, 34233, 2, 0, 194, 500, 516, 295, 158, 47, 25, 1496, 2360, 3035, 2643, 2912, 5775, 11590, 6048, 16115, 7146, 7586, 841, 511, 638, 3001, 2498, 3260, 3186, 2519, 3409, 342, 342, 9957, 10878, 7800, 7279, 7815, 8713, 14916, 16818, 17263, 20857, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 13, 13, 15, 5, 10, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(395, 289, 65.71009826660156, -167.94012451171875, 3554, -10130, 8940, -16611, 6, 34198, 1, 0, 198, 505, 516, 299, 159, 57, 29, 1505, 2368, 3051, 2659, 2936, 5847, 11746, 6127, 16305, 7226, 7673, 849, 511, 646, 2994, 2490, 3229, 3186, 2519, 3422, 349, 349, 9957, 10861, 7762, 7252, 7743, 8645, 14838, 16831, 17237, 20876, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 13, 13, 15, 5, 12, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(395, 290, 65.70755767822266, -167.97195434570312, 3545, -10133, 8940, -16614, 6, 34164, 2, 0, 202, 530, 516, 305, 170, 59, 33, 1487, 2359, 3051, 2690, 2967, 5974, 12003, 6285, 16740, 7471, 8037, 888, 534, 661, 3024, 2498, 3275, 3223, 2474, 3422, 356, 356, 9957, 10928, 7893, 7332, 7847, 8740, 14855, 16776, 17249, 21150, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 12, 15, 5, 10, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12421, 49, 43, 250, 248, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(395, 291, 65.70501708984375, -168.0037078857422, 3535, -10136, 8939, -16617, 6, 34129, 1, 0, 211, 587, 521, 310, 202, 76, 50, 1495, 2367, 3067, 2738, 3014, 6259, 12580, 6585, 17570, 7971, 8892, 935, 550, 701, 3061, 2505, 3297, 3223, 2541, 3409, 340, 340, 9979, 10894, 7916, 7332, 7873, 8761, 14838, 16775, 17160, 20953, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 4, 7, 0, 0, 11, 12, 15, 4, 8, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [-47, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(396, 288, 65.72332000732422, -167.9132080078125, 3564, -10143, 8942, -16608, 5, 34233, 2, 0, 193, 492, 506, 293, 149, 65528, 23, 1499, 2334, 3021, 2614, 2881, 5710, 11442, 5980, 15879, 7044, 7474, 823, 504, 637, 3017, 2504, 3268, 3212, 2535, 3451, 347, 347, 10063, 11038, 7836, 7335, 7844, 8742, 14915, 16850, 17220, 20894, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 13, 14, 15, 5, 12, 2, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(396, 289, 65.72077178955078, -167.94512939453125, 3554, -10146, 8941, -16611, 6, 34198, 2, 0, 195, 501, 510, 300, 157, 65528, 28, 1481, 2334, 3013, 2621, 2904, 5781, 11557, 6051, 16093, 7147, 7624, 847, 504, 637, 3017, 2504, 3253, 3205, 2490, 3410, 332, 332, 10017, 10938, 7821, 7291, 7805, 8707, 14853, 16850, 17207, 21072, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 13, 14, 15, 5, 12, 2, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 12431, 49, 44, 245, 245, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]),
(396, 290, 65.71821594238281, -167.9770050048828, 3545, -10149, 8941, -16614, 9, 34164, 1, 0, 200, 526, 511, 301, 170, 65528, 35, 1480, 2350, 3029, 2645, 2928, 5907, 11842, 6208, 16528, 7384, 7988, 870, 527, 661, 3054, 2504, 3291, 3235, 2490, 3424, 354, 354, 10039, 10988, 7958, 7395, 7902, 8811, 14853, 16836, 17231, 20852, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 12, 2, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0])],
dtype=[('mxd03_granule_row', '<i2'), ('mxd03_granule_column', '<i2'), ('mxd03_latitude', '<f4'), ('mxd03_longitude', '<f4'), ('mxd03_sensor_zenith', '<i2'), ('mxd03_sensor_azimuth', '<i2'), ('mxd03_solar_zenith', '<i2'), ('mxd03_solar_azimuth', '<i2'), ('mxd03_height', '<i2'), ('mxd03_range', '<u2'), ('mxd03_land_sea_mask', '|u1'), ('mxd03_gflags', '|u1'), ('mxd02_band_1A', '<u2'), ('mxd02_band_2A', '<u2'), ('mxd02_band_3A', '<u2'), ('mxd02_band_4A', '<u2'), ('mxd02_band_5A', '<u2'), ('mxd02_band_6A', '<u2'), ('mxd02_band_7A', '<u2'), ('mxd02_band_8', '<u2'), ('mxd02_band_9', '<u2'), ('mxd02_band_10', '<u2'), ('mxd02_band_11', '<u2'), ('mxd02_band_12', '<u2'), ('mxd02_band_13lo', '<u2'), ('mxd02_band_13hi', '<u2'), ('mxd02_band_14lo', '<u2'), ('mxd02_band_14hi', '<u2'), ('mxd02_band_15', '<u2'), ('mxd02_band_16', '<u2'), ('mxd02_band_17', '<u2'), ('mxd02_band_18', '<u2'), ('mxd02_band_19', '<u2'), ('mxd02_band_20', '<u2'), ('mxd02_band_21', '<u2'), ('mxd02_band_22', '<u2'), ('mxd02_band_23', '<u2'), ('mxd02_band_24', '<u2'), ('mxd02_band_25', '<u2'), ('mxd02_band_26', '<u2'), ('mxd02_band_26B', '<u2'), ('mxd02_band_27', '<u2'), ('mxd02_band_28', '<u2'), ('mxd02_band_29', '<u2'), ('mxd02_band_30', '<u2'), ('mxd02_band_31', '<u2'), ('mxd02_band_32', '<u2'), ('mxd02_band_33', '<u2'), ('mxd02_band_34', '<u2'), ('mxd02_band_35', '<u2'), ('mxd02_band_36', '<u2'), ('mxd02_band_uncertainity_1A', '|u1'), ('mxd02_band_uncertainity_2A', '|u1'), ('mxd02_band_uncertainity_3A', '|u1'), ('mxd02_band_uncertainity_4A', '|u1'), ('mxd02_band_uncertainity_5A', '|u1'), ('mxd02_band_uncertainity_6A', '|u1'), ('mxd02_band_uncertainity_7A', '|u1'), ('mxd02_band_uncertainity_8', '|u1'), ('mxd02_band_uncertainity_9', '|u1'), ('mxd02_band_uncertainity_10', '|u1'), ('mxd02_band_uncertainity_11', '|u1'), ('mxd02_band_uncertainity_12', '|u1'), ('mxd02_band_uncertainity_13lo', '|u1'), ('mxd02_band_uncertainity_13hi', '|u1'), ('mxd02_band_uncertainity_14lo', '|u1'), ('mxd02_band_uncertainity_14hi', '|u1'), ('mxd02_band_uncertainity_15', '|u1'), ('mxd02_band_uncertainity_16', '|u1'), ('mxd02_band_uncertainity_17', '|u1'), ('mxd02_band_uncertainity_18', '|u1'), ('mxd02_band_uncertainity_19', '|u1'), ('mxd02_band_uncertainity_20', '|u1'), ('mxd02_band_uncertainity_21', '|u1'), ('mxd02_band_uncertainity_22', '|u1'), ('mxd02_band_uncertainity_23', '|u1'), ('mxd02_band_uncertainity_24', '|u1'), ('mxd02_band_uncertainity_25', '|u1'), ('mxd02_band_uncertainity_26', '|u1'), ('mxd02_band_uncertainity_26B', '|u1'), ('mxd02_band_uncertainity_27', '|u1'), ('mxd02_band_uncertainity_28', '|u1'), ('mxd02_band_uncertainity_29', '|u1'), ('mxd02_band_uncertainity_30', '|u1'), ('mxd02_band_uncertainity_31', '|u1'), ('mxd02_band_uncertainity_32', '|u1'), ('mxd02_band_uncertainity_33', '|u1'), ('mxd02_band_uncertainity_34', '|u1'), ('mxd02_band_uncertainity_35', '|u1'), ('mxd02_band_uncertainity_36', '|u1'), ('mxd02_band_nsamples_1A', '|i1'), ('mxd02_band_nsamples_2A', '|i1'), ('mxd02_band_nsamples_3A', '|i1'), ('mxd02_band_nsamples_4A', '|i1'), ('mxd02_band_nsamples_5A', '|i1'), ('mxd02_band_nsamples_6A', '|i1'), ('mxd02_band_nsamples_7A', '|i1'), ('reserved_20120221a', '|u1'), ('mxd11_lst', '<u2'), ('mxd11_qc', '<u2'), ('mxd11_error_lst', '<u2'), ('mxd11_emis31', '<u2'), ('mxd11_emis32', '<u2'), ('mxd11_view_angle', '|u1'), ('mxd11_view_time', '|u1'), ('mxd35_cloud_mask', '|i1', (6,)), ('mxd35_quality_assurance', '|i1', (10,)), ('reserved_20120221b', '|u1', (5,))])
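# For orientation (sketch only, nothing below uses it): each tuple above is
# one record of the compound dtype just defined -- MODIS-style per-cell
# geolocation, band counts, uncertainties and QA flags packed into a single
# element. A minimal structured dtype of the same flavor would be, e.g.,
#   numpy.dtype([('lat', '<f4'), ('lon', '<f4'), ('qa_flags', '|i1', (6,))])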
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
class CompoundAlignTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
dataset = netCDF4.Dataset(self.file, "w")
        # Create a netCDF compound type matching the record array's dtype,
        # create a variable of that type, and store the cell records in it.
        cell_cmp_dtype = dataset.createCompoundType(cells.dtype, "cell_cmp_dtype")
        dataset.createDimension("number_cells", cells.size)
        v_cells = dataset.createVariable("cells", cell_cmp_dtype, "number_cells")
v_cells[:] = cells
dataset.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
f = netCDF4.Dataset(self.file, 'r')
new_cells = f.variables["cells"][:]
assert new_cells.shape == cells.shape
assert sorted(new_cells.dtype.names) == sorted(cells.dtype.names)
for name in cells.dtype.names:
assert cells[name].dtype.name == new_cells[name].dtype.name
assert cells[name].shape == new_cells[name].shape
assert_array_equal(cells[name],new_cells[name])
f.close()
if __name__ == '__main__':
unittest.main()
| 37,907 | 340.513514 | 3,483 | py |
netcdf4-python | netcdf4-python-master/test/tst_utils.py | from numpy.testing import assert_equal
from netCDF4.utils import _StartCountStride, _out_array_shape
import unittest
import numpy as np
class TestgetStartCountStride(unittest.TestCase):
def test_basic(self):
# Basic usage
elem = [0, slice(None), slice(None)]
start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 10))
assert_equal(start, 0)
assert_equal(count[..., 0], 1)
assert_equal(count[..., 1], 4)
assert_equal(count[..., 2], 10)
assert_equal(stride, 1)
assert_equal(put_ind[...,0], -1)
assert_equal(put_ind[...,1], slice(None))
assert_equal(put_ind[...,2], slice(None))
assert_equal(_out_array_shape(count), (1, 4,10))
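        # For reference: _StartCountStride maps a numpy-style index expression
        # onto the start/count/stride vectors passed to the netCDF C library,
        # plus put_ind, the indices used to place each read chunk in the
        # output array. Here [0, :, :] on a (50, 4, 10) variable requests
        # start=(0,0,0), count=(1,4,10), stride=(1,1,1), and put_ind records
        # how the chunk maps into the (1, 4, 10) result, flagging the scalar
        # first axis with -1.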
def test_slice(self):
# start and stop slice
elem = [5, slice(None), slice(5, 8, 2)]
start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 10))
assert_equal(start[..., 0], 5)
assert_equal(start[..., 1], 0)
assert_equal(start[..., 2], 5)
assert_equal(count[..., 0], 1)
assert_equal(count[..., 1], 4)
assert_equal(count[..., 2], 2)
assert_equal(stride[..., 2], 2)
assert_equal(_out_array_shape(count), (1, 4,2))
def test_fancy(self):
# Fancy indexing
elem = [slice(None), [1,2,3], 8]
start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 10))
assert_equal(start[..., 0], 0)
assert_equal(start[..., 1].squeeze(), 1)
assert_equal(start[..., 2], 8)
assert_equal(count[...,0], 50)
assert_equal(count[...,1], 3)
assert_equal(count[...,2], 1)
assert_equal(put_ind[...,1].squeeze(), slice(None, None, None))
assert_equal(_out_array_shape(count), (50, 3, 1))
i = np.array([2,5,7],'i4')
elem = [slice(None, -1,2),i,slice(None)]
start, count, stride, put_ind = _StartCountStride(elem, (9,10,11))
try:
elem = ( np.arange(6).reshape((3,2)), slice(None), slice(None) )
start, count, stride, put_ind = _StartCountStride(elem, (3,4,5))
except IndexError:
pass
# this one should be converted to a slice
elem = [slice(None), [1,3,5], 8]
start, count, stride, put_ind = _StartCountStride(elem, (50, 6, 10))
# pull request #683 now does not convert integer sequences to strided
# slices. PR #1224 reverts this behavior.
assert_equal(put_ind[...,1].squeeze(), slice(None,None,None))
#assert_equal(put_ind[...,1].squeeze(), [0,1,2])
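        # i.e. the evenly spaced [1, 3, 5] is again collapsed into the
        # strided slice 1:6:2, so the data lands in the output via a plain
        # slice rather than point indices.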
def test_multiple_sequences(self):
elem = [[4,6,7], [1,2,3], slice(None)]
start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 10))
assert_equal(_out_array_shape(count), (3, 3, 10))
assert_equal(start[..., 0].squeeze(), [4,6,7])
assert_equal(start[..., 1].squeeze(), [1,1,1])
assert_equal(start[..., 2], 0)
assert_equal(count[...,0], 1)
assert_equal(count[...,1], 3)
assert_equal(count[...,2], 10)
i = [1,3,4]
elem = (i, i, i)
start, count, stride, put_ind = _StartCountStride(elem, (50, 5, 10))
assert_equal(_out_array_shape(count), (3,3,3))
def test_put_indices(self):
elem = (1, slice(None), slice(None))
start, count, stride, put_ind = _StartCountStride(elem, (3,4,5))
orig = np.arange(60).reshape((3,4,5))
dest = np.empty(_out_array_shape(count))
dest[tuple(put_ind[0,0,0])] = orig[tuple(elem)]
def test_boolean(self):
elem = (1, slice(None), np.array([True, True, False, False, True]))
start, count, stride, put_ind = _StartCountStride(elem, (50, 4,5))
assert_equal(start[..., 2].squeeze(), [0,1,4])
assert_equal(count[...,2], 1)
assert_equal(_out_array_shape(count), (1, 4, 3))
# Multiple booleans --- The behavior is different from NumPy in this case.
elem = (np.array([True, True, False]), np.array([True, True, False, True]), slice(None))
start, count, stride, put_ind = _StartCountStride(elem, (3,4,5))
assert_equal(_out_array_shape(count), (2,3,5))
        try:
            elem = (np.array([True, True, False]), np.array([True, True, True, False]), slice(None))
            start, count, stride, put_ind = _StartCountStride(elem, (3,4,5))
        except IndexError:
            pass
def test_1d(self):
# Scalar
elem = (0)
start, count, stride, put_ind = _StartCountStride(elem, (10,))
assert_equal(start, 0)
assert_equal(count, 1)
assert_equal(stride, 1)
assert_equal(put_ind, -1)
elem = (-1)
start, count, stride, put_ind = _StartCountStride(elem, (10,))
assert_equal(start, 9)
assert_equal(count, 1)
assert_equal(stride, 1)
assert_equal(put_ind, -1)
        # test conversion of an integer index array to a slice
elem = (np.array([0,1]))
start, count, stride, put_ind = _StartCountStride(elem, (10,))
assert_equal(start, 0)
assert_equal(count, 2)
assert_equal(stride, 1)
assert_equal(put_ind[:,0], slice(None,None,None))
# Slice
elem = (slice(2,5,2))
start, count, stride, put_ind = _StartCountStride(elem, (10,))
assert_equal(start, 2)
assert_equal(count, 2)
assert_equal(stride, 2)
assert_equal(put_ind, slice(None))
# Integer sequence
elem = ([2,4,7])
start, count, stride, put_ind = _StartCountStride(elem, (10,))
assert_equal(start.squeeze(), [2,4,7])
assert_equal(count, 1)
assert_equal(stride, 1)
assert_equal(put_ind[:,0], [0,1,2])
# Boolean slicing
elem = (np.array([True, True, False, True, False]),)
start, count, stride, put_ind = _StartCountStride(elem, (5,))
assert_equal(start.squeeze(), [0,1,3])
assert_equal(count, 1)
assert_equal(stride, 1)
assert_equal(put_ind[:,0], [0,1,2])
# Integer sequence simplification
elem = ([2,3,4])
start, count, stride, put_ind = _StartCountStride(elem, (10,))
assert_equal(start, 2)
assert_equal(count, 3)
assert_equal(stride, 1)
assert_equal(put_ind, slice(None))
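        # i.e. the contiguous [2, 3, 4] collapses into a single read with
        # start=2, count=3, stride=1 -- equivalent to slice(2, 5).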
# Boolean indices simplification
elem = (np.array([False, True, True, True, False]))
start, count, stride, put_ind = _StartCountStride(elem, (5,))
assert_equal(start, 1)
assert_equal(count, 3)
assert_equal(stride, 1)
assert_equal(put_ind, slice(None))
# All False
elem = (np.array([False, False, False, False]))
start, count, stride, put_ind = _StartCountStride(elem, (4,))
assert_equal(count, 0)
assert_equal(_out_array_shape(count), (0,))
def test_ellipsis(self):
elem=(Ellipsis, slice(1, 4))
start, count, stride, put_ind = _StartCountStride(elem, (22,25,4))
assert_equal(start[0,0,0], [0, 0, 1])
assert_equal(count[0,0,0], (22, 25, 3))
assert_equal(put_ind[0,0,0], (slice(None), slice(None), slice(None)))
elem=(Ellipsis, [15,16,17,18,19], slice(None), slice(None))
start, count, stride, put_ind = _StartCountStride(elem, (2,10,20,10,10))
assert_equal(start[0,0,0,0,0], [0, 0, 15, 0, 0])
assert_equal(count[0,0,0,0,0], (2, 10, 5, 10, 10))
assert_equal(put_ind[0,0,0,0,0], (slice(None), slice(None), slice(None), slice(None), slice(None)))
try:
elem=(Ellipsis, [15,16,17,18,19], slice(None))
start, count, stride, put_ind = _StartCountStride(elem, (2,10,20,10,10))
assert_equal(None, 'Should throw an exception')
except IndexError as e:
assert_equal(str(e), "integer index exceeds dimension size")
try:
elem=(Ellipsis, [15,16,17,18,19], Ellipsis)
start, count, stride, put_ind = _StartCountStride(elem, (2,10, 20,10,10))
assert_equal(None, 'Should throw an exception')
except IndexError as e:
assert_equal(str(e), "At most one ellipsis allowed in a slicing expression")
class TestsetStartCountStride(unittest.TestCase):
def test_basic(self):
grp = FakeGroup({'x':False, 'y':False, 'time':True})
elem=(slice(None), slice(None), 1)
start, count, stride, take_ind = _StartCountStride(elem, (22, 25, 1), ['x', 'y', 'time'], grp, (22,25), put=True)
assert_equal(start[0][0][0], [0, 0, 1])
assert_equal(count[0][0][0], (22, 25, 1))
assert_equal(take_ind[0][0][0], (slice(None), slice(None), -1))
elem=(slice(None), slice(None), slice(1, 4))
start, count, stride, take_ind = _StartCountStride(elem, (22,25,1),\
['x', 'y', 'time'], grp, (22,25,3), put=True)
assert_equal(start[0][0][0], [0, 0, 1])
assert_equal(count[0][0][0], (22, 25, 3))
assert_equal(take_ind[0][0][0], (slice(None), slice(None), slice(None)))
def test_integer(self):
grp = FakeGroup({'x':False, 'y':False})
elem=([0,4,5], slice(20, None))
start, count, stride, take_ind = _StartCountStride(elem, (22, 25), ['x', 'y'], grp, (3,5), put=True)
assert_equal(start[0][0], (0, 20))
assert_equal(start[1][0], (4, 20))
assert_equal(start[2][0], (5, 20))
assert_equal(count[0], np.array([[1,5],]))
assert_equal(stride[0][0], (1, 1))
assert_equal(take_ind[0][0], (0, slice(None)))
assert_equal(take_ind[1][0], (1, slice(None)))
assert_equal(take_ind[2][0], (2, slice(None)))
def test_booleans(self):
grp = FakeGroup({'x':False, 'y':False, 'z':False})
elem=([0,4,5], np.array([False, True, False, True, True]), slice(None))
start, count, stride, take_ind = _StartCountStride(elem, (10, 5, 12), ['x', 'y', 'z'], grp, (3, 3, 12), put=True)
assert_equal(start[0][0][0], (0, 1, 0))
assert_equal(start[1][0][0], (4, 1, 0))
assert_equal(start[2][0][0], (5, 1, 0))
assert_equal(start[0][1][0], (0, 3, 0))
assert_equal(count[0][0][0], (1, 1, 12))
assert_equal(stride[0][0][0], (1, 1, 1))
assert_equal(take_ind[0][0][0], (0, 0, slice(None)))
assert_equal(take_ind[1][0][0], (1, 0, slice(None)))
assert_equal(take_ind[0][1][0], (0, 1, slice(None)))
def test_unlim(self):
grp = FakeGroup({'time':True,'x':False, 'y':False})
elem = ([0,2,5], slice(None), slice(None))
start, count, stride, take_ind = _StartCountStride(elem, (0, 6, 7),\
['time', 'x', 'y'], grp, (3, 6, 7), put=True)
assert_equal(start[0][0][0], (0, 0, 0))
assert_equal(start[2][0][0], (5, 0, 0))
assert_equal(count[2][0][0], (1, 6, 7))
assert_equal(take_ind[0][0][0], (0, slice(None), slice(None)))
assert_equal(take_ind[2][0][0], (2, slice(None), slice(None)))
# pull request #683 broke this, since _StartCountStride now uses
# Dimension.__len__.
#elem = (slice(None, None, 2), slice(None), slice(None))
#start, count, stride, take_ind = _StartCountStride(elem, (0, 6, 7),\
# ['time', 'x', 'y'], grp, (10, 6, 7),put=True)
#assert_equal(start[0][0][0], (0,0,0))
#assert_equal(count[0][0][0], (5, 6, 7))
#assert_equal(stride[0][0][0], (2, 1, 1))
#assert_equal(take_ind[0][0][0], 3*(slice(None),))
def test_ellipsis(self):
grp = FakeGroup({'x':False, 'y':False, 'time':True})
elem=(Ellipsis, slice(1, 4))
start, count, stride, take_ind = _StartCountStride(elem, (22,25,1),\
['x', 'y', 'time'], grp, (22,25,3), put=True)
assert_equal(start[0,0,0], [0, 0, 1])
assert_equal(count[0,0,0], (22, 25, 3))
assert_equal(take_ind[0,0,0], (slice(None), slice(None), slice(None)))
grp = FakeGroup({'time':True, 'h':False, 'z':False, 'y':False, 'x':False})
elem=(Ellipsis, [15,16,17,18,19], slice(None), slice(None))
start, count, stride, take_ind = _StartCountStride(elem, (2,10,20,10,10),\
['time', 'h', 'z', 'y', 'x'], grp, (2,10,5,10,10), put=True)
assert_equal(start[0,0,0,0,0], [0, 0, 15, 0, 0])
assert_equal(count[0,0,0,0,0], [2, 10, 5, 10, 10])
assert_equal(stride[0,0,0,0,0], [1, 1, 1, 1, 1])
assert_equal(take_ind[0,0,0,0,0], (slice(None), slice(None), slice(None), slice(None), slice(None)))
try:
elem=(Ellipsis, [15,16,17,18,19], slice(None))
start, count, stride, take_ind = _StartCountStride(elem, (2,10,20,10,10),\
['time', 'z', 'y', 'x'], grp, (2,10,5,10,10), put=True)
assert_equal(None, 'Should throw an exception')
except IndexError as e:
#assert_equal(str(e), "integer index exceeds dimension size")
assert_equal(str(e), "list index out of range")
try:
elem=(Ellipsis, [15,16,17,18,19], Ellipsis)
start, count, stride, take_ind = _StartCountStride(elem, (2,10, 20,10,10),\
['time', 'z', 'y', 'x'], grp, (2,10,5,10,10), put=True)
assert_equal(None, 'Should throw an exception')
except IndexError as e:
#assert_equal(str(e), "At most one ellipsis allowed in a slicing expression")
assert_equal(str(e), "list index out of range")
class FakeGroup:
"""Create a fake group instance by passing a dictionary of booleans
keyed by dimension name."""
def __init__(self, dimensions):
self.dimensions = {}
for k,v in dimensions.items():
self.dimensions[k] = FakeDimension(v)
class FakeDimension:
def __init__(self, unlimited=False):
self.unlimited = unlimited
def isunlimited(self):
return self.unlimited
if __name__=='__main__':
unittest.main()
| 13,906 | 39.902941 | 121 | py |
netcdf4-python | netcdf4-python-master/test/run_all.py | import glob, os, sys, unittest, struct, tempfile
from netCDF4 import getlibversion,__hdf5libversion__,__netcdf4libversion__,__version__, Dataset
from netCDF4 import __has_cdf5_format__, __has_nc_inq_path__, __has_nc_create_mem__, \
__has_parallel4_support__, __has_pnetcdf_support__, \
__has_zstandard_support__, __has_bzip2_support__, \
__has_blosc_support__,__has_quantization_support__,\
__has_szip_support__
# can also just run
# python -m unittest discover . 'tst*py'
# Find all test files.
test_files = glob.glob('tst_*.py')
if __netcdf4libversion__ < '4.2.1' or __has_parallel4_support__ or __has_pnetcdf_support__:
test_files.remove('tst_diskless.py')
sys.stdout.write('not running tst_diskless.py ...\n')
if not __has_nc_inq_path__:
test_files.remove('tst_filepath.py')
sys.stdout.write('not running tst_filepath.py ...\n')
if not __has_nc_create_mem__:
test_files.remove('tst_create_mem.py')
sys.stdout.write('not running tst_create_mem.py ...\n')
if not __has_cdf5_format__ or struct.calcsize("P") < 8:
test_files.remove('tst_cdf5.py')
sys.stdout.write('not running tst_cdf5.py ...\n')
if not __has_quantization_support__:
test_files.remove('tst_compression_quant.py')
sys.stdout.write('not running tst_compression_quant.py ...\n')
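# Probe a throwaway file so the checks below reflect what the linked
# netCDF/HDF5 libraries actually provide at runtime: a build can report
# compile-time support for a compression feature whose filter plugin is
# nevertheless unavailable.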
filename = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
nc = Dataset(filename,'w')
if not __has_zstandard_support__ or os.getenv('NO_PLUGINS') or not nc.has_zstd_filter():
test_files.remove('tst_compression_zstd.py')
sys.stdout.write('not running tst_compression_zstd.py ...\n')
if not __has_bzip2_support__ or os.getenv('NO_PLUGINS') or not nc.has_bzip2_filter():
test_files.remove('tst_compression_bzip2.py')
sys.stdout.write('not running tst_compression_bzip2.py ...\n')
if not __has_blosc_support__ or os.getenv('NO_PLUGINS') or not nc.has_blosc_filter():
test_files.remove('tst_compression_blosc.py')
sys.stdout.write('not running tst_compression_blosc.py ...\n')
if not __has_szip_support__ or not nc.has_szip_filter():
test_files.remove('tst_compression_szip.py')
sys.stdout.write('not running tst_compression_szip.py ...\n')
nc.close()
os.remove(filename)
# Don't run tests that require network connectivity
if os.getenv('NO_NET'):
    test_files.remove('tst_dap.py')
sys.stdout.write('not running tst_dap.py ...\n')
else:
# run opendap test first (issue #856).
test_files.remove('tst_dap.py')
test_files.insert(0,'tst_dap.py')
# Don't run CDL test (that requires ncdump/ncgen)
if os.getenv('NO_CDL'):
    test_files.remove('tst_cdl.py')
sys.stdout.write('not running tst_cdl.py ...\n')
# Don't run computationally intensive test
if not os.getenv('MEMORY_LEAK_TEST'):
    test_files.remove('tst_multiple_open_close.py')
sys.stdout.write('not running tst_multiple_open_close.py ...\n')
# Build the test suite from the tests found in the test files.
testsuite = unittest.TestSuite()
for f in test_files:
m = __import__(os.path.splitext(f)[0])
testsuite.addTests(unittest.TestLoader().loadTestsFromModule(m))
# Run the test suite.
def test(verbosity=1):
runner = unittest.TextTestRunner(verbosity=verbosity)
runner.run(testsuite)
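# e.g. from an interactive session the same suite can be run with
#   import run_all; run_all.test(verbosity=2)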
if __name__ == '__main__':
import numpy, cython
sys.stdout.write('\n')
sys.stdout.write('netcdf4-python version: %s\n' % __version__)
sys.stdout.write('HDF5 lib version: %s\n' % __hdf5libversion__)
sys.stdout.write('netcdf lib version: %s\n' % __netcdf4libversion__)
sys.stdout.write('numpy version %s\n' % numpy.__version__)
sys.stdout.write('cython version %s\n' % cython.__version__)
runner = unittest.TextTestRunner(verbosity=1)
result = runner.run(testsuite)
if not result.wasSuccessful():
sys.exit(1)
| 3,881 | 43.113636 | 95 | py |
netcdf4-python | netcdf4-python-master/test/tst_atts.py | import math
import subprocess
import sys
import unittest
import os
import tempfile
import warnings
import numpy as np
from collections import OrderedDict
from numpy.random.mtrand import uniform
import netCDF4
# test attribute creation.
#FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME = 'tst_atts.nc'
VAR_NAME="dummy_var"
GROUP_NAME = "dummy_group"
DIM1_NAME="x"
DIM1_LEN=2
DIM2_NAME="y"
DIM2_LEN=3
DIM3_NAME="z"
DIM3_LEN=25
STRATT = 'string attribute'
EMPTYSTRATT = ''
INTATT = 1
FLOATATT = math.pi
SEQATT = np.arange(10)
STRINGSEQATT = ['mary ','','had ','a ','little ','lamb',]
#ATTDICT = {'stratt':STRATT,'floatatt':FLOATATT,'seqatt':SEQATT,
# 'stringseqatt':''.join(STRINGSEQATT), # changed in issue #770
# 'emptystratt':EMPTYSTRATT,'intatt':INTATT}
ATTDICT = {'stratt':STRATT,'floatatt':FLOATATT,'seqatt':SEQATT,
'stringseqatt':STRINGSEQATT,
'emptystratt':EMPTYSTRATT,'intatt':INTATT}
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
with netCDF4.Dataset(self.file,'w') as f:
# try to set a dataset attribute with one of the reserved names.
f.setncattr('file_format','netcdf4_format')
# test attribute renaming
f.stratt_tmp = STRATT
f.renameAttribute('stratt_tmp','stratt')
f.emptystratt = EMPTYSTRATT
f.intatt = INTATT
f.floatatt = FLOATATT
f.seqatt = SEQATT
            # sequences of strings are written as-is (since issue #770 they
            # are no longer joined into a single string).
f.stringseqatt = STRINGSEQATT
f.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING
g = f.createGroup(GROUP_NAME)
f.createDimension(DIM1_NAME, DIM1_LEN)
f.createDimension(DIM2_NAME, DIM2_LEN)
f.createDimension(DIM3_NAME, DIM3_LEN)
g.createDimension(DIM1_NAME, DIM1_LEN)
g.createDimension(DIM2_NAME, DIM2_LEN)
g.createDimension(DIM3_NAME, DIM3_LEN)
g.stratt_tmp = STRATT
g.renameAttribute('stratt_tmp','stratt')
g.emptystratt = EMPTYSTRATT
g.intatt = INTATT
g.floatatt = FLOATATT
g.seqatt = SEQATT
g.stringseqatt = STRINGSEQATT
if netCDF4.__version__ > "1.4.2":
with self.assertRaises(ValueError):
g.arrayatt = [[1, 2], [3, 4]] # issue #841
g.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING
v = f.createVariable(VAR_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME))
# try to set a variable attribute with one of the reserved names.
v.setncattr('ndim','three')
v.setncatts({'foo': 1})
v.setncatts(OrderedDict(bar=2))
v.stratt_tmp = STRATT
v.renameAttribute('stratt_tmp','stratt')
v.emptystratt = EMPTYSTRATT
v.intatt = INTATT
v.floatatt = FLOATATT
v.seqatt = SEQATT
v.stringseqatt = STRINGSEQATT
v.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING
v1 = g.createVariable(VAR_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME))
v1.stratt = STRATT
v1.emptystratt = EMPTYSTRATT
v1.intatt = INTATT
v1.floatatt = FLOATATT
v1.seqatt = SEQATT
v1.stringseqatt = STRINGSEQATT
v1.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING
# issue #959: should not be able to set _FillValue after var creation
try:
v1._FillValue(-999.)
except AttributeError:
pass
else:
raise ValueError('This test should have failed.')
try:
v1.setncattr('_FillValue',-999.)
except AttributeError:
pass
else:
raise ValueError('This test should have failed.')
# issue #485 (triggers segfault in C lib
# with version 1.2.1 without pull request #486)
f.foo = np.array('bar','S')
f.foo = np.array('bar','U')
# issue #529 write string attribute as NC_CHAR unless
# it can't be decoded to ascii. Add setncattr_string
# method to force NC_STRING.
f.charatt = 'foo' # will be written as NC_CHAR
f.setncattr_string('stringatt','bar') # NC_STRING
f.cafe = 'caf\xe9' # NC_STRING
f.batt = 'caf\xe9'.encode() #NC_CHAR
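            # Rule of thumb (issue #529): an ascii-decodable str is written
            # as NC_CHAR; a non-ascii str or setncattr_string() forces
            # NC_STRING; bytes are written as NC_CHAR.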
v.setncattr_string('stringatt','bar') # NC_STRING
            # issue #882 - provide an option to always write string attributes
            # as NC_STRINGs. Testing various approaches to setting text attributes...
f.set_ncstring_attrs(True)
f.stringatt_ncstr = 'foo' # will now be written as NC_STRING
f.setncattr_string('stringatt_ncstr','bar') # NC_STRING anyway
f.caf_ncstr = 'caf\xe9' # NC_STRING anyway
f.bat_ncstr = 'caf\xe9'.encode() # now NC_STRING
g.stratt_ncstr = STRATT # now NC_STRING
#g.renameAttribute('stratt_tmp','stratt_ncstr')
v.setncattr_string('stringatt_ncstr','bar') # NC_STRING anyway
v.stratt_ncstr = STRATT
v1.emptystratt_ncstr = EMPTYSTRATT
def tearDown(self):
# Remove the temporary files
#pass
os.remove(self.file)
def runTest(self):
"""testing attributes"""
with netCDF4.Dataset(self.file, 'r') as f:
v = f.variables[VAR_NAME]
g = f.groups[GROUP_NAME]
v1 = g.variables[VAR_NAME]
# check attributes in root group.
# global attributes.
# check __dict__ method for accessing all netCDF attributes.
for key,val in ATTDICT.items():
if type(val) == np.ndarray:
assert f.__dict__[key].tolist() == val.tolist()
else:
assert f.__dict__[key] == val
# check accessing individual attributes.
assert f.intatt == INTATT
assert f.floatatt == FLOATATT
assert f.stratt == STRATT
assert f.emptystratt == EMPTYSTRATT
assert f.seqatt.tolist() == SEQATT.tolist()
#assert f.stringseqatt == ''.join(STRINGSEQATT) # issue 770
assert f.stringseqatt == STRINGSEQATT
assert f.stringseqatt_array == STRINGSEQATT
assert f.getncattr('file_format') == 'netcdf4_format'
# variable attributes.
# check __dict__ method for accessing all netCDF attributes.
for key,val in ATTDICT.items():
if type(val) == np.ndarray:
assert v.__dict__[key].tolist() == val.tolist()
else:
assert v.__dict__[key] == val
# check accessing individual attributes.
assert v.intatt == INTATT
assert v.floatatt == FLOATATT
assert v.stratt == STRATT
assert v.seqatt.tolist() == SEQATT.tolist()
#assert v.stringseqatt == ''.join(STRINGSEQATT) # issue 770
assert v.stringseqatt == STRINGSEQATT
assert v.stringseqatt_array == STRINGSEQATT
assert v.getncattr('ndim') == 'three'
assert v.getncattr('foo') == 1
assert v.getncattr('bar') == 2
# check type of attributes using ncdump (issue #529)
if not os.getenv('NO_CDL'):
ncdump_output = f.tocdl()
for line in ncdump_output:
line = line.strip('\t\n\r')
                    line = line.strip()  # strip again: group attribute lines are indented further
if "stringatt" in line: assert line.startswith('string')
if "charatt" in line: assert line.startswith(':')
if "cafe" in line: assert line.startswith('string')
if "batt" in line: assert line.startswith(':')
if "_ncstr" in line: assert line.startswith('string')
# check attributes in subgroup.
# global attributes.
for key,val in ATTDICT.items():
if type(val) == np.ndarray:
assert g.__dict__[key].tolist() == val.tolist()
else:
assert g.__dict__[key] == val
assert g.intatt == INTATT
assert g.floatatt == FLOATATT
assert g.stratt == STRATT
assert g.emptystratt == EMPTYSTRATT
assert g.seqatt.tolist() == SEQATT.tolist()
#assert g.stringseqatt == ''.join(STRINGSEQATT) # issue 770
assert g.stringseqatt == STRINGSEQATT
assert g.stringseqatt_array == STRINGSEQATT
for key,val in ATTDICT.items():
if type(val) == np.ndarray:
assert v1.__dict__[key].tolist() == val.tolist()
else:
assert v1.__dict__[key] == val
assert v1.intatt == INTATT
assert v1.floatatt == FLOATATT
assert v1.stratt == STRATT
assert v1.emptystratt == EMPTYSTRATT
assert v1.seqatt.tolist() == SEQATT.tolist()
#assert v1.stringseqatt == ''.join(STRINGSEQATT) # issue 770
assert v1.stringseqatt == STRINGSEQATT
assert v1.stringseqatt_array == STRINGSEQATT
            assert getattr(v1, 'nonexistantatt', None) is None
# issue 915 empty string attribute (ncdump reports 'NIL')
f = netCDF4.Dataset('test_gold.nc')
assert f['RADIANCE'].VAR_NOTES == ""
f.close()
if __name__ == '__main__':
unittest.main()
| 9,842 | 42.171053 | 87 | py |
netcdf4-python | netcdf4-python-master/test/tst_multiple_open_close.py | import os
import tracemalloc
import unittest
import netCDF4
class MultipleVariablesByAttributesCallsTests(unittest.TestCase):
def test_multiple_calls(self):
netcdf_file = os.path.join(os.path.dirname(__file__), "netcdf_dummy_file.nc")
tracemalloc.start()
snapshot = tracemalloc.take_snapshot()
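        # Baseline snapshot: after the open/close cycles below, the
        # per-allocation diff should stay roughly flat if Dataset releases
        # its resources properly.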
k_times = 10
for _k in range(k_times):
nc = netCDF4.Dataset(netcdf_file)
vs = nc.get_variables_by_attributes(axis='Z')
self.assertEqual(len(vs), 1)
vs = nc.get_variables_by_attributes(units='m/s')
self.assertEqual(len(vs), 4)
vs = nc.get_variables_by_attributes(axis='Z', units='m')
self.assertEqual(len(vs), 1)
vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T'])
self.assertEqual(len(vs), 1)
vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None)
self.assertEqual(len(vs), 12)
vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None, long_name=lambda v: v is not None and 'Upward (w) velocity' in v)
self.assertEqual(len(vs), 1)
vs = nc.get_variables_by_attributes(units='m/s', grid_mapping=lambda v: v is not None)
self.assertEqual(len(vs), 4)
vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None, long_name='Upward (w) velocity')
self.assertEqual(len(vs), 1)
nc.close()
stats = tracemalloc.take_snapshot().compare_to(snapshot, 'filename')
tracemalloc.stop()
print("[ Top 10 differences ]")
for stat in stats[:10]:
print(stat)
if __name__ == '__main__':
unittest.main()
| 1,810 | 34.509804 | 151 | py |
netcdf4-python | netcdf4-python-master/test/tst_dims.py | import sys
import unittest
import os
import tempfile
from numpy.random.mtrand import uniform
import netCDF4
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
LAT_NAME="lat"
LAT_LEN = 25
LAT_LENG = 50
LON_NAME="lon"
LON_LEN = 50
LON_LENG = 100
LEVEL_NAME="level"
LEVEL_LEN = None
LEVEL_LENG = None
TIME_NAME="time"
TIME_LEN = None
TIME_LENG = None
GROUP_NAME='forecasts'
VAR_NAME1='temp1'
VAR_NAME2='temp2'
VAR_NAME3='temp3'
VAR_NAME4='temp4'
VAR_NAME5='temp5'
VAR_TYPE='f8'
class DimensionsTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = netCDF4.Dataset(self.file, 'w')
lat_dim=f.createDimension(LAT_NAME,LAT_LEN)
lon_dim=f.createDimension(LON_NAME,LON_LEN)
lev_dim=f.createDimension(LEVEL_NAME,LEVEL_LEN)
time_dim=f.createDimension(TIME_NAME,TIME_LEN)
# specify dimensions with names
fv1 = f.createVariable(VAR_NAME1,VAR_TYPE,(LEVEL_NAME, LAT_NAME, LON_NAME, TIME_NAME))
# specify dimensions with instances
fv2 = f.createVariable(VAR_NAME2,VAR_TYPE,(lev_dim,lat_dim,lon_dim,time_dim))
# specify dimensions using a mix of names and instances
fv3 = f.createVariable(VAR_NAME3,VAR_TYPE,(lev_dim, LAT_NAME, lon_dim, TIME_NAME))
# single dim instance for name (not in a tuple)
fv4 = f.createVariable(VAR_NAME4,VAR_TYPE,time_dim)
fv5 = f.createVariable(VAR_NAME5,VAR_TYPE,TIME_NAME)
g = f.createGroup(GROUP_NAME)
g.createDimension(LAT_NAME,LAT_LENG)
g.createDimension(LON_NAME,LON_LENG)
# should get dimensions from parent group.
# (did not work prior to alpha 18)
#g.createDimension(LEVEL_NAME,LEVEL_LENG)
#g.createDimension(TIME_NAME,TIME_LENG)
gv = g.createVariable(VAR_NAME1,VAR_TYPE,(LEVEL_NAME, LAT_NAME, LON_NAME, TIME_NAME))
f.close()
def tearDown(self):
# Remove the temporary file
os.remove(self.file)
def runTest(self):
"""testing dimensions"""
# check dimensions in root group.
f = netCDF4.Dataset(self.file, 'r+')
v1 = f.variables[VAR_NAME1]
v2 = f.variables[VAR_NAME2]
v3 = f.variables[VAR_NAME3]
v4 = f.variables[VAR_NAME4]
v5 = f.variables[VAR_NAME5]
isunlim = [dim.isunlimited() for dim in f.dimensions.values()]
dimlens = [len(dim) for dim in f.dimensions.values()]
names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME]
lens_check = [LAT_LEN, LON_LEN, LEVEL_LEN, TIME_LEN]
        isunlim = [dimlen is None for dimlen in lens_check]
for n,dimlen in enumerate(lens_check):
if dimlen is None:
lens_check[n] = 0
lensdict = dict(zip(names_check,lens_check))
unlimdict = dict(zip(names_check,isunlim))
# check that dimension names are correct.
for name in f.dimensions.keys():
self.assertTrue(name in names_check)
for name in v1.dimensions:
self.assertTrue(name in names_check)
for name in v2.dimensions:
self.assertTrue(name in names_check)
for name in v3.dimensions:
self.assertTrue(name in names_check)
self.assertTrue(v4.dimensions[0] == TIME_NAME)
self.assertTrue(v5.dimensions[0] == TIME_NAME)
        # check that dimension lengths are correct.
for name,dim in f.dimensions.items():
self.assertTrue(len(dim) == lensdict[name])
# check that isunlimited() method works.
for name,dim in f.dimensions.items():
self.assertTrue(dim.isunlimited() == unlimdict[name])
# add some data to variable along unlimited dims,
# make sure length of dimensions change correctly.
nadd1 = 2
nadd2 = 4
v1[0:nadd1,:,:,0:nadd2] = uniform(size=(nadd1,LAT_LEN,LON_LEN,nadd2))
lensdict[LEVEL_NAME]=nadd1
lensdict[TIME_NAME]=nadd2
# check that dimension lengths are correct.
for name,dim in f.dimensions.items():
self.assertTrue(len(dim) == lensdict[name])
# check dimensions in subgroup.
g = f.groups[GROUP_NAME]
vg = g.variables[VAR_NAME1]
isunlim = [dim.isunlimited() for dim in g.dimensions.values()]
dimlens = [len(dim) for dim in g.dimensions.values()]
names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME]
lens_check = [LAT_LENG, LON_LENG, LEVEL_LENG, TIME_LENG]
        isunlim = [dimlen is None for dimlen in lens_check]
for n,dimlen in enumerate(lens_check):
if dimlen is None:
lens_check[n] = 0
lensdict = dict(zip(names_check,lens_check))
unlimdict = dict(zip(names_check,isunlim))
# check that dimension names are correct.
for name in g.dimensions.keys():
self.assertTrue(name in names_check)
# check that dimension lengths are correct.
for name,dim in g.dimensions.items():
self.assertTrue(len(dim) == lensdict[name])
# check get_dims variable method
dim_tuple = vg.get_dims()
# some dimensions from parent group
dim_tup1 = (f.dimensions['level'],g.dimensions['lat'],\
g.dimensions['lon'],f.dimensions['time'])
dim_tup2 = vg.get_dims()
assert(dim_tup1 == dim_tup2)
# check that isunlimited() method works.
for name,dim in g.dimensions.items():
self.assertTrue(dim.isunlimited() == unlimdict[name])
# add some data to variable along unlimited dims,
# make sure length of dimensions change correctly.
nadd1 = 8
nadd2 = 4
vg[0:nadd1,:,:,0:nadd2] = uniform(size=(nadd1,LAT_LENG,LON_LENG,nadd2))
lensdict[LEVEL_NAME]=nadd1
lensdict[TIME_NAME]=nadd2
for name,dim in g.dimensions.items():
self.assertTrue(len(dim) == lensdict[name])
f.close()
if __name__ == '__main__':
unittest.main()
| 6,049 | 39.066225 | 94 | py |
netcdf4-python | netcdf4-python-master/test/tst_Unsigned.py | import unittest
import netCDF4
from numpy.testing import assert_array_equal
import numpy as np
class Test_Unsigned(unittest.TestCase):
"""
Test autoconversion to unsigned ints when _Unsigned attribute is True.
    This attribute is set by netcdf-java to designate unsigned
    integer data stored with a signed integer type in netcdf-3.
    If _Unsigned=True, a view to the data as unsigned integers is returned.
    set_auto_scale can be used to turn this off (default is on).
See issue #656 (pull request #658).
"""
def test_unsigned(self):
f = netCDF4.Dataset("ubyte.nc")
data = f['ub'][:]
assert data.dtype.str[1:] == 'u1'
assert_array_equal(data,np.array([0,255],np.uint8))
f.set_auto_scale(False)
data2 = f['ub'][:]
assert data2.dtype.str[1:] == 'i1'
assert_array_equal(data2,np.array([0,-1],np.int8))
data = f['sb'][:]
assert data.dtype.str[1:] == 'i1'
# issue #1232 _Unsigned='false' is same as not having _Unsigned set.
data = f['sb2'][:]
assert data.dtype.str[1:] == 'i1'
f.close()
# issue 671
f = netCDF4.Dataset('issue671.nc')
data1 = f['soil_moisture'][:]
assert(np.ma.isMA(data1))
f.set_auto_scale(False)
data2 = f['soil_moisture'][:]
assert(data1.mask.sum() == data2.mask.sum())
f.close()
# issue 794
# test that valid_min/valid_max/_FillValue are
# treated as unsigned integers.
f=netCDF4.Dataset('20171025_2056.Cloud_Top_Height.nc')
data = f['HT'][:]
assert(data.mask.sum() == 57432)
assert(int(data.max()) == 15430)
assert(int(data.min()) == 0)
assert(data.dtype == np.float32)
f.close()
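# Minimal sketch of the convention tested above (file/variable names here are
# hypothetical): bytes stored as signed int8 with _Unsigned = "true" are
# returned as a uint8 view on read.
def _unsigned_view_example(path='example_unsigned.nc'):
    nc = netCDF4.Dataset(path, 'w', format='NETCDF3_CLASSIC')
    nc.createDimension('d', 2)
    v = nc.createVariable('b', 'i1', ('d',))
    v._Unsigned = 'true'
    v[:] = np.array([0, -1], np.int8)             # bit patterns for 0 and 255
    nc.close()
    nc = netCDF4.Dataset(path)
    assert_array_equal(nc['b'][:], np.array([0, 255], np.uint8))
    nc.close()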
if __name__ == '__main__':
unittest.main()
| 1,832 | 34.941176 | 76 | py |
netcdf4-python | netcdf4-python-master/test/tst_grps.py | import sys
import unittest
import os
import tempfile
import netCDF4
# test group creation.
FILE_NAME1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
DYNASTY = "Tudor"
HENRY_VII = "Henry_VII"
MARGARET = "Margaret"
JAMES_V_OF_SCOTLAND = "James_V_of_Scotland"
MARY_I_OF_SCOTLAND = "Mary_I_of_Scotland"
JAMES_VI_OF_SCOTLAND_AND_I_OF_ENGLAND = "James_VI_of_Scotland_and_I_of_England"
names = [HENRY_VII,MARGARET,JAMES_V_OF_SCOTLAND,MARY_I_OF_SCOTLAND,JAMES_VI_OF_SCOTLAND_AND_I_OF_ENGLAND]
root = '/'
TREE1 = [root]
for n in range(1,len(names)+1):
path = []
for name in names[0:n]:
path.append(root+name)
TREE1.append(''.join(path))
TREE2 = [root,root+DYNASTY]
for name in names:
TREE2.append(root+DYNASTY+root+name)
TREE2.sort()
# python generator to walk the Group tree.
def walktree(top):
yield top.groups.values()
for value in top.groups.values():
yield from walktree(value)
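# Usage sketch for the generator above: collect the path of every group under
# an open Dataset (the root itself is not yielded, so it is added explicitly).
def _group_paths(ds):
    paths = [ds.path]
    for children in walktree(ds):
        for child in children:
            paths.append(child.path)
    return paths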
class GroupsTestCase(unittest.TestCase):
def setUp(self):
self.file1 = FILE_NAME1
f = netCDF4.Dataset(self.file1, 'w')
g1 = f.createGroup(HENRY_VII)
g2 = g1.createGroup(MARGARET)
g3 = g2.createGroup(JAMES_V_OF_SCOTLAND)
g4 = g3.createGroup(MARY_I_OF_SCOTLAND)
g5 = g4.createGroup(JAMES_VI_OF_SCOTLAND_AND_I_OF_ENGLAND)
f.close()
self.file2 = FILE_NAME2
f = netCDF4.Dataset(self.file2, 'w')
g1 = netCDF4.Group(f,DYNASTY)
g2 = g1.createGroup(HENRY_VII)
g3 = g1.createGroup(MARGARET)
g4 = g1.createGroup(JAMES_V_OF_SCOTLAND)
g5 = g1.createGroup(MARY_I_OF_SCOTLAND)
g6 = g1.createGroup(JAMES_VI_OF_SCOTLAND_AND_I_OF_ENGLAND)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file1)
os.remove(self.file2)
def runTest(self):
"""testing groups"""
f = netCDF4.Dataset(self.file1, 'r')
# issue 988
f.name
tree = [f.path]
for children in walktree(f):
for child in children:
tree.append(child.path)
f.close()
assert tree == TREE1
f = netCDF4.Dataset(self.file2, 'r')
tree = [f.path]
for children in walktree(f):
for child in children:
tree.append(child.path)
tree.sort()
f.close()
assert tree == TREE2
if __name__ == '__main__':
unittest.main()
| 2,519 | 28.647059 | 105 | py |
netcdf4-python | netcdf4-python-master/test/tst_cdl.py | import unittest
import netCDF4
import os
test_ncdump="""netcdf ubyte {
dimensions:
d = 2 ;
variables:
byte ub(d) ;
ub:_Unsigned = "true" ;
byte sb(d) ;
byte sb2(d) ;
sb2:_Unsigned = "false" ;
// global attributes:
:_Format = "classic" ;
}
"""
test_ncdump2="""netcdf ubyte {
dimensions:
d = 2 ;
variables:
byte ub(d) ;
ub:_Unsigned = "true" ;
byte sb(d) ;
byte sb2(d) ;
sb2:_Unsigned = "false" ;
// global attributes:
:_Format = "classic" ;
data:
ub = 0, -1 ;
sb = -128, 127 ;
sb2 = -127, -127 ;
}
"""
class Test_CDL(unittest.TestCase):
"""
Test import/export of CDL
"""
def setUp(self):
f=netCDF4.Dataset('ubyte.nc')
f.tocdl(outfile='ubyte.cdl',data=True)
f.close()
def test_tocdl(self):
f=netCDF4.Dataset('ubyte.nc')
assert(f.tocdl() == test_ncdump)
assert(f.tocdl(data=True) == test_ncdump2)
f.close()
def test_fromcdl(self):
f1=netCDF4.Dataset.fromcdl('ubyte.cdl',ncfilename='ubyte2.nc')
f2=netCDF4.Dataset('ubyte.nc')
assert(f1.variables.keys() == f2.variables.keys())
assert(f1.filepath() == 'ubyte2.nc')
assert(f1.dimensions.keys() == f2.dimensions.keys())
assert(len(f1.dimensions['d']) == len(f2.dimensions['d']))
assert((f1['ub'][:] == f2['ub'][:]).all())
assert((f1['sb'][:] == f2['sb'][:]).all())
f1.close(); f2.close()
os.remove('ubyte2.nc')
def tearDown(self):
# Remove the temporary files
os.remove('ubyte.cdl')
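# Round-trip sketch (hypothetical paths): tocdl() renders a dataset's header
# (optionally with data) as CDL text, and Dataset.fromcdl() invokes ncgen to
# rebuild a netCDF file from that text -- the cycle the tests above verify.
def _cdl_roundtrip(src='in.nc', cdl='out.cdl', dst='out.nc'):
    with netCDF4.Dataset(src) as nc:
        nc.tocdl(data=True, outfile=cdl)
    return netCDF4.Dataset.fromcdl(cdl, ncfilename=dst)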
if __name__ == '__main__':
unittest.main()
| 1,624 | 21.569444 | 70 | py |
netcdf4-python | netcdf4-python-master/test/tst_get_variables_by_attributes.py | import os
import unittest
import netCDF4
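# Matching semantics sketch (per the netCDF4 docs): a plain keyword value must
# compare equal to the variable's attribute, while a callable is invoked with
# the attribute value (None when the attribute is missing) and matches when it
# returns a truthy result -- the behavior the lambda-based tests below rely on.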
class VariablesByAttributesTests(unittest.TestCase):
def setUp(self):
netcdf_file = os.path.join(os.path.dirname(__file__), "netcdf_dummy_file.nc")
self.nc = netCDF4.Dataset(netcdf_file)
def test_find_variables_by_single_attribute(self):
vs = self.nc.get_variables_by_attributes(axis='Z')
self.assertEqual(len(vs), 1)
vs = self.nc.get_variables_by_attributes(units='m/s')
self.assertEqual(len(vs), 4)
def test_find_variables_by_multiple_attribute(self):
vs = self.nc.get_variables_by_attributes(axis='Z', units='m')
self.assertEqual(len(vs), 1)
def test_find_variables_by_single_lambda(self):
vs = self.nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T'])
self.assertEqual(len(vs), 1)
vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None)
self.assertEqual(len(vs), 12)
def test_find_variables_by_multiple_lambdas(self):
vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None,
long_name=lambda v: v is not None and 'Upward (w) velocity' in v)
self.assertEqual(len(vs), 1)
def test_find_variables_by_attribute_and_lambda(self):
vs = self.nc.get_variables_by_attributes(units='m/s',
grid_mapping=lambda v: v is not None)
self.assertEqual(len(vs), 4)
vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None,
long_name='Upward (w) velocity')
self.assertEqual(len(vs), 1)
if __name__ == '__main__':
unittest.main()
| 1,771 | 37.521739 | 114 | py |
netcdf4-python | netcdf4-python-master/test/tst_compression.py | from numpy.random.mtrand import uniform
from netCDF4 import Dataset
from netCDF4.utils import _quantize
from numpy.testing import assert_almost_equal
import os, tempfile, unittest
ndim = 100000
ndim2 = 100
chunk1 = 10; chunk2 = ndim2
nfiles = 7
files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)]
array = uniform(size=(ndim,))
array2 = uniform(size=(ndim,ndim2))
lsd = 3
def write_netcdf(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=False,contiguous=False,\
chunksizes=None,complevel=6,fletcher32=False):
file = Dataset(filename,'w')
file.createDimension('n', ndim)
foo = file.createVariable('data',\
dtype,('n'),zlib=zlib,least_significant_digit=least_significant_digit,\
shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
# use compression kwarg instead of deprecated zlib
if zlib:
compression='zlib'
else:
compression=None
# anything that evaluates to False is same as None
#compression=False
#compression=''
#compression=0
#compression='gzip' # should fail
foo2 = file.createVariable('data2',\
dtype,('n'),compression=compression,least_significant_digit=least_significant_digit,\
shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
foo[:] = data
foo2[:] = data
file.close()
file = Dataset(filename)
data = file.variables['data'][:]
data2 = file.variables['data2'][:]
file.close()
def write_netcdf2(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=False,contiguous=False,\
chunksizes=None,complevel=6,fletcher32=False):
file = Dataset(filename,'w')
file.createDimension('n', ndim)
file.createDimension('n2', ndim2)
foo = file.createVariable('data2',\
dtype,('n','n2'),zlib=zlib,least_significant_digit=least_significant_digit,\
shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
foo[:] = data
file.close()
file = Dataset(filename)
data = file.variables['data2'][:]
file.close()
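# Note on the two spellings exercised in write_netcdf above: zlib=True is the
# legacy flag, compression='zlib' the newer keyword. Any falsy compression
# value (None, False, '', 0) means no compression, and an unrecognized name
# such as 'gzip' raises an error at createVariable time.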
class CompressionTestCase(unittest.TestCase):
def setUp(self):
self.files = files
# no compression
write_netcdf(self.files[0],False,None,array)
# compressed, lossless, no shuffle.
write_netcdf(self.files[1],True,None,array)
# compressed, lossless, with shuffle.
write_netcdf(self.files[2],True,None,array,shuffle=True)
# compressed, lossy, no shuffle.
write_netcdf(self.files[3],True,lsd,array)
# compressed, lossy, with shuffle.
write_netcdf(self.files[4],True,lsd,array,shuffle=True)
# compressed, lossy, with shuffle and fletcher32 checksum.
write_netcdf(self.files[5],True,lsd,array,shuffle=True,fletcher32=True)
# 2-d compressed, lossy, with shuffle and fletcher32 checksum and
# chunksizes.
write_netcdf2(self.files[6],True,lsd,array2,shuffle=True,fletcher32=True,chunksizes=(chunk1,chunk2))
def tearDown(self):
# Remove the temporary files
for file in self.files:
os.remove(file)
def runTest(self):
"""testing zlib and shuffle compression filters"""
uncompressed_size = os.stat(self.files[0]).st_size
# check uncompressed data
f = Dataset(self.files[0])
size = os.stat(self.files[0]).st_size
assert_almost_equal(array,f.variables['data'][:])
assert_almost_equal(array,f.variables['data2'][:])
assert f.variables['data'].filters() ==\
{'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False}
assert f.variables['data2'].filters() ==\
{'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False}
assert_almost_equal(size,uncompressed_size)
f.close()
# check compressed data.
f = Dataset(self.files[1])
size = os.stat(self.files[1]).st_size
assert_almost_equal(array,f.variables['data'][:])
assert_almost_equal(array,f.variables['data2'][:])
assert f.variables['data'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':6,'fletcher32':False}
assert f.variables['data2'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':6,'fletcher32':False}
assert(size < 0.95*uncompressed_size)
f.close()
# check compression with shuffle
f = Dataset(self.files[2])
size = os.stat(self.files[2]).st_size
assert_almost_equal(array,f.variables['data'][:])
assert_almost_equal(array,f.variables['data2'][:])
assert f.variables['data'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':False}
assert f.variables['data2'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':False}
assert(size < 0.85*uncompressed_size)
f.close()
# check lossy compression without shuffle
f = Dataset(self.files[3])
size = os.stat(self.files[3]).st_size
checkarray = _quantize(array,lsd)
assert_almost_equal(checkarray,f.variables['data'][:])
assert_almost_equal(checkarray,f.variables['data2'][:])
assert(size < 0.27*uncompressed_size)
f.close()
# check lossy compression with shuffle
f = Dataset(self.files[4])
size = os.stat(self.files[4]).st_size
assert_almost_equal(checkarray,f.variables['data'][:])
assert_almost_equal(checkarray,f.variables['data2'][:])
assert(size < 0.20*uncompressed_size)
size_save = size
f.close()
# check lossy compression with shuffle and fletcher32 checksum.
f = Dataset(self.files[5])
size = os.stat(self.files[5]).st_size
assert_almost_equal(checkarray,f.variables['data'][:])
assert_almost_equal(checkarray,f.variables['data2'][:])
assert f.variables['data'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':True}
assert f.variables['data2'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':True}
assert(size < 0.20*uncompressed_size)
# should be slightly larger than without fletcher32
assert(size > size_save)
# check chunksizes
f.close()
f = Dataset(self.files[6])
checkarray2 = _quantize(array2,lsd)
assert_almost_equal(checkarray2,f.variables['data2'][:])
assert f.variables['data2'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':True}
assert f.variables['data2'].chunking() == [chunk1,chunk2]
f.close()
if __name__ == '__main__':
unittest.main()
| 7,378 | 45.408805 | 125 | py |
netcdf4-python | netcdf4-python-master/test/tst_types.py | import sys
import unittest
import os
import tempfile
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.random.mtrand import uniform
import netCDF4
# test primitive data types.
# create an n1dim by n2dim random ranarr.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
n1dim = 5
n2dim = 10
ranarr = 100.*uniform(size=(n1dim,n2dim))
zlib=False;complevel=0;shuffle=0;least_significant_digit=None
datatypes = ['f8','f4','i1','i2','i4','i8','u1','u2','u4','u8','S1']
FillValue = 1.0
issue273_data = np.ma.array(['z']*10,dtype='S1',\
mask=[False,False,False,False,False,True,False,False,False,False])
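# Hedged sketch (hypothetical file name) of the fill-value behavior checked in
# runTest: records never written along an unlimited dimension read back as the
# variable's _FillValue.
def _fill_value_example(path='example_fill.nc'):
    nc = netCDF4.Dataset(path, 'w')
    nc.createDimension('n', None)
    v = nc.createVariable('x', 'f8', ('n',), fill_value=FillValue)
    v[1] = 2.0                      # record 0 is never written
    v.set_auto_mask(False)          # return raw values instead of masking
    assert v[0] == FillValue        # unwritten record equals _FillValue
    nc.close()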
class PrimitiveTypesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = netCDF4.Dataset(self.file,'w')
f.createDimension('n1', None)
f.createDimension('n2', n2dim)
for typ in datatypes:
foo = f.createVariable('data_'+typ, typ, ('n1','n2',),zlib=zlib,complevel=complevel,shuffle=shuffle,least_significant_digit=least_significant_digit,fill_value=FillValue)
#foo._FillValue = FillValue
# test writing of _FillValue attribute for diff types
# (should be cast to type of variable silently)
foo[1:n1dim] = ranarr[1:n1dim]
v = f.createVariable('issue271', np.dtype('S1'), [], fill_value=b'Z')
v2 = f.createVariable('issue273', np.dtype('S1'), 'n2',\
fill_value='\x00')
v2[:] = issue273_data
v3 = f.createVariable('issue707',np.int8,'n2')
v3.setncattr('missing_value',255)
v3[:]=-1
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing primitive data type """
f = netCDF4.Dataset(self.file)
for typ in datatypes:
data = f.variables['data_'+typ]
data.set_auto_maskandscale(False)
datarr = data[1:n1dim]
# fill missing data with _FillValue
# ('S1' array will have some missing values)
if hasattr(datarr, 'mask'):
datarr = datarr.filled()
datfilled = data[0]
# check to see that data type is correct
if typ == 'S1':
self.assertTrue(data.dtype.str[1:] in ['S1','U1'])
else:
self.assertTrue(data.dtype.str[1:] == typ)
# check data in variable.
if data.dtype.str[1:] != 'S1':
#assert np.allclose(datarr, ranarr[1:n1dim].astype(data.dtype))
assert_array_almost_equal(datarr,ranarr[1:n1dim].astype(data.dtype))
else:
                assert datarr.tobytes() == ranarr[1:n1dim].astype(data.dtype).tobytes()
# check that variable elements not yet written are filled
# with the specified _FillValue.
assert_array_equal(datfilled,np.asarray(data._FillValue,datfilled.dtype))
# issue 271 (_FillValue should be a byte for character arrays on
# Python 3)
v = f.variables['issue271']
assert type(v._FillValue) == bytes
assert v._FillValue == b'Z'
# issue 273 (setting _FillValue to null byte manually)
v2 = f.variables['issue273']
assert type(v2._FillValue) == bytes
assert v2._FillValue == b'\x00'
assert(str(issue273_data) == str(v2[:]))
# issue 707 (don't apply missing_value if cast to variable type is
# unsafe)
v3 = f.variables['issue707']
assert_array_equal(v3[:],-1*np.ones(n2dim,v3.dtype))
f.close()
# issue #850 (masked scalar char variable)
f = netCDF4.Dataset(self.file,'a')
a = f.createVariable('a', 'c', ())
a[:] = np.ma.masked
f.close()
if __name__ == '__main__':
unittest.main()
| 3,886 | 38.663265 | 181 | py |
netcdf4-python | netcdf4-python-master/test/tst_fancyslicing.py | from netCDF4 import Dataset
from numpy.random import seed, randint
from numpy.testing import assert_array_equal, assert_equal
import tempfile, unittest, os, random
import numpy as np
"""
Bug note
There seems to be a bug when two unlimited dimensions are used,
ie ('x', 'y', 'time'), where x and time are unlimited dimensions.
Specifically, the x dimension is set to a random length after setting
it from an array. No data is lost, but the shape is wrong, and this
can bog down the computer when taking all data along x.
This bug appeared on Huard's box with netCDF4.0 and HDF5 1.8.1, and
seems to be absent in later versions of those libraries (this needs
to be checked.)
See test2unlim below for an example.
"""
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
xdim=9; ydim=10; zdim=11
i = np.array([2,5,7],'i4')
i2 = np.array([0,8],'i4')
i3 = np.array([3,7,9,10],'i4')
ib = np.zeros(ydim,dtype=np.bool_)
ib[2] = True; ib[5] = True; ib[7] = True
ib2 = np.zeros(xdim, dtype=np.bool_)
ib2[1] = True; ib2[4] = True; ib2[6] = True
# this one should be converted to a slice.
ib3 = np.zeros(xdim, dtype=np.bool_)
ib3[0] = True; ib3[4] = True; ib3[8] = True
#seed(9) # fix seed
data = randint(0,10,size=(xdim,ydim,zdim)).astype('i2')
data1 = data[:,0,0].copy()
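# Hedged note: netCDF4 variables use "orthogonal" fancy indexing -- each
# integer or boolean sequence selects independently along its own axis, so
# v[[1,2],[3,4]] yields a 2x2 block, unlike numpy, where arr[[1,2],[3,4]]
# broadcasts the index arrays and returns just elements (1,3) and (2,4).
# The shape assertions in test_get below depend on this behavior.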
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = file_name
f = Dataset(file_name,'w')
f.createDimension('x',None)
f.createDimension('y',ydim)
f.createDimension('z',zdim)
v = f.createVariable('data','i2',('x','y','z'))
v[:] = data
v1 = f.createVariable('data1','i2','x')
self.data1 = data1
self.data = data
# test __setitem___
v[0:xdim] = self.data
# integer array slice.
v[:,i,:] = -100
self.data[:,i,:] = -100
# boolean array slice.
v[ib2] = -200
self.data[ib2] = -200
v[ib3,:,:] = -300
self.data[ib3,:,:] = -300
# same as above, for 1d array
v1[0:xdim] = self.data1
v1[i] = -100
self.data1[i] = -100
v1[ib2] = -200
self.data1[ib2] = -200
v1[ib3] = -300
self.data1[ib3] = -300
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def test_get(self):
"""testing 'fancy indexing'"""
f = Dataset(self.file, 'r')
v = f.variables['data']
# slice with an array of integers.
assert_array_equal(v[0:-1:2,i,:],self.data[0:-1:2,i,:])
# slice with an array of booleans.
assert_array_equal(v[0:-1:2,ib,:],self.data[0:-1:2,ib,:])
# Two slices
assert_array_equal(v[1:2,1:3,:], self.data[1:2,1:3,:])
# Three integer sequences
# sequences should be equivalent to booleans
ib1 = np.zeros(v.shape[0], np.bool_); ib1[i]=True
ib2 = np.zeros(v.shape[1], np.bool_); ib2[i2]=True
ib3 = np.zeros(v.shape[2], np.bool_); ib3[i3]=True
assert_array_equal(v[i,i2,i3], v[ib1,ib2,ib3])
assert_equal(v[i,i2,i3].shape, (len(i),len(i2),len(i3)))
# Two booleans and one slice. Different from NumPy
# ibx,ibz should be converted to slice, iby not.
ibx = np.array([True, False, True, False, True, False, True, False, True])
iby = np.array([True, False, True, False, False, False, True, False, True, False])
ibz = np.array([True, False, True, False, True, False, True, False,\
True, False, True])
datatmp = self.data[::2,:,:]
datatmp = datatmp[:,iby,:]
assert_array_equal(v[ibx, iby, :], datatmp)
# Three booleans
datatmp = self.data[::2,:,:]
datatmp = datatmp[:,iby,::2]
assert_array_equal(v[ibx,iby,ibz], datatmp)
# Empty boolean -- all False
d1 = f.variables['data1']
m = np.zeros(xdim, bool)
        # note: lexicographic version-string comparison wrongly excludes
        # numpy 1.10+; compare parsed (major, minor) components instead
        if tuple(int(p) for p in np.__version__.split('.')[:2]) >= (1, 10):
            # fails for old numpy versions
            assert_equal(d1[m], ())
# Check that no assignment is made
d1[m] = 0
assert_equal(d1[:], self.data1)
# boolean slices, only single items returned.
iby = np.array([True, False, False, False, False, False, False, False,\
False, False])
ibz = np.array([False, True, False, False, False, False, False, False,\
False,False,False])
assert_array_equal(v[:,iby,ibz],self.data[:,0:1,1:2])
# check slicing with unsorted integer sequences
# and integer sequences with duplicate elements.
v1 = v[:,[1],:]; v2 = v[:,[3],:]; v3 = v[:,[2],:]
vcheck = np.concatenate((v1,v2,v3),axis=1)
assert_array_equal(vcheck,v[:,[1,3,2],:])
vcheck = np.concatenate((v1,v3,v3),axis=1)
assert_array_equal(vcheck,v[:,[1,2,2],:])
        # Ellipsis
assert_array_equal(v[...,::2],self.data[..., ::2])
assert_array_equal(v[...,::-2],self.data[..., ::-2])
assert_array_equal(v[[1,2],...],self.data[[1,2],...])
assert_array_equal(v[0], self.data[0])
# slicing with all False booleans (PR #1197)
iby[:] = False
data = v[ibx,iby,ibz]
assert(data.size == 0)
f.close()
def test_set(self):
f = Dataset(self.file, 'a')
data = np.arange(xdim*ydim*zdim).reshape((xdim,ydim,zdim)).astype('i4')
vu = f.variables['data']
vu[0,:,:] = data[0,:,:]
assert_array_equal(vu[0,:,:], data[0,:,:])
vu[1:,:,:] = data[:]
assert_array_equal(vu[1:, :, :], data)
f.close()
def test2unlim(self):
"""Test with a variable that has two unlimited dimensions."""
f = Dataset(self.file, 'a')
f.createDimension('time',None)
v = f.createVariable('u2data', 'i2', ('time', 'x', 'y'))
xdim = len(f.dimensions['x'])
data = np.arange(3*xdim*ydim).reshape((3, xdim, ydim))
v[:] = data
assert_equal(v.shape, data.shape)
v[3:6, 0:xdim, 0:ydim] = data
try:
assert_equal(v.shape, (6, xdim, ydim))
except AssertionError:
import warnings
warnings.warn("""
There seems to be a bug in the netCDF4 or HDF5 library that is
installed on your computer. Please upgrade to the latest version
to avoid being affected. This only matters if you use more than
1 unlimited dimension.""")
raise AssertionError
f.close()
if __name__ == '__main__':
unittest.main()
| 6,576 | 33.255208 | 90 | py |
netcdf4-python | netcdf4-python-master/test/tst_multifile2.py | from netCDF4 import Dataset, MFDataset, MFTime
import numpy as np
from numpy.random import seed, randint
from numpy.testing import assert_array_equal, assert_equal
from numpy import ma
import tempfile, unittest, os, datetime
import cftime
from pkg_resources import parse_version
nx=100; ydim=5; zdim=10
nfiles = 10
ninc = nx/nfiles
files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)]
data = randint(0,10,size=(nx,ydim,zdim))
missval = 99
data[::10] = missval
data = ma.masked_values(data,missval)
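# Sketch of the aggregation being tested: MFDataset stitches the per-file
# variables together along `aggdim` so reads behave like one dataset, e.g.
# (hypothetical file pattern):
# nc = MFDataset('part_*.nc', aggdim='x')
# full = nc.variables['data'][:] # shape (nx, ydim, zdim)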
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.files = files
for nfile,file in enumerate(self.files):
f = Dataset(file,'w',format='NETCDF4_CLASSIC')
#f.createDimension('x',None)
f.createDimension('x',ninc)
f.createDimension('y',ydim)
f.createDimension('z',zdim)
f.history = 'created today'
x = f.createVariable('x','i',('x',))
x.units = 'zlotys'
dat = f.createVariable('data','i',('x','y','z',))
dat.long_name = 'phony data'
dat.missing_value = missval
nx1 = int(nfile*ninc); nx2 = int(ninc*(nfile+1))
#x[0:ninc] = np.arange(nfile*ninc,ninc*(nfile+1))
x[:] = np.arange(nfile*ninc,ninc*(nfile+1))
#dat[0:ninc] = data[nx1:nx2]
dat[:] = data[nx1:nx2]
f.close()
def tearDown(self):
# Remove the temporary files
for file in self.files:
os.remove(file)
def runTest(self):
"""testing multi-file dataset access"""
# specify the aggregation dim (not necessarily unlimited)
f = MFDataset(self.files,aggdim='x',check=True)
assert f.history == 'created today'
assert_array_equal(np.arange(0,nx),f.variables['x'][:])
varin = f.variables['data']
datin = varin[:]
assert_array_equal(datin.mask,data.mask)
varin.set_auto_maskandscale(False)
data2 = data.filled()
assert varin.long_name == 'phony data'
assert len(varin) == nx
assert varin.shape == (nx,ydim,zdim)
assert varin.dimensions == ('x','y','z')
assert_array_equal(varin[4:-4:4,3:5,2:8],data2[4:-4:4,3:5,2:8])
assert varin[0,0,0] == data2[0,0,0]
assert_array_equal(varin[:],data2)
        assert getattr(varin,'nonexistantatt',None) is None
f.close()
class NonuniformTimeTestCase(unittest.TestCase):
ninc = 365
def setUp(self):
self.files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(2)]
for nfile,file in enumerate(self.files):
f = Dataset(file,'w',format='NETCDF4_CLASSIC')
f.createDimension('time',None)
f.createDimension('y',ydim)
f.createDimension('z',zdim)
f.history = 'created today'
time = f.createVariable('time', 'f', ('time', ))
#time.units = 'days since {0}-01-01'.format(1979+nfile)
yr = 1979+nfile
time.units = 'days since %s-01-01' % yr
time.calendar = 'standard'
x = f.createVariable('x','f',('time', 'y', 'z'))
x.units = 'potatoes per square mile'
nx1 = self.ninc*nfile;
nx2 = self.ninc*(nfile+1)
time[:] = np.arange(self.ninc)
x[:] = np.arange(nx1, nx2).reshape(self.ninc,1,1) * np.ones((1, ydim, zdim))
f.close()
def tearDown(self):
# Remove the temporary files
for file in self.files:
os.remove(file)
def runTest(self):
# Get the real dates
# skip this until cftime pull request #55 is in a released
# version (1.0.1?). Otherwise, fix for issue #808 breaks this
if parse_version(cftime.__version__) >= parse_version('1.0.1'):
dates = []
for file in self.files:
f = Dataset(file)
t = f.variables['time']
dates.extend(cftime.num2date(t[:], t.units, t.calendar))
f.close()
# Compare with the MF dates
f = MFDataset(self.files,check=True)
t = f.variables['time']
mfdates = cftime.num2date(t[:], t.units, t.calendar)
T = MFTime(t)
assert_equal(len(T), len(t))
assert_equal(T.shape, t.shape)
assert_equal(T.dimensions, t.dimensions)
assert_equal(T.typecode(), t.typecode())
# skip this until cftime pull request #55 is in a released
# version (1.0.1?). Otherwise, fix for issue #808 breaks this
if parse_version(cftime.__version__) >= parse_version('1.0.1'):
assert_array_equal(cftime.num2date(T[:], T.units, T.calendar), dates)
assert_equal(cftime.date2index(datetime.datetime(1980, 1, 2), T), 366)
f.close()
if __name__ == '__main__':
unittest.main()
| 4,945 | 35.367647 | 105 | py |
netcdf4-python | netcdf4-python-master/test/tst_unicode.py | import netCDF4
import numpy as np
import sys, unittest, os, tempfile
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
ATT1 = '\u03a0\u03a3\u03a9'
ATT2 = 'x\xb0'
ATT3 = ['\u03a0', '\u03a3', '\u03a9']
DIM_NAME = 'x\xb0'
VAR_NAME = 'Andr\xe9'
class UnicodeTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = netCDF4.Dataset(self.file,'w')
f.attribute1 = ATT1
f.attribute2 = ATT2
f.attribute3 = ATT3
d = f.createDimension(DIM_NAME, None)
v = f.createVariable(VAR_NAME, np.float64, (DIM_NAME,))
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing unicode"""
f = netCDF4.Dataset(self.file, 'r')
d = f.dimensions[DIM_NAME]
v = f.variables[VAR_NAME]
# check accessing individual attributes.
assert f.attribute1 == ATT1
assert f.attribute2 == ATT2
#assert f.attribute3 == ''.join(ATT3)
# behavior changed issue 770
assert f.attribute3 == ATT3
f.close()
if __name__ == '__main__':
unittest.main()
| 1,180 | 26.465116 | 72 | py |
netcdf4-python | netcdf4-python-master/test/tst_compression_bzip2.py | from numpy.random.mtrand import uniform
from netCDF4 import Dataset
from numpy.testing import assert_almost_equal
import os, tempfile, unittest, sys
ndim = 100000
filename1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
filename2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
array = uniform(size=(ndim,))
def write_netcdf(filename,dtype='f8',complevel=6):
nc = Dataset(filename,'w')
nc.createDimension('n', ndim)
foo = nc.createVariable('data',\
dtype,('n'),compression='bzip2',complevel=complevel)
foo[:] = array
nc.close()
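# Availability note: bzip2 is an optional HDF5 compression plugin, so -- as in
# the __main__ block below -- Dataset.has_bzip2_filter() should be consulted
# before relying on compression='bzip2'.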
class CompressionTestCase(unittest.TestCase):
def setUp(self):
self.filename1 = filename1
self.filename2 = filename2
write_netcdf(self.filename1,complevel=0) # no compression
write_netcdf(self.filename2,complevel=4) # with compression
def tearDown(self):
# Remove the temporary files
os.remove(self.filename1)
os.remove(self.filename2)
def runTest(self):
uncompressed_size = os.stat(self.filename1).st_size
# check uncompressed data
f = Dataset(self.filename1)
size = os.stat(self.filename1).st_size
assert_almost_equal(array,f.variables['data'][:])
assert f.variables['data'].filters() ==\
{'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False}
assert_almost_equal(size,uncompressed_size)
f.close()
# check compressed data.
f = Dataset(self.filename2)
size = os.stat(self.filename2).st_size
assert_almost_equal(array,f.variables['data'][:])
assert f.variables['data'].filters() ==\
{'zlib':False,'szip':False,'zstd':False,'bzip2':True,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False}
assert(size < 0.96*uncompressed_size)
f.close()
if __name__ == '__main__':
nc = Dataset(filename1,'w')
if not nc.has_bzip2_filter():
sys.stdout.write('bzip2 filter not available, skipping tests ...\n')
else:
nc.close()
unittest.main()
| 2,136 | 35.844828 | 125 | py |
netcdf4-python | netcdf4-python-master/test/tst_endian.py | import netCDF4
import numpy as np
import unittest, os, tempfile
from numpy.testing import assert_array_equal, assert_array_almost_equal
data = np.arange(12,dtype='f4').reshape(3,4)
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME3 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
def create_file(file,format,data):
import warnings
dataset = netCDF4.Dataset(file,'w',format=format)
dataset.createDimension('time', None)
dataset.createDimension('space', 4)
dims = ('time', 'space')
little = data.astype('<f4')
big = data.astype('>f4')
warnings.simplefilter('ignore') # ignore UserWarnings generated below
ll = dataset.createVariable('little-little', '<f4', dims)
lb = dataset.createVariable('little-big', '<f4', dims)
bl = dataset.createVariable('big-little', '>f4', dims)
bb = dataset.createVariable('big-big', '>f4', dims)
ll[:] = little
lb[:] = big
bl[:] = little
bb[:] = big
dataset.close()
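# Note: endianness can be requested either via the dtype string ('<f4'/'>f4')
# as above, or with the endian= keyword to createVariable (as check_byteswap
# below does). Either way the values read back identical, because reads are
# byteswapped to native order.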
def check_byteswap(file, data):
# byteswapping is done internally to native endian format
# when numpy array has non-native byte order. The byteswap was
# initially done in place, which caused the numpy array to
# be modified in the calling program. Pull request #555
# changed the byteswap to a copy, and this test checks
# to make sure the input numpy array is not modified.
dataset = netCDF4.Dataset(file,'w')
dataset.createDimension('time', None)
dataset.createDimension('space', 4)
dims = ('time', 'space')
bl = dataset.createVariable('big-little', np.float32, dims, endian='big')
data2 = data.copy()
bl[:] = data
dataset.close()
f = netCDF4.Dataset(file)
bl = f.variables['big-little'][:]
# check data.
assert_array_almost_equal(data, data2)
assert_array_almost_equal(bl, data)
f.close()
def check_data(file, data):
f = netCDF4.Dataset(file)
ll = f.variables['little-little'][:]
lb = f.variables['little-big'][:]
bb = f.variables['big-big'][:]
bl = f.variables['big-little'][:]
# check data.
assert_array_almost_equal(ll, data)
assert_array_almost_equal(lb, data)
assert_array_almost_equal(bl, data)
assert_array_almost_equal(bb, data)
f.close()
def issue310(file):
mval = 999.; fval = -999
nc = netCDF4.Dataset(file, "w")
nc.createDimension('obs', 10)
if netCDF4.is_native_little:
endian='big'
elif netCDF4.is_native_big:
endian='little'
else:
raise ValueError('cannot determine native endianness')
var_big_endian = nc.createVariable(\
'obs_big_endian', '>f8', ('obs', ),\
endian=endian,fill_value=fval)
# use default _FillValue
var_big_endian2 = nc.createVariable(\
'obs_big_endian2', '>f8', ('obs', ),\
endian=endian)
    # NOTE: missing_value must be written in the same byte order
    # as the variable, or the masked array won't be masked correctly
# when data is read in.
var_big_endian.missing_value = mval
var_big_endian[0]=np.pi
var_big_endian[1]=mval
var_big_endian2.missing_value = mval
var_big_endian2[0]=np.pi
var_big_endian2[1]=mval
var_native_endian = nc.createVariable(\
'obs_native_endian', '<f8', ('obs', ),\
endian='native',fill_value=fval)
var_native_endian.missing_value = mval
var_native_endian[0]=np.pi
var_native_endian[1]=mval
assert_array_almost_equal(var_native_endian[:].filled(),
var_big_endian[:].filled())
assert_array_almost_equal(var_big_endian[:].filled(),
var_big_endian2[:].filled())
nc.close()
def issue346(file):
# create a big and a little endian variable
xb = np.arange(10, dtype='>i4')
xl = np.arange(xb.size, dtype='<i4')
nc = netCDF4.Dataset(file, mode='w')
nc.createDimension('x', size=xb.size)
vb=nc.createVariable('xb', xb.dtype, ('x'),
endian='big')
vl=nc.createVariable('xl', xl.dtype, ('x'),
endian='little')
nc.variables['xb'][:] = xb
nc.variables['xl'][:] = xl
nc.close()
nc = netCDF4.Dataset(file)
datab = nc.variables['xb'][:]
datal = nc.variables['xl'][:]
assert_array_equal(datab,xb)
assert_array_equal(datal,xl)
nc.close()
def issue930(file):
# make sure view to unsigned data type (triggered
# by _Unsigned attribute being set) is correct when
# data byte order is non-native.
nc = netCDF4.Dataset(file,'w')
d = nc.createDimension('x',2)
v1 = nc.createVariable('v1','i2','x',endian='big')
v2 = nc.createVariable('v2','i2','x',endian='little')
v1[0] = 255; v1[1] = 1
v2[0] = 255; v2[1] = 1
v1._Unsigned="TRUE"; v1.missing_value=np.int16(1)
v2._Unsigned="TRUE"; v2.missing_value=np.int16(1)
nc.close()
nc = netCDF4.Dataset(file)
assert_array_equal(nc['v1'][:],np.ma.masked_array([255,1],mask=[False,True]))
assert_array_equal(nc['v2'][:],np.ma.masked_array([255,1],mask=[False,True]))
nc.set_auto_mask(False)
assert_array_equal(nc['v1'][:],np.array([255,1]))
assert_array_equal(nc['v2'][:],np.array([255,1]))
nc.close()
class EndianTestCase(unittest.TestCase):
def setUp(self):
create_file(FILE_NAME,'NETCDF4_CLASSIC',data); self.file=FILE_NAME
create_file(FILE_NAME2,'NETCDF3_CLASSIC',data); self.file2=FILE_NAME2
self.file3 = FILE_NAME3
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
os.remove(self.file2)
os.remove(self.file3)
def runTest(self):
"""testing endian conversion capability"""
check_data(self.file, data)
check_data(self.file2, data)
check_byteswap(self.file3, data)
issue310(self.file)
issue346(self.file2)
issue930(self.file2)
if __name__ == '__main__':
unittest.main()
| 6,046 | 34.781065 | 81 | py |
netcdf4-python | netcdf4-python-master/test/tst_compression_quant.py | from numpy.random.mtrand import uniform
from netCDF4 import Dataset
from numpy.testing import assert_almost_equal
import numpy as np
import os, tempfile, unittest
ndim = 100000
nfiles = 7
files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)]
data_array = uniform(size=(ndim,))
nsd = 3
nsb = 10 # for BitRound, significant bits (~3 sig digits, at ~3.32 bits per digit)
complevel = 6
def write_netcdf(filename,zlib,significant_digits,data,dtype='f8',shuffle=False,\
complevel=6,quantize_mode="BitGroom"):
file = Dataset(filename,'w')
file.createDimension('n', ndim)
foo = file.createVariable('data',\
dtype,('n'),zlib=zlib,significant_digits=significant_digits,\
shuffle=shuffle,complevel=complevel,quantize_mode=quantize_mode)
foo[:] = data
file.close()
file = Dataset(filename)
data = file.variables['data'][:]
file.close()
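# Hedged sketch of the bits/digits relationship behind nsd and nsb above:
# keeping n mantissa bits preserves roughly n*log10(2) decimal digits, so
# nsb=10 corresponds to about 3 significant digits.
def _bits_to_digits(nbits):
    import math
    return nbits * math.log10(2.0)    # _bits_to_digits(10) ~= 3.01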
class CompressionTestCase(unittest.TestCase):
def setUp(self):
self.files = files
# no compression
write_netcdf(self.files[0],False,None,data_array)
# compressed, lossless, no shuffle.
write_netcdf(self.files[1],True,None,data_array)
# compressed, lossless, with shuffle.
write_netcdf(self.files[2],True,None,data_array,shuffle=True)
# compressed, lossy, no shuffle.
write_netcdf(self.files[3],True,nsd,data_array)
# compressed, lossy, with shuffle.
write_netcdf(self.files[4],True,nsd,data_array,shuffle=True)
# compressed, lossy, with shuffle, and alternate quantization.
write_netcdf(self.files[5],True,nsd,data_array,quantize_mode='GranularBitRound',shuffle=True)
# compressed, lossy, with shuffle, and alternate quantization.
write_netcdf(self.files[6],True,nsb,data_array,quantize_mode='BitRound',shuffle=True)
def tearDown(self):
# Remove the temporary files
for file in self.files:
os.remove(file)
def runTest(self):
"""testing zlib and shuffle compression filters"""
uncompressed_size = os.stat(self.files[0]).st_size
#print('uncompressed size = ',uncompressed_size)
# check compressed data.
f = Dataset(self.files[1])
size = os.stat(self.files[1]).st_size
#print('compressed lossless no shuffle = ',size)
assert_almost_equal(data_array,f.variables['data'][:])
assert f.variables['data'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':complevel,'fletcher32':False}
assert(size < 0.95*uncompressed_size)
f.close()
# check compression with shuffle
f = Dataset(self.files[2])
size = os.stat(self.files[2]).st_size
#print('compressed lossless with shuffle ',size)
assert_almost_equal(data_array,f.variables['data'][:])
assert f.variables['data'].filters() ==\
{'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':complevel,'fletcher32':False}
assert(size < 0.85*uncompressed_size)
f.close()
# check lossy compression without shuffle
f = Dataset(self.files[3])
size = os.stat(self.files[3]).st_size
errmax = (np.abs(data_array-f.variables['data'][:])).max()
#print('compressed lossy no shuffle = ',size,' max err = ',errmax)
assert(f.variables['data'].quantization() == (nsd,'BitGroom'))
assert(errmax < 1.e-3)
assert(size < 0.35*uncompressed_size)
f.close()
# check lossy compression with shuffle
f = Dataset(self.files[4])
size = os.stat(self.files[4]).st_size
errmax = (np.abs(data_array-f.variables['data'][:])).max()
print('compressed lossy with shuffle and standard quantization = ',size,' max err = ',errmax)
assert(f.variables['data'].quantization() == (nsd,'BitGroom'))
assert(errmax < 1.e-3)
assert(size < 0.24*uncompressed_size)
f.close()
# check lossy compression with shuffle and alternate quantization
f = Dataset(self.files[5])
size = os.stat(self.files[5]).st_size
errmax = (np.abs(data_array-f.variables['data'][:])).max()
print('compressed lossy with shuffle and alternate quantization = ',size,' max err = ',errmax)
assert(f.variables['data'].quantization() == (nsd,'GranularBitRound'))
assert(errmax < 1.e-3)
assert(size < 0.24*uncompressed_size)
f.close()
        # check lossy compression with shuffle and BitRound quantization
        f = Dataset(self.files[6])
        size = os.stat(self.files[6]).st_size
        errmax = (np.abs(data_array-f.variables['data'][:])).max()
        print('compressed lossy with shuffle and BitRound quantization = ',size,' max err = ',errmax)
assert(f.variables['data'].quantization() == (nsb,'BitRound'))
assert(errmax < 1.e-3)
assert(size < 0.24*uncompressed_size)
f.close()
if __name__ == '__main__':
unittest.main()
| 5,096 | 44.106195 | 132 | py |
netcdf4-python | netcdf4-python-master/test/tst_chunk_cache.py | import unittest, netCDF4, tempfile, os
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
cache_size = 10000
cache_nelems = 100
cache_preempt = 0.5
cache_size2 = 20000
cache_nelems2 = 200
cache_preempt2 = 1.0
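# The chunk cache is described by a (size, nelems, preemption) triple: total
# bytes, number of hash-table slots, and a 0..1 eviction-policy weight.
# The three scopes exercised below, in sketch form:
# netCDF4.set_chunk_cache(size, nelems, preempt) # library default, new opens
# Dataset.createVariable(..., chunk_cache=nbytes) # per variable, at creation
# var.set_var_chunk_cache(size, nelems, preempt) # per variable, after open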
class RefCountTestCase(unittest.TestCase):
def setUp(self):
nc = netCDF4.Dataset(file_name, mode='w', format='NETCDF4')
d = nc.createDimension('fred', 2000)
# can only change cache size in createVariable (not nelems or preemption)
# this change lasts only as long as file is open.
v = nc.createVariable('frank','f',('fred',),chunk_cache=15000)
size, nelems, preempt = v.get_var_chunk_cache()
assert(size==15000)
self.file=file_name
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing methods for accessing and changing chunk cache"""
        # change cache parameters before opening file.
netCDF4.set_chunk_cache(cache_size, cache_nelems, cache_preempt)
nc = netCDF4.Dataset(self.file, mode='r')
# check to see that chunk cache parameters were changed.
assert(netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt))
# change cache parameters for variable, check
nc['frank'].set_var_chunk_cache(cache_size2, cache_nelems2, cache_preempt2)
assert(nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2))
nc.close()
if __name__ == '__main__':
unittest.main()
| 1,570 | 36.404762 | 97 | py |