# NOTE(review): dataset-viewer residue ("text / stringlengths / 1 / 93.6k")
# removed from the top of this fragment; it was not part of the script.
# --- Validate CLI arguments, then build and run the work queue. ---------
# NOTE(review): `args`, `symbol`, `construct_queue` and `process_queue`
# are defined elsewhere in this file.
print("[INFO] Symbol pair name: %s" % symbol)

# Convert each comma-separated timeframe token (e.g. "M5,H1,D1") to minutes.
timeframe_list = []
timeframe_conv = {
    "M": 1,
    "H": 60,
    "D": 24 * 60,
    "W": 7 * 24 * 60,
    "MN": 30 * 24 * 60,
}
for arg in args.timeframe.strip().upper().split(","):
    # fullmatch rejects trailing garbage such as "M5x", which the original
    # unanchored re.match silently accepted.  The input is already
    # upper-cased, so re.I is unnecessary; "MN" is listed before "M" so the
    # two-letter unit is tried first (backtracking made either order work,
    # but this is clearer).
    match_obj = re.fullmatch(r"(MN|M|H|D|W)(\d+)", arg)
    if match_obj:
        unit = match_obj.group(1)
        count = int(match_obj.group(2))
        timeframe_list.append(timeframe_conv[unit] * count)
    else:
        print("[ERROR] Bad timeframe setting '{}'!".format(arg))
        sys.exit(1)
if args.verbose:
    print(
        "[INFO] Timeframe: %s - %s minute(s)"
        % (args.timeframe.upper(), timeframe_list)
    )

# Checking spread argument; int() raises ValueError on a non-numeric value.
spread = int(args.spread)
if args.verbose:
    print("[INFO] Spread: %d" % spread)

# Create the output directory; exist_ok avoids an error when it is already
# present (keyword form instead of the original positional booleans).
os.makedirs(args.outputDir, mode=0o755, exist_ok=True)
if args.verbose:
    print("[INFO] Output directory: %s" % args.outputDir)

# Server names are capped at 128 characters; longer names are truncated
# with a warning rather than rejected.
if len(args.server) > 128:
    print(
        "[WARNING] Server name is longer than 128 characters, cutting its end off!"
    )
    server = args.server[:128]
else:
    server = args.server
if args.verbose:
    print("[INFO] Server name: %s" % server)

outputFormat = args.outputFormat.strip().lower()
if args.verbose:
    print("[INFO] Output format: %s" % outputFormat)

multiple_timeframes = len(timeframe_list) > 1
queue = construct_queue(timeframe_list)
process_queue(queue)
# <FILESEP>
from safetensors import safe_open
from safetensors.torch import save_file
import sys
import re
## Usage: python convert_huggingface_t5 <path_from_huggingface_model.safetensors> <path_to_output_model.safetensors>
# Rewrite rules mapping HuggingFace T5 tensor names onto this project's
# layout.  The original patterns used bare "." and ".*" (which match ANY
# character), making them fragile; here every literal dot is escaped and
# the layer index is matched explicitly with \d+.  Order mirrors the
# original substitution sequence.
_KEY_RULES = [
    (r"\.layer\.\d+\.SelfAttention\.q", ".self_attention_layer.self_attention.Wq"),
    (r"\.layer\.\d+\.SelfAttention\.k", ".self_attention_layer.self_attention.Wk"),
    (r"\.layer\.\d+\.SelfAttention\.v", ".self_attention_layer.self_attention.Wv"),
    (r"\.layer\.\d+\.SelfAttention\.o", ".self_attention_layer.self_attention.o"),
    (r"\.layer\.\d+\.EncDecAttention\.q", ".cross_attention_layer.cross_attention.Wq"),
    (r"\.layer\.\d+\.EncDecAttention\.k", ".cross_attention_layer.cross_attention.Wk"),
    (r"\.layer\.\d+\.EncDecAttention\.v", ".cross_attention_layer.cross_attention.Wv"),
    (r"\.layer\.\d+\.EncDecAttention\.o", ".cross_attention_layer.cross_attention.o"),
    (r"\.layer\.\d+\.SelfAttention\.relative_attention_bias\.",
     ".self_attention_layer.self_attention.pe_encoding.relative_attention_bias."),
]


def rename_key(key):
    """Map one HuggingFace T5 tensor name to this project's naming scheme.

    E.g. "encoder.block.0.layer.0.SelfAttention.q.weight" becomes
    "encoder.block.0.self_attention_layer.self_attention.Wq.weight".
    """
    new_key = key
    for pattern, replacement in _KEY_RULES:
        new_key = re.sub(pattern, replacement, new_key)
    # Sub-layer 0 is always self-attention.
    new_key = new_key.replace(".layer.0.layer_norm.", ".self_attention_layer.layer_norm.")
    if "encoder" in new_key:
        # Encoder blocks: sub-layer 1 is the feed-forward layer.
        new_key = new_key.replace(".layer.1.layer_norm.", ".ff_layer.layer_norm.")
    else:
        # Decoder blocks: sub-layer 1 is cross-attention, sub-layer 2 is FF.
        new_key = new_key.replace(".layer.1.layer_norm.", ".cross_attention_layer.layer_norm.")
        new_key = new_key.replace(".layer.2.layer_norm.", ".ff_layer.layer_norm.")
    new_key = re.sub(r"\.layer\.\d+\.DenseReluDense\.", ".ff_layer.", new_key)
    new_key = new_key.replace(".wi_", ".act.wi_")
    return new_key


def main():
    """Convert the checkpoint at sys.argv[1], writing sys.argv[2]."""
    tensors = {}
    # NOTE(review): device=0 loads every tensor onto GPU 0; use
    # device="cpu" if a GPU is not guaranteed — confirm intent.
    with safe_open(sys.argv[1], framework="pt", device=0) as f:
        for k in f.keys():
            tensors[rename_key(k)] = f.get_tensor(k)
    save_file(tensors, sys.argv[2], metadata={'format': 'pt'})


if __name__ == "__main__":
    main()
# <FILESEP>
#!/usr/bin/env python
#
# vConfigurator : an automatic VLAN configuration utility.
# Copyright (C) 2015 Mitch
#
# This program is free software; you can redistribute it and/or modify