ports = host.find("ports")
if ports is not None:
    for port in ports.findall("port"):
        cells = []
        for rc in row_cells:
            current_cell = rc
            # Replace every bracketed placeholder in the cell template with
            # data pulled from the corresponding XML element of this port
            for bc in re.findall(r"(\[[a-z\.*]+\])", rc):
                for definition in definitions:
                    elem = definition.find(bc[1:-1])
                    if elem:
                        xml_element = port.find(elem.xpathfull())
                        if xml_element is not None:
                            data = elem.data(xml_element)
                            current_cell = current_cell.replace(bc, data)
                            break
                        break
            cells.append(current_cell)
        port_info.append(cells)
result[address] = port_info
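# Illustration (not part of the original script; the placeholder names are
# hypothetical, the real ones come from the `definitions` list defined
# elsewhere): a row template such as "[port] / [service.name]" would have
# each bracketed placeholder replaced with the value extracted from this
# port's XML, yielding a cell like "22 / ssh".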
# Start converting data to Markdown
# IP addresses are defined as a header
for address in result:
    if not options.print_empty and len(result[address]) == 0:
        continue
    if options.hs != 0:
        md += "%s %s\n\n" % ('#' * options.hs, address)
    md += "| %s |" % " | ".join(columns)
    md += "\n"
    # Adding +2 for 1 space on left and right sides
    md += "|%s|" % "|".join(map(lambda s: '-' * (len(s) + 2), columns))
    md += "\n"
    result[address] = sorted(
        result[address],
        key=lambda row: row[sorting_index],
        reverse=sorting_reverse
    )
    for port_info in result[address]:
        md += "| %s |" % " | ".join(port_info)
        md += "\n"
    md += "\n\n"

print()
print()
print(md)
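# For reference, the rendered Markdown for one host looks roughly like this
# (illustrative output only; the real header level, columns and values depend
# on the configured options and the scan results):
#
#   ## 192.168.1.10
#
#   | Port | State | Service |
#   |------|-------|---------|
#   | 22   | open  | ssh     |
#   | 80   | open  | http    |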
# <FILESEP>
from transformers import (AutoTokenizer, AutoConfig, LlamaForCausalLM, DataCollatorForLanguageModeling, Trainer, TrainingArguments)
from datasets import load_dataset
from huggingface_hub import login
import wandb
from utils import *
### Login
# Wandb is for logging and is optional.
hf_token = "<your_hf_token>"
wb_token = "<your_wb_token>"
wandb.login(key=wb_token)
login(token=hf_token)
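# Note: hard-coding the tokens is fine for a quick experiment; a common
# alternative (sketch, not part of the original script) is to read them from
# environment variables instead:
# import os
# hf_token = os.environ["HF_TOKEN"]       # assumes HF_TOKEN is exported
# wb_token = os.environ["WANDB_API_KEY"]  # assumes WANDB_API_KEY is exported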
### Load and tokenize training data. Uncomment these lines to load and tokenize yourself.
# data_source = "Skylion007/openwebtext"
# data = load_dataset(data_source)
# subset = load_dataset(data_source, split="train[:15%]")
# context_length = 256
# tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
# def tokenize(element):
#     outputs = tokenizer(
#         element["text"],
#         truncation=False,
#         max_length=context_length,
#         return_overflowing_tokens=True,
#         return_length=True,
#     )
#     # Combine all tokens into one flat sequence, separated by EOS
#     combined = []
#     for tokenized_doc in outputs['input_ids']:
#         combined += tokenized_doc + [tokenizer.eos_token_id]
#     # Chunk into fixed-length blocks ("+ 1" keeps the final block when the
#     # token count is an exact multiple of context_length)
#     input_batch = []
#     for i in range(0, len(combined) - context_length + 1, context_length):
#         input_batch.append(combined[i:i+context_length])
#     return {"input_ids": input_batch}
# tokenized_data = subset.map(
#     tokenize, batched=True, remove_columns=data["train"].column_names,