# NOTE(review): dataset-viewer extraction artifact (column header "text stringlengths 1 93.6k") — not part of the original source.
"""
|
Embedding layer dropout.
|
:param embed: embedding layer
|
:param words: input sequence of words. shape: (batch size, sequence length)
|
:param dropout: dropout to be applied to the embedding layer
|
:return:
|
"""
|
if dropout:
|
mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(
|
embed.weight) / (1 - dropout)
|
masked_embed_weight = mask * embed.weight
|
else:
|
masked_embed_weight = embed.weight
|
padding_idx = embed.padding_idx # be careful here to use the same 'padding_idx' name
|
if padding_idx is None:
|
padding_idx = -1
|
X = torch.nn.functional.embedding(words, masked_embed_weight,
|
padding_idx, embed.max_norm, embed.norm_type,
|
embed.scale_grad_by_freq, embed.sparse
|
)
|
return X
|
if __name__ == '__main__':
    """
    Main script to smoke-test the embedding dropout alone.
    """
    V = 50           # vocabulary size
    h = 4            # embedding size
    bptt = 10        # sequence length
    batch_size = 2   # batch size
    emb_drop = 0.1   # dropout to be applied to the embedding layer

    # Dummy input sequence of word indices in [0, V).
    # np.random.random_integers was removed in NumPy 1.25+; randint's
    # high bound is exclusive, so high=V matches the old high=V - 1.
    words = np.random.randint(low=0, high=V, size=(batch_size, bptt))
    words = torch.LongTensor(words)

    # embedding layer under test
    embed = torch.nn.Embedding(V, h)

    # without embedding dropout
    origX = embed(words)

    # with embedding dropout
    X = embedded_dropout(embed, words, emb_drop)
|
# <FILESEP>
|
import os
|
import l_network as network
|
import l_networks as networks
|
from modules import shared, ui_extra_networks
|
from modules.ui_extra_networks import quote_js
|
from l_ui_edit_user_metadata import LoraUserMetadataEditor
|
class ExtraNetworksPageLyCORIS(ui_extra_networks.ExtraNetworksPage):
|
def __init__(self):
    """Register this extra-networks page under the 'LyCORIS' tab."""
    super().__init__('LyCORIS')
|
def refresh(self):
    """Rebuild the module-level list of available networks on disk."""
    networks.list_available_networks()
|
def create_item(self, name, index=None, enable_filter=True):
|
lora_on_disk = networks.available_networks.get(name)
|
if lora_on_disk is None:
|
return
|
path, ext = os.path.splitext(lora_on_disk.filename)
|
alias = lora_on_disk.get_alias()
|
search_terms = [self.search_terms_from_path(lora_on_disk.filename)]
|
if lora_on_disk.hash:
|
search_terms.append(lora_on_disk.hash)
|
item = {
|
"name": name,
|
"filename": lora_on_disk.filename,
|
"shorthash": lora_on_disk.shorthash,
|
"preview": self.find_preview(path),
|
"description": self.find_description(path),
|
"search_terms": search_terms,
|
"local_preview": f"{path}.{shared.opts.samples_format}",
|
"metadata": lora_on_disk.metadata,
|
"sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
|
"sd_version": lora_on_disk.sd_version.name,
|
}
|
self.read_user_metadata(item)
|
activation_text = item["user_metadata"].get("activation text")
|
preferred_weight = item["user_metadata"].get("preferred weight", 0.0)
|
item["prompt"] = quote_js(f"<lyco:{alias}:") + " + " + (str(preferred_weight) if preferred_weight else "opts.extra_networks_default_multiplier") + " + " + quote_js(">")
|
if activation_text:
|
item["prompt"] += " + " + quote_js(" " + activation_text)
|
negative_prompt = item["user_metadata"].get("negative text")
|
# NOTE(review): dataset-viewer extraction artifact (trailing "Subsets and Splits" page chrome) — not part of the original source; the `create_item` method above is truncated here.