_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def add_inspection(name):
    """
    Add a Jishaku object inspection
    """
    def inspection_inner(func):
        """
        Jishaku inspection decorator
        """
        # pylint: disable=inconsistent-return-statements
        @functools.wraps(func)
        def encapsulated(*args, **kwargs):
            # Swallow the common probe failures so one misbehaving
            # inspection doesn't break the whole inspection pass.
            try:
                return func(*args, **kwargs)
            except (TypeError, AttributeError, ValueError, OSError):
                return

        INSPECTIONS.append((name, encapsulated))
        return func

    return inspection_inner
def all_inspections(obj):
    """
    Generator to iterate all current Jishaku inspections.
    """
    for title, inspector in INSPECTIONS:
        # Only yield inspections that produced a truthy result for this object.
        output = inspector(obj)
        if output:
            yield title, output
def class_name(obj):
    """
    Get the name of an object, including the module name if available.

    Returns ``module.name`` when the object exposes a truthy ``__module__``,
    otherwise just ``name``. Uses a ``getattr`` default so objects without a
    ``__module__`` attribute no longer raise AttributeError (the docstring
    promises "if available", so a missing attribute must be tolerated).
    """
    name = obj.__name__
    module = getattr(obj, '__module__', None)
    if module:
        name = f'{module}.{name}'
    return name
def pages(self):
    """
    Returns the paginator's pages without prematurely closing the active page.
    """
    # Reading commands.Paginator internals directly is deliberate here:
    # using the public API would close the currently-open page.
    # pylint: disable=protected-access
    collected = list(self.paginator._pages)
    # _current_page with more than one entry appears to contain real content
    # beyond the prefix — presumably the first element is the prefix (TODO confirm
    # against the commands.Paginator implementation in use).
    if len(self.paginator._current_page) > 1:
        collected.append('\n'.join(self.paginator._current_page) + '\n' + (self.paginator.suffix or ''))
    # pylint: enable=protected-access
    return collected
def display_page(self):
    """
    Returns the current page the paginator interface is on.
    """
    # Re-clamp on every read so the stored index never drifts out of range
    # (page_count can shrink or grow between accesses).
    upper_bound = self.page_count - 1
    self._display_page = max(0, min(upper_bound, self._display_page))
    return self._display_page
def display_page(self, value):
    """
    Sets the current page the paginator is on. Automatically pushes values inbounds.
    """
    # Clamp the requested index into [0, page_count - 1].
    upper_bound = self.page_count - 1
    self._display_page = max(0, min(upper_bound, value))
def page_size(self) -> int:
    """
    A property that returns how large a page is, calculated from the paginator properties.

    If this exceeds `max_page_size`, an exception is raised upon instantiation.
    """
    count = self.page_count
    # Reserve room for the "\nPage x/y" footer appended to each rendered page.
    return self.paginator.max_size + len(f'\nPage {count}/{count}')
async def add_line(self, *args, **kwargs):
    """
    A proxy function that allows this PaginatorInterface to remain locked to the last page
    if it is already on it.
    """
    # Check whether we were viewing the final page *before* adding content.
    was_on_last_page = self.display_page + 1 == self.page_count

    self.paginator.add_line(*args, **kwargs)

    if was_on_last_page:
        # Stay pinned to the end: jump past the new final index and refresh.
        # (The display_page property clamps this back into range on read.)
        self._display_page = self.page_count
        self.bot.loop.create_task(self.update())
async def send_to(self, destination: discord.abc.Messageable):
    """
    Sends a message to the given destination with this interface.

    This automatically creates the response task for you.
    """
    self.message = await destination.send(**self.send_kwargs)

    # The close reaction is always attached, even for a single page.
    await self.message.add_reaction(self.emojis.close)

    # Navigation emotes are only useful when there is more than one page.
    if not self.sent_page_reactions and self.page_count > 1:
        await self.send_all_reactions()

    # Replace any previous reaction wait loop with a fresh one for this message.
    if self.task:
        self.task.cancel()

    self.task = self.bot.loop.create_task(self.wait_loop())
async def send_all_reactions(self):
    """
    Sends all reactions for this paginator, if any are missing.

    This method is generally for internal use only.
    """
    for emoji in self.emojis:
        # Entries may be falsy (disabled buttons); skip those.
        if emoji:
            await self.message.add_reaction(emoji)
    self.sent_page_reactions = True
async def wait_loop(self):
    """
    Waits on a loop for reactions to the message. This should not be called manually - it is handled by `send_to`.
    """
    start, back, forward, end, close = self.emojis

    def check(payload: discord.RawReactionActionEvent):
        """
        Checks if this reaction is related to the paginator interface.
        """
        # Either there is no owner restriction, or the reactor is the owner.
        owner_check = not self.owner or payload.user_id == self.owner.id

        emoji = payload.emoji
        if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji():
            # Normalize unicode partial emoji to their plain string form.
            emoji = emoji.name

        return payload.message_id == self.message.id and \
            emoji and emoji in self.emojis and \
            payload.user_id != self.bot.user.id and owner_check

    try:
        while not self.bot.is_closed():
            payload = await self.bot.wait_for('raw_reaction_add', check=check, timeout=self.timeout)

            emoji = payload.emoji
            if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji():
                emoji = emoji.name

            if emoji == close:
                await self.message.delete()
                return

            if emoji == start:
                self._display_page = 0
            elif emoji == end:
                self._display_page = self.page_count - 1
            elif emoji == back:
                self._display_page -= 1
            elif emoji == forward:
                self._display_page += 1

            self.bot.loop.create_task(self.update())

            try:
                await self.message.remove_reaction(payload.emoji, discord.Object(id=payload.user_id))
            except discord.Forbidden:
                # Lacking Manage Messages; leave the user's reaction in place.
                pass

    except asyncio.TimeoutError:
        if self.delete_message:
            return await self.message.delete()

        # Timed out without deletion: strip our own navigation reactions.
        for emoji in filter(None, self.emojis):
            try:
                await self.message.remove_reaction(emoji, self.message.guild.me)
            except (discord.Forbidden, discord.NotFound):
                pass
async def update(self):
    """
    Updates this interface's messages with the latest data.
    """
    if self.update_lock.locked():
        # An update is already in flight; this one is redundant.
        return

    async with self.update_lock:
        if self.update_lock.locked():
            # if this engagement has caused the semaphore to exhaust,
            # we are overloaded and need to calm down.
            await asyncio.sleep(1)

        if not self.message:
            # too fast, stagger so this update gets through
            await asyncio.sleep(0.5)

        if not self.sent_page_reactions and self.page_count > 1:
            self.bot.loop.create_task(self.send_all_reactions())
            self.sent_page_reactions = True  # don't spawn any more tasks

        await self.message.edit(**self.send_kwargs)
def submit(self, ctx: commands.Context):
    """
    A context-manager that submits the current task to jishaku's task list
    and removes it afterwards.

    Arguments
    ---------
    ctx: commands.Context
        A Context object used to derive information about this command task.
    """
    self.task_count += 1
    # asyncio.Task.current_task() was deprecated in 3.7 and removed in
    # Python 3.9; asyncio.current_task() is the supported replacement.
    cmdtask = CommandTask(self.task_count, ctx, asyncio.current_task())

    self.tasks.append(cmdtask)

    try:
        yield cmdtask
    finally:
        # The task may already have been removed (e.g. via jsk cancel).
        if cmdtask in self.tasks:
            self.tasks.remove(cmdtask)
async def cog_check(self, ctx: commands.Context):
    """
    Local check, makes all commands in this cog owner-only
    """
    if await ctx.bot.is_owner(ctx.author):
        return True
    raise commands.NotOwner("You must own this bot to use Jishaku.")
async def jsk(self, ctx: commands.Context):
    """
    The Jishaku debug and diagnostic commands.

    This command on its own gives a status brief.
    All other functionality is within its subcommands.
    """
    summary = [
        f"Jishaku v{__version__}, discord.py `{package_version('discord.py')}`, "
        f"`Python {sys.version}` on `{sys.platform}`".replace("\n", ""),
        f"Module was loaded {humanize.naturaltime(self.load_time)}, "
        f"cog was loaded {humanize.naturaltime(self.start_time)}.",
        ""
    ]

    # psutil is optional; only include process statistics when it is available.
    if psutil:
        proc = psutil.Process()

        with proc.oneshot():
            mem = proc.memory_full_info()
            summary.append(f"Using {humanize.naturalsize(mem.rss)} physical memory and "
                           f"{humanize.naturalsize(mem.vms)} virtual memory, "
                           f"{humanize.naturalsize(mem.uss)} of which unique to this process.")

            name = proc.name()
            pid = proc.pid
            thread_count = proc.num_threads()
            summary.append(f"Running on PID {pid} (`{name}`) with {thread_count} thread(s).")

        summary.append("")  # blank line

    cache_summary = f"{len(self.bot.guilds)} guild(s) and {len(self.bot.users)} user(s)"

    if isinstance(self.bot, discord.AutoShardedClient):
        summary.append(f"This bot is automatically sharded and can see {cache_summary}.")
    elif self.bot.shard_count:
        summary.append(f"This bot is manually sharded and can see {cache_summary}.")
    else:
        summary.append(f"This bot is not sharded and can see {cache_summary}.")

    summary.append(f"Average websocket latency: {round(self.bot.latency * 1000, 2)}ms")

    await ctx.send("\n".join(summary))
async def jsk_hide(self, ctx: commands.Context):
    """
    Hides Jishaku from the help command.
    """
    # No-op when already hidden.
    if self.jsk.hidden:
        return await ctx.send("Jishaku is already hidden.")

    self.jsk.hidden = True
    await ctx.send("Jishaku is now hidden.")
async def jsk_show(self, ctx: commands.Context):
    """
    Shows Jishaku in the help command.
    """
    # No-op when already visible.
    if not self.jsk.hidden:
        return await ctx.send("Jishaku is already visible.")

    self.jsk.hidden = False
    await ctx.send("Jishaku is now visible.")
async def jsk_tasks(self, ctx: commands.Context):
    """
    Shows the currently running jishaku tasks.
    """
    if not self.tasks:
        return await ctx.send("No currently running tasks.")

    paginator = commands.Paginator(max_size=1985)

    for task in self.tasks:
        paginator.add_line(f"{task.index}: `{task.ctx.command.qualified_name}`, invoked at "
                           f"{task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC")

    interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
    await interface.send_to(ctx)
async def jsk_cancel(self, ctx: commands.Context, *, index: int):
    """
    Cancels a task with the given index.

    If the index passed is -1, will cancel the last task instead.
    """
    if not self.tasks:
        return await ctx.send("No tasks to cancel.")

    if index == -1:
        # Sentinel index: cancel the most recently submitted task.
        task = self.tasks.pop()
    else:
        task = discord.utils.get(self.tasks, index=index)
        if not task:
            return await ctx.send("Unknown task.")
        self.tasks.remove(task)

    task.task.cancel()
    return await ctx.send(f"Cancelled task {task.index}: `{task.ctx.command.qualified_name}`,"
                          f" invoked at {task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC")
async def jsk_load(self, ctx: commands.Context, *extensions: ExtensionConverter):
    """
    Loads or reloads the given extension names.

    Reports any extensions that failed to load.
    """
    paginator = commands.Paginator(prefix='', suffix='')

    # Each converter result may expand to several extension names.
    for extension in itertools.chain(*extensions):
        # Already-loaded extensions get reloaded; new ones get loaded.
        if extension in self.bot.extensions:
            method = self.bot.reload_extension
            icon = "\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS}"
        else:
            method = self.bot.load_extension
            icon = "\N{INBOX TRAY}"

        try:
            method(extension)
        except Exception as exc:  # pylint: disable=broad-except
            # Keep only the innermost frame of the traceback for brevity.
            traceback_data = ''.join(traceback.format_exception(type(exc), exc, exc.__traceback__, 1))

            paginator.add_line(
                f"{icon}\N{WARNING SIGN} `{extension}`\n```py\n{traceback_data}\n```",
                empty=True
            )
        else:
            paginator.add_line(f"{icon} `{extension}`", empty=True)

    for page in paginator.pages:
        await ctx.send(page)
async def jsk_shutdown(self, ctx: commands.Context):
    """
    Logs this bot out.
    """
    # Acknowledge first; the connection closes during logout.
    await ctx.send("Logging out now..")
    await ctx.bot.logout()
async def jsk_su(self, ctx: commands.Context, target: discord.User, *, command_string: str):
    """
    Run a command as someone else.

    This will try to resolve to a Member, but will use a User if it can't find one.
    """
    if ctx.guild:
        # Prefer a Member (guild-scoped) over the plain User when possible.
        # Doing this manually instead of via a Union converter keeps the
        # command chainable, e.g. `jsk in .. jsk su ..`
        target = ctx.guild.get_member(target.id) or target

    alt_ctx = await copy_context_with(ctx, author=target, content=ctx.prefix + command_string)

    if alt_ctx.command is None:
        if alt_ctx.invoked_with is None:
            return await ctx.send('This bot has been hard-configured to ignore this user.')
        return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')

    return await alt_ctx.command.invoke(alt_ctx)
async def jsk_in(self, ctx: commands.Context, channel: discord.TextChannel, *, command_string: str):
    """
    Run a command as if it were in a different channel.
    """
    alt_ctx = await copy_context_with(ctx, channel=channel, content=ctx.prefix + command_string)

    if alt_ctx.command is None:
        return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')

    return await alt_ctx.command.invoke(alt_ctx)
async def jsk_sudo(self, ctx: commands.Context, *, command_string: str):
    """
    Run a command bypassing all checks and cooldowns.

    This also bypasses permission checks so this has a high possibility of making a command raise.
    """
    alt_ctx = await copy_context_with(ctx, content=ctx.prefix + command_string)

    if alt_ctx.command is None:
        return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')

    # reinvoke skips checks and cooldowns, unlike invoke.
    return await alt_ctx.command.reinvoke(alt_ctx)
async def jsk_repeat(self, ctx: commands.Context, times: int, *, command_string: str):
    """
    Runs a command multiple times in a row.

    This acts like the command was invoked several times manually, so it obeys cooldowns.
    """
    with self.submit(ctx):  # allow repeats to be cancelled
        for _ in range(times):
            alt_ctx = await copy_context_with(ctx, content=ctx.prefix + command_string)

            if alt_ctx.command is None:
                return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')

            await alt_ctx.command.reinvoke(alt_ctx)
async def jsk_debug(self, ctx: commands.Context, *, command_string: str):
    """
    Run a command timing execution and catching exceptions.
    """
    alt_ctx = await copy_context_with(ctx, content=ctx.prefix + command_string)

    if alt_ctx.command is None:
        return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')

    start = time.perf_counter()

    # ReplResponseReactor surfaces exceptions via reactions; submit makes
    # this invocation cancellable from the task list.
    async with ReplResponseReactor(ctx.message):
        with self.submit(ctx):
            await alt_ctx.command.invoke(alt_ctx)

    end = time.perf_counter()
    return await ctx.send(f"Command `{alt_ctx.command.qualified_name}` finished in {end - start:.3f}s.")
async def jsk_cat(self, ctx: commands.Context, argument: str):
    """
    Read out a file, using syntax highlighting if detected.

    Lines and linespans are supported by adding '#L12' or '#L12-14' etc to the end of the filename.
    """
    match = self.__cat_line_regex.search(argument)

    if not match:  # should never happen
        return await ctx.send("Couldn't parse this input.")

    path = match.group(1)

    # Optional '#L<start>[-<end>]' suffix selects a line span.
    line_span = None
    if match.group(2):
        start = int(match.group(2))
        line_span = (start, int(match.group(3) or start))

    if not os.path.exists(path) or os.path.isdir(path):
        return await ctx.send(f"`{path}`: No file by that name.")

    size = os.path.getsize(path)

    if size <= 0:
        return await ctx.send(f"`{path}`: Cowardly refusing to read a file with no size stat"
                              f" (it may be empty, endless or inaccessible).")

    if size > 50 * (1024 ** 2):
        return await ctx.send(f"`{path}`: Cowardly refusing to read a file >50MB.")

    try:
        with open(path, "rb") as file:
            paginator = WrappedFilePaginator(file, line_span=line_span, max_size=1985)
    except UnicodeDecodeError:
        return await ctx.send(f"`{path}`: Couldn't determine the encoding of this file.")
    except ValueError as exc:
        return await ctx.send(f"`{path}`: Couldn't read this file, {exc}")

    interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
    await interface.send_to(ctx)
async def jsk_curl(self, ctx: commands.Context, url: str):
    """
    Download and display a text file from the internet.

    This command is similar to jsk cat, but accepts a URL.
    """
    # remove embed maskers if present
    url = url.lstrip("<").rstrip(">")

    async with ReplResponseReactor(ctx.message):
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                data = await response.read()
                # Hints help the paginator guess a highlight language.
                hints = (
                    response.content_type,
                    url
                )
                code = response.status

        if not data:
            return await ctx.send(f"HTTP response was empty (status code {code}).")

        try:
            paginator = WrappedFilePaginator(io.BytesIO(data), language_hints=hints, max_size=1985)
        except UnicodeDecodeError:
            return await ctx.send(f"Couldn't determine the encoding of the response. (status code {code})")
        except ValueError as exc:
            return await ctx.send(f"Couldn't read response (status code {code}), {exc}")

        interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
        await interface.send_to(ctx)
async def jsk_source(self, ctx: commands.Context, *, command_name: str):
    """
    Displays the source code for a command.
    """
    command = self.bot.get_command(command_name)
    if not command:
        return await ctx.send(f"Couldn't find command `{command_name}`.")

    try:
        source_lines, _ = inspect.getsourcelines(command.callback)
    except (TypeError, OSError):
        return await ctx.send(f"Was unable to retrieve the source for `{command}` for some reason.")

    # getsourcelines for some reason returns WITH line endings
    source_lines = ''.join(source_lines).split('\n')

    paginator = WrappedPaginator(prefix='```py', suffix='```', max_size=1985)
    for line in source_lines:
        paginator.add_line(line)

    interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
    await interface.send_to(ctx)
async def jsk_retain(self, ctx: commands.Context, *, toggle: bool = None):
    """
    Turn variable retention for REPL on or off.

    Provide no argument for current status.
    """
    if toggle is None:
        # Status query only.
        if self.retain:
            return await ctx.send("Variable retention is set to ON.")
        return await ctx.send("Variable retention is set to OFF.")

    if toggle:
        if self.retain:
            return await ctx.send("Variable retention is already set to ON.")

        self.retain = True
        # A fresh scope starts the retained session.
        self._scope = Scope()
        return await ctx.send("Variable retention is ON. Future REPL sessions will retain their scope.")

    if not self.retain:
        return await ctx.send("Variable retention is already set to OFF.")

    self.retain = False
    return await ctx.send("Variable retention is OFF. Future REPL sessions will dispose their scope when done.")
async def jsk_python(self, ctx: commands.Context, *, argument: CodeblockConverter):
    """
    Direct evaluation of Python code.
    """
    arg_dict = get_var_dict_from_ctx(ctx, SCOPE_PREFIX)
    arg_dict["_"] = self.last_result

    scope = self.scope

    try:
        async with ReplResponseReactor(ctx.message):
            with self.submit(ctx):
                async for result in AsyncCodeExecutor(argument.content, scope, arg_dict=arg_dict):
                    if result is None:
                        continue

                    self.last_result = result

                    # Dispatch on the result type to pick a sending strategy.
                    if isinstance(result, discord.File):
                        await ctx.send(file=result)
                    elif isinstance(result, discord.Embed):
                        await ctx.send(embed=result)
                    elif isinstance(result, PaginatorInterface):
                        await result.send_to(ctx)
                    else:
                        if not isinstance(result, str):
                            # repr all non-strings
                            result = repr(result)

                        if len(result) > 2000:
                            # inconsistency here, results get wrapped in codeblocks when they are too large
                            # but don't if they're not. probably not that bad, but noting for later review
                            paginator = WrappedPaginator(prefix='```py', suffix='```', max_size=1985)
                            paginator.add_line(result)

                            interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
                            await interface.send_to(ctx)
                        else:
                            if result.strip() == '':
                                result = "\u200b"

                            await ctx.send(result.replace(self.bot.http.token, "[token omitted]"))
    finally:
        scope.clear_intersection(arg_dict)
async def jsk_python_inspect(self, ctx: commands.Context, *, argument: CodeblockConverter):
    """
    Evaluation of Python code with inspect information.
    """
    arg_dict = get_var_dict_from_ctx(ctx, SCOPE_PREFIX)
    arg_dict["_"] = self.last_result

    scope = self.scope

    try:
        async with ReplResponseReactor(ctx.message):
            with self.submit(ctx):
                async for result in AsyncCodeExecutor(argument.content, scope, arg_dict=arg_dict):
                    self.last_result = result

                    # Escape backtick pairs and scrub the token from the header line.
                    header = repr(result).replace("``", "`\u200b`").replace(self.bot.http.token, "[token omitted]")

                    if len(header) > 485:
                        header = header[0:482] + "..."

                    paginator = WrappedPaginator(prefix=f"```prolog\n=== {header} ===\n", max_size=1985)

                    for name, res in all_inspections(result):
                        paginator.add_line(f"{name:16.16} :: {res}")

                    interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
                    await interface.send_to(ctx)
    finally:
        scope.clear_intersection(arg_dict)
async def jsk_shell(self, ctx: commands.Context, *, argument: CodeblockConverter):
    """
    Executes statements in the system shell.

    This uses the bash shell. Execution can be cancelled by closing the paginator.
    """
    async with ReplResponseReactor(ctx.message):
        with self.submit(ctx):
            paginator = WrappedPaginator(prefix="```sh", max_size=1985)
            paginator.add_line(f"$ {argument.content}\n")

            interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
            self.bot.loop.create_task(interface.send_to(ctx))

            with ShellReader(argument.content) as reader:
                async for line in reader:
                    # Closing the paginator aborts the stream early.
                    if interface.closed:
                        return
                    await interface.add_line(line)

            await interface.add_line(f"\n[status] Return code {reader.close_code}")
async def jsk_git(self, ctx: commands.Context, *, argument: CodeblockConverter):
    """
    Shortcut for 'jsk sh git'. Invokes the system shell.
    """
    # Delegate to the shell command with 'git ' prepended to the content.
    return await ctx.invoke(self.jsk_shell, argument=Codeblock(argument.language, "git " + argument.content))
async def jsk_voice(self, ctx: commands.Context):
    """
    Voice-related commands.

    If invoked without subcommand, relays current voice state.
    """
    # A subcommand handles its own interaction; bail out early.
    if ctx.invoked_subcommand is not None and ctx.invoked_subcommand is not self.jsk_voice:
        return

    # give info about the current voice client if there is one
    voice = ctx.guild.voice_client

    if not voice or not voice.is_connected():
        return await ctx.send("Not connected.")

    if voice.is_paused():
        state = 'paused'
    elif voice.is_playing():
        state = 'playing'
    else:
        state = 'idle'

    await ctx.send(f"Connected to {voice.channel.name}, {state}.")
async def jsk_vc_join(self, ctx: commands.Context, *,
                      destination: typing.Union[discord.VoiceChannel, discord.Member] = None):
    """
    Joins a voice channel, or moves to it if already connected.

    Passing a voice channel uses that voice channel.
    Passing a member will use that member's current voice channel.
    Passing nothing will use the author's voice channel.
    """
    destination = destination or ctx.author

    if isinstance(destination, discord.Member):
        # Resolve a member to the channel they are currently in.
        if not destination.voice or not destination.voice.channel:
            return await ctx.send("Member has no voice channel.")
        destination = destination.voice.channel

    voice = ctx.guild.voice_client

    if voice:
        await voice.move_to(destination)
    else:
        await destination.connect(reconnect=True)

    await ctx.send(f"Connected to {destination.name}.")
async def jsk_vc_disconnect(self, ctx: commands.Context):
    """
    Disconnects from the voice channel in this guild, if there is one.
    """
    voice = ctx.guild.voice_client

    # Capture the channel name before the disconnect clears it.
    await voice.disconnect()
    await ctx.send(f"Disconnected from {voice.channel.name}.")
async def jsk_vc_stop(self, ctx: commands.Context):
    """
    Stops running an audio source, if there is one.
    """
    voice = ctx.guild.voice_client

    voice.stop()
    await ctx.send(f"Stopped playing audio in {voice.channel.name}.")
async def jsk_vc_pause(self, ctx: commands.Context):
    """
    Pauses a running audio source, if there is one.
    """
    voice = ctx.guild.voice_client

    # Avoid double-pausing; report instead.
    if voice.is_paused():
        return await ctx.send("Audio is already paused.")

    voice.pause()
    await ctx.send(f"Paused audio in {voice.channel.name}.")
async def jsk_vc_resume(self, ctx: commands.Context):
    """
    Resumes a running audio source, if there is one.
    """
    voice = ctx.guild.voice_client

    # Resuming only makes sense from a paused state.
    if not voice.is_paused():
        return await ctx.send("Audio is not paused.")

    voice.resume()
    await ctx.send(f"Resumed audio in {voice.channel.name}.")
async def jsk_vc_volume(self, ctx: commands.Context, *, percentage: float):
    """
    Adjusts the volume of an audio source if it is supported.
    """
    # Clamp the requested percentage into the [0.0, 1.0] gain range.
    volume = max(0.0, min(1.0, percentage / 100))

    source = ctx.guild.voice_client.source

    if not isinstance(source, discord.PCMVolumeTransformer):
        return await ctx.send("This source doesn't support adjusting volume or "
                              "the interface to do so is not exposed.")

    source.volume = volume
    await ctx.send(f"Volume set to {volume * 100:.2f}%")
async def jsk_vc_play(self, ctx: commands.Context, *, uri: str):
    """
    Plays audio direct from a URI.

    Can be either a local file or an audio resource on the internet.
    """
    voice = ctx.guild.voice_client

    # Stop any current playback before starting the new source.
    if voice.is_playing():
        voice.stop()

    # remove embed maskers if present
    uri = uri.lstrip("<").rstrip(">")

    voice.play(discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(uri)))
    await ctx.send(f"Playing in {voice.channel.name}.")
async def jsk_vc_youtube_dl(self, ctx: commands.Context, *, url: str):
    """
    Plays audio from youtube_dl-compatible sources.
    """
    # youtube_dl is an optional dependency.
    if not youtube_dl:
        return await ctx.send("youtube_dl is not installed.")

    voice = ctx.guild.voice_client

    # Stop any current playback before starting the new source.
    if voice.is_playing():
        voice.stop()

    # remove embed maskers if present
    url = url.lstrip("<").rstrip(">")

    voice.play(discord.PCMVolumeTransformer(BasicYouTubeDLSource(url)))
    await ctx.send(f"Playing in {voice.channel.name}.")
def get_language(query: str) -> str:
    """Tries to work out the highlight.js language of a given file name or
    shebang. Returns an empty string if none match.
    """
    lowered = query.lower()
    # First matching suffix wins; fall back to '' when nothing matches.
    return next((language for language in LANGUAGES if lowered.endswith(language)), '')
def find_extensions_in(path: typing.Union[str, pathlib.Path]) -> list:
    """
    Tries to find things that look like bot extensions in a directory.
    """
    path = pathlib.Path(path)

    if not path.is_dir():
        return []

    def dotted(parts) -> str:
        # Drop a leading '.' component (relative paths) before joining.
        if parts and parts[0] == '.':
            parts = parts[1:]
        return '.'.join(parts)

    extension_names = []

    # Single-file extensions directly in this folder
    for subpath in path.glob('*.py'):
        extension_names.append(dotted(subpath.with_suffix('').parts))

    # Package extensions (subfolders containing an __init__.py)
    for subpath in path.glob('*/__init__.py'):
        extension_names.append(dotted(subpath.parent.parts))

    return extension_names
def resolve_extensions(bot: commands.Bot, name: str) -> list:
    """
    Tries to resolve extension queries into a list of extension names.
    """
    if name.endswith('.*'):
        # Wildcard query: interpret the dotted prefix as a directory path.
        return find_extensions_in(pathlib.Path(*name[:-2].split('.')))

    if name == '~':
        # '~' means "every currently loaded extension".
        return list(bot.extensions.keys())

    return [name]
def package_version(package_name: str) -> typing.Optional[str]:
    """
    Returns package version as a string, or None if it couldn't be found.
    """
    # importlib.metadata (stdlib, Python 3.8+) replaces the deprecated
    # pkg_resources API — far faster to import and not slated for removal.
    # Imported locally so this module keeps working even if setuptools
    # is absent from the environment.
    import importlib.metadata

    try:
        return importlib.metadata.version(package_name)
    except importlib.metadata.PackageNotFoundError:
        return None
async def vc_check(ctx: commands.Context):  # pylint: disable=unused-argument
    """
    Check for whether VC is available in this bot.
    """
    # Voice needs both PyNaCl (encryption) and libopus (encoding).
    if not discord.voice_client.has_nacl:
        raise commands.CheckFailure("voice cannot be used because PyNaCl is not loaded")

    if not discord.opus.is_loaded():
        raise commands.CheckFailure("voice cannot be used because libopus is not loaded")

    return True
async def connected_check(ctx: commands.Context):
    """
    Check whether we are connected to VC in this guild.
    """
    voice = ctx.guild.voice_client

    if not voice or not voice.is_connected():
        raise commands.CheckFailure("Not connected to VC in this guild")

    return True
async def playing_check(ctx: commands.Context):
    """
    Checks whether we are playing audio in VC in this guild.

    This doubles up as a connection check.
    """
    # connected_check raises if not connected, so reaching the second
    # condition implies a live voice client.
    if await connected_check(ctx) and not ctx.guild.voice_client.is_playing():
        raise commands.CheckFailure("The voice client in this guild is not playing anything.")
    return True
def executor_function(sync_function: typing.Callable):
    """A decorator that wraps a sync function in an executor, changing it into an async function.

    This allows processing functions to be wrapped and used immediately as an async function.

    Examples
    ---------

    Pushing processing with the Python Imaging Library into an executor:

    .. code-block:: python3

        from io import BytesIO
        from PIL import Image

        from jishaku.functools import executor_function

        @executor_function
        def color_processing(color: discord.Color):
            with Image.new('RGB', (64, 64), color.to_rgb()) as im:
                buff = BytesIO()
                im.save(buff, 'png')
                buff.seek(0)
            return buff

        @bot.command()
        async def color(ctx: commands.Context, color: discord.Color=None):
            color = color or ctx.author.color
            buff = await color_processing(color=color)
            await ctx.send(file=discord.File(fp=buff, filename='color.png'))
    """
    @functools.wraps(sync_function)
    async def sync_wrapper(*args, **kwargs):
        """
        Asynchronous function that wraps a sync function with an executor.
        """
        # get_running_loop() is the correct call from inside a coroutine;
        # asyncio.get_event_loop() is deprecated in that context and can
        # misbehave when no loop is set on the current thread.
        loop = asyncio.get_running_loop()
        internal_function = functools.partial(sync_function, *args, **kwargs)
        return await loop.run_in_executor(None, internal_function)

    return sync_wrapper
def features(sender=''):
    '''Returns a list of signature features.

    Each element is a callable mapping a single message line to 0 or 1,
    so applying the whole list to one line yields its feature vector.
    :param sender: sender string; used only by the sender-name feature.
    '''
    return [
        # This one isn't from paper.
        # Meant to match companies names, sender's names, address.
        many_capitalized_words,
        # This one is not from paper.
        # Line is too long.
        # This one is less aggressive than `Line is too short`
        lambda line: 1 if len(line) > TOO_LONG_SIGNATURE_LINE else 0,
        # Line contains email pattern.
        binary_regex_search(RE_EMAIL),
        # Line contains url.
        binary_regex_search(RE_URL),
        # Line contains phone number pattern.
        binary_regex_search(RE_RELAX_PHONE),
        # Line matches the regular expression "^[\s]*---*[\s]*$".
        binary_regex_match(RE_SEPARATOR),
        # Line has a sequence of 10 or more special characters.
        binary_regex_search(RE_SPECIAL_CHARS),
        # Line contains any typical signature words.
        binary_regex_search(RE_SIGNATURE_WORDS),
        # Line contains a pattern like Vitor R. Carvalho or William W. Cohen.
        binary_regex_search(RE_NAME),
        # Percentage of punctuation symbols in the line is larger than 50%
        lambda line: 1 if punctuation_percent(line) > 50 else 0,
        # Percentage of punctuation symbols in the line is larger than 90%
        lambda line: 1 if punctuation_percent(line) > 90 else 0,
        contains_sender_names(sender)
    ]
def apply_features(body, features):
    '''Applies features to message body lines.

    Returns list of lists. Each inner list corresponds to one of the last
    non-empty body lines (counting from the end) and holds the occurrence
    flags (0 or 1) produced by every feature for that line.
    '''
    # keep only non-empty lines, then restrict to the signature-sized tail
    non_empty = [ln for ln in body.splitlines() if ln.strip()]
    tail = non_empty[-SIGNATURE_MAX_LINES:]
    patterns = [[feature(ln) for feature in features] for ln in tail]
    if patterns:
        return patterns
    # no non-empty lines at all: fall back to a single all-zero pattern
    return [[0 for _ in features]]
def build_pattern(body, features):
    '''Converts body into a pattern i.e. a point in the features space.

    Applies features to the body lines and sums up the results.
    Elements of the pattern indicate how many times a certain feature occurred
    in the last lines of the body.
    '''
    line_patterns = apply_features(body, features)
    # column-wise sum over the per-line feature vectors
    return [sum(column) for column in zip(*line_patterns)]
def get_signature_candidate(lines):
    """Return lines that could hold signature

    The lines should:

    * be among last SIGNATURE_MAX_LINES non-empty lines.
    * not include first line
    * be shorter than TOO_LONG_SIGNATURE_LINE
    * not include more than one line that starts with dashes

    :param lines: all message lines (including empty ones).
    :return: the trailing slice of ``lines`` that may be a signature,
        or an empty list when no candidate exists.
    """
    # non empty lines indexes
    non_empty = [i for i, line in enumerate(lines) if line.strip()]

    # if message is empty or just one line then there is no signature
    if len(non_empty) <= 1:
        return []

    # we don't expect signature to start at the 1st line
    candidate = non_empty[1:]
    # signature shouldn't be longer then SIGNATURE_MAX_LINES
    candidate = candidate[-SIGNATURE_MAX_LINES:]

    # classify each candidate index, then let the regex-based filter pick
    # the actual signature start
    markers = _mark_candidate_indexes(lines, candidate)
    candidate = _process_marked_candidate_indexes(candidate, markers)

    # get actual lines for the candidate instead of indexes
    if candidate:
        candidate = lines[candidate[0]:]
        return candidate

    return []
def _mark_candidate_indexes(lines, candidate):
    """Mark candidate indexes with markers

    Markers:

    * c - line that could be a signature line
    * l - long line
    * d - line that starts with dashes but has other chars as well

    >>> _mark_candidate_lines(['Some text', '', '-', 'Bob'], [0, 2, 3])
    'cdc'
    """
    # NOTE(review): the doctest above calls `_mark_candidate_lines`, a name
    # that does not exist, so it never executes; verify the expected output
    # before renaming it to this function.
    # at first consider everything to be potential signature lines
    markers = list('c' * len(candidate))

    # mark lines starting from bottom up
    for i, line_idx in reversed(list(enumerate(candidate))):
        # strip once instead of re-stripping in every branch
        line = lines[line_idx].strip()
        if len(line) > TOO_LONG_SIGNATURE_LINE:
            markers[i] = 'l'
        elif line.startswith('-') and line.strip("-"):
            # dashes followed by other characters, e.g. '--Bob'
            markers[i] = 'd'

    return "".join(markers)
def _process_marked_candidate_indexes(candidate, markers):
    """
    Run regexes against candidate's marked indexes to strip
    signature candidate.

    >>> _process_marked_candidate_indexes([9, 12, 14, 15, 17], 'clddc')
    [15, 17]
    """
    # markers are reversed so the regex can anchor at the message end;
    # the 'candidate' group length tells how many trailing indexes to keep
    match = RE_SIGNATURE_CANDIDATE.match(markers[::-1])
    return candidate[-match.end('candidate'):] if match else []
def contains_sender_names(sender):
    '''Returns a functions to search sender\'s name or it\'s part.

    >>> feature = contains_sender_names("Sergey N.  Obukhov <xxx@example.com>")
    >>> feature("Sergey Obukhov")
    1
    >>> feature("BR, Sergey N.")
    1
    >>> feature("Sergey")
    1
    >>> contains_sender_names("<serobnic@mail.ru>")("Serobnic")
    1
    >>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
    1
    '''
    # build one alternation of every extracted name plus its capitalized form
    names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
                                        for e in extract_names(sender)]))
    names = names or sender
    # NOTE(review): names are interpolated into the regex without re.escape,
    # so a sender containing regex metacharacters could yield an unintended
    # pattern -- confirm upstream sanitisation.
    if names != '':
        return binary_regex_search(re.compile(names))
    return lambda s: 0
def categories_percent(s, categories):
    '''Returns category characters percent.

    >>> categories_percent("qqq ggg hhh", ["Po"])
    0.0
    >>> categories_percent("q,w.", ["Po"])
    50.0
    >>> categories_percent("qqq ggg hhh", ["Nd"])
    0.0
    >>> categories_percent("q5", ["Nd"])
    50.0
    >>> categories_percent("s.s,5s", ["Po", "Nd"])
    50.0
    '''
    s = to_unicode(s, precise=True)
    if not s:
        return 0
    # count characters whose Unicode category is in the requested set
    matched = sum(1 for ch in s if unicodedata.category(ch) in categories)
    return 100 * float(matched) / len(s)
def capitalized_words_percent(s):
    '''Returns capitalized words percent.

    A word counts as capitalized when its first letter is upper case and
    its second letter is not (filters out ALL-CAPS tokens). Only words
    longer than 2 characters with a valid start are considered.
    '''
    s = to_unicode(s, precise=True)
    # raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, error in future versions)
    words = re.split(r'\s', s)
    words = [w for w in words if w.strip()]
    words = [w for w in words if len(w) > 2]
    capitalized_words_counter = 0
    valid_words_counter = 0
    for word in words:
        if not INVALID_WORD_START.match(word):
            valid_words_counter += 1
            if word[0].isupper() and not word[1].isupper():
                capitalized_words_counter += 1
    if valid_words_counter > 0 and len(words) > 1:
        return 100 * float(capitalized_words_counter) / valid_words_counter

    return 0
def has_signature(body, sender):
    '''Checks if the body has signature. Returns True or False.'''
    non_empty = [line for line in body.splitlines() if line.strip()]
    candidate = non_empty[-SIGNATURE_MAX_LINES:]
    upvotes = 0
    # build the sender-name feature once instead of once per line
    sender_feature = contains_sender_names(sender)
    for line in candidate:
        # we check lines for sender's name, phone, email and url,
        # those signature lines don't take more then 27 lines
        if len(line.strip()) > 27:
            continue

        if sender_feature(line):
            return True

        if (binary_regex_search(RE_RELAX_PHONE)(line) +
                binary_regex_search(RE_EMAIL)(line) +
                binary_regex_search(RE_URL)(line) == 1):
            upvotes += 1

        if upvotes > 1:
            return True

    # return False explicitly, as the docstring promises
    # (previously the function fell through and returned None)
    return False
def remove_initial_spaces_and_mark_message_lines(lines):
    """
    Removes the initial spaces in each line before marking message lines.

    This ensures headers can be identified if they are indented with spaces.
    Note: ``lines`` is modified in place (each element replaced by its
    left-stripped version), matching the original behaviour.
    """
    # slice assignment keeps the in-place mutation while replacing the
    # manual while-loop index bookkeeping
    lines[:] = [line.lstrip(' ') for line in lines]
    return mark_message_lines(lines)
def mark_message_lines(lines):
    """Mark message lines with markers to distinguish quotation lines.

    Markers:

    * e - empty line
    * m - line that starts with quotation marker '>'
    * s - splitter line
    * f - forward line ('---- Forwarded message ----')
    * t - presumably lines from the last message in the conversation

    >>> mark_message_lines(['answer', 'From: foo@bar.com', '', '> question'])
    'tsem'
    """
    markers = ['e' for _ in lines]
    i = 0
    while i < len(lines):
        if not lines[i].strip():
            markers[i] = 'e'  # empty line
        elif QUOT_PATTERN.match(lines[i]):
            markers[i] = 'm'  # line with quotation marker
        elif RE_FWD.match(lines[i]):
            markers[i] = 'f'  # ---- Forwarded message ----
        else:
            # in case splitter is spread across several lines
            splitter = is_splitter('\n'.join(lines[i:i + SPLITTER_MAX_LINES]))

            if splitter:
                # append as many splitter markers as lines in splitter
                splitter_lines = splitter.group().splitlines()
                for j in range(len(splitter_lines)):
                    markers[i + j] = 's'

                # skip splitter lines
                i += len(splitter_lines) - 1
            else:
                # probably the line from the last message in the conversation
                markers[i] = 't'
        i += 1

    return ''.join(markers)
def process_marked_lines(lines, markers, return_flags=None):
    """Run regexes against message's marked lines to strip quotations.

    Return only last message lines.
    >>> mark_message_lines(['Hello', 'From: foo@bar.com', '', '> Hi', 'tsem'])
    ['Hello']

    Also returns return_flags.
    return_flags = [were_lines_deleted, first_deleted_line,
                    last_deleted_line]
    """
    # Avoid a shared mutable default argument; callers may still pass a
    # list of their own to receive the flags as an out-parameter.
    if return_flags is None:
        return_flags = [False, -1, -1]
    markers = ''.join(markers)
    # if there are no splitter there should be no markers
    if 's' not in markers and not re.search('(me*){3}', markers):
        markers = markers.replace('m', 't')

    if re.match('[te]*f', markers):
        return_flags[:] = [False, -1, -1]
        return lines

    # inlined reply
    # use lookbehind assertions to find overlapping entries e.g. for 'mtmtm'
    # both 't' entries should be found
    for inline_reply in re.finditer('(?<=m)e*(t[te]*)m', markers):
        # long links could break sequence of quotation lines but they shouldn't
        # be considered an inline reply
        links = (
            RE_PARENTHESIS_LINK.search(lines[inline_reply.start() - 1]) or
            RE_PARENTHESIS_LINK.match(lines[inline_reply.start()].strip()))
        if not links:
            return_flags[:] = [False, -1, -1]
            return lines

    # cut out text lines coming after splitter if there are no markers there
    quotation = re.search('(se*)+((t|f)+e*)+', markers)
    if quotation:
        return_flags[:] = [True, quotation.start(), len(lines)]
        return lines[:quotation.start()]

    # handle the case with markers
    quotation = (RE_QUOTATION.search(markers) or
                 RE_EMPTY_QUOTATION.search(markers))

    if quotation:
        return_flags[:] = True, quotation.start(1), quotation.end(1)
        return lines[:quotation.start(1)] + lines[quotation.end(1):]

    return_flags[:] = [False, -1, -1]
    return lines
def preprocess(msg_body, delimiter, content_type='text/plain'):
    """Prepares msg_body for being stripped.

    Replaces link brackets so that they couldn't be taken for quotation marker.
    Splits line in two if splitter pattern preceded by some text on the same
    line (done only for 'On <date> <person> wrote:' pattern).

    Converts msg_body into a unicode.
    """
    # two normalisation passes: neutralise link brackets first, then make
    # sure any splitter starts on its own line
    normalised = _replace_link_brackets(msg_body)
    return _wrap_splitter_with_newline(normalised, delimiter, content_type)
def extract_from_plain(msg_body):
    """Extracts a non quoted message from provided plain text.

    :param msg_body: the whole plain-text message.
    :return: the body with trailing quotation stripped.
    """
    delimiter = get_delimiter(msg_body)
    msg_body = preprocess(msg_body, delimiter)
    # don't process too long messages
    lines = msg_body.splitlines()[:MAX_LINES_COUNT]
    markers = mark_message_lines(lines)
    lines = process_marked_lines(lines, markers)

    # concatenate lines, change links back, strip and return
    # (the unused `stripped_text = msg_body` assignment was removed)
    msg_body = delimiter.join(lines)
    msg_body = postprocess(msg_body)
    return msg_body
def _mark_quoted_email_splitlines(markers, lines):
    """
    When there are headers indented with '>' characters, this method will
    attempt to identify if the header is a splitline header. If it is, then we
    mark it with 's' instead of leaving it as 'm' and return the new markers.
    """
    marker_chars = list(markers)
    for index, line in enumerate(lines):
        if marker_chars[index] != 'm':
            continue
        # the first splitter pattern that matches promotes this 'm' to 's'
        if any(re.search(pattern, line) for pattern in SPLITTER_PATTERNS):
            marker_chars[index] = 's'
    return "".join(marker_chars)
def _correct_splitlines_in_headers(markers, lines):
    """
    Corrects markers by removing splitlines deemed to be inside header blocks.

    :param markers: one marker character per line ('s', 'm', 't', 'e', ...).
    :param lines: the message lines the markers describe.
    :return: the corrected marker string.
    """
    updated_markers = ""
    in_header_block = False
    # zip replaces the manual index counter of the original
    for marker, line in zip(markers, lines):
        # Only set in_header_block flag when we hit an 's' and line is a header
        if marker == 's':
            if not in_header_block:
                if re.search(RE_HEADER, line):
                    in_header_block = True
            else:
                # an 's' inside a header block is a false splitter: demote it
                # to 'm' (still quoted) or 't' (plain text)
                if QUOT_PATTERN.match(line):
                    marker = 'm'
                else:
                    marker = 't'

        # If the line is not a header line, set in_header_block false.
        if not re.search(RE_HEADER, line):
            in_header_block = False

        # Add the marker to the new updated markers string.
        updated_markers += marker

    return updated_markers
def is_splitter(line):
    '''
    Returns Matcher object if provided string is a splitter and
    None otherwise.
    '''
    # first pattern that matches wins; evaluation is lazy
    return next(
        (match
         for match in (re.match(pattern, line) for pattern in SPLITTER_PATTERNS)
         if match),
        None)
def is_signature_line(line, sender, classifier):
    '''Checks if the line belongs to signature. Returns True or False.'''
    # build the feature vector for this single line and reshape it to the
    # (1, n_features) matrix shape scikit-learn predictors expect
    data = numpy.array(build_pattern(line, features(sender))).reshape(1, -1)
    # NOTE(review): predict() likely returns an array, so the comparison
    # yields a one-element boolean array used for its truthiness -- confirm.
    return classifier.predict(data) > 0
def extract(body, sender):
    """Strips signature from the body of the message.

    Returns stripped body and signature as a tuple.
    If no signature is found the corresponding returned value is None.
    """
    try:
        delimiter = get_delimiter(body)

        body = body.strip()

        if has_signature(body, sender):
            lines = body.splitlines()

            markers = _mark_lines(lines, sender)
            text, signature = _process_marked_lines(lines, markers)

            if signature:
                text = delimiter.join(text)
                if text.strip():
                    return (text, delimiter.join(signature))
    except Exception:
        # best effort: a classifier failure must never break the caller,
        # so log (with traceback) and fall through to the untouched body
        log.exception('ERROR when extracting signature with classifiers')

    return (body, None)
def _mark_lines(lines, sender):
    """Mark message lines with markers to distinguish signature lines.

    Markers:

    * e - empty line
    * s - line identified as signature
    * t - other i.e. ordinary text line

    >>> mark_message_lines(['Some text', '', 'Bob'], 'Bob')
    'tes'
    """
    # EXTRACTOR is only read here, so the `global` declaration the original
    # carried was unnecessary and has been dropped
    candidate = get_signature_candidate(lines)

    # at first consider everything to be text no signature
    markers = list('t' * len(lines))

    # mark lines starting from bottom up
    # mark only lines that belong to candidate
    # no need to mark all lines of the message
    for i, line in reversed(list(enumerate(candidate))):
        # markers correspond to lines not candidate
        # so we need to recalculate our index to be
        # relative to lines not candidate
        j = len(lines) - len(candidate) + i
        if not line.strip():
            markers[j] = 'e'
        elif is_signature_line(line, sender, EXTRACTOR):
            markers[j] = 's'

    return "".join(markers)
def _process_marked_lines(lines, markers):
    """Run regexes against message's marked lines to strip signature.

    >>> _process_marked_lines(['Some text', '', 'Bob'], 'tes')
    (['Some text', ''], ['Bob'])
    """
    # reverse lines and match signature pattern for reversed lines;
    # the match length equals the number of trailing signature lines
    signature = RE_REVERSE_SIGNATURE.match(markers[::-1])
    if signature:
        return (lines[:-signature.end()], lines[-signature.end():])
    return (lines, None)
def train(classifier, train_data_filename, save_classifier_filename=None):
    """Trains and saves classifier so that it could be easily loaded later.

    The CSV file is expected to hold one labeled pattern per row, with the
    label in the last column.
    """
    dataset = genfromtxt(train_data_filename, delimiter=",")
    samples = dataset[:, :-1]
    targets = dataset[:, -1]
    classifier.fit(samples, targets)
    if save_classifier_filename:
        joblib.dump(classifier, save_classifier_filename)
    return classifier
def load(saved_classifier_filename, train_data_filename):
    """Loads saved classifier. """
    # NOTE(review): train_data_filename is unused here; presumably kept for
    # interface compatibility with callers -- confirm before removing.
    try:
        return joblib.load(saved_classifier_filename)
    except Exception:
        import sys
        if sys.version_info > (3, 0):
            # pickles written under Python 2 need the compatibility loader
            return load_compat(saved_classifier_filename)

        raise
def parse_msg_sender(filename, sender_known=True):
    """Given a filename returns the sender and the message.

    Here the message is assumed to be a whole MIME message or just
    message body.

    >>> sender, msg = parse_msg_sender('msg.eml')
    >>> sender, msg = parse_msg_sender('msg_body')

    If you don't want to consider the sender's name in your classification
    algorithm:
    >>> parse_msg_sender(filename, False)

    :return: (sender, msg) tuple; both are None when the file is missing
        or is itself a sender file.
    """
    import sys
    kwargs = {}
    # Python 3 needs an explicit encoding for text files; Python 2's
    # open() does not accept the keyword.
    if sys.version_info > (3, 0):
        kwargs["encoding"] = "utf8"

    sender, msg = None, None
    if os.path.isfile(filename) and not is_sender_filename(filename):
        with open(filename, **kwargs) as f:
            msg = f.read()
            sender = u''
            if sender_known:
                # a companion "<name>_sender" file takes precedence over
                # parsing the From: header out of the message itself
                sender_filename = build_sender_filename(filename)
                if os.path.exists(sender_filename):
                    with open(sender_filename) as sender_file:
                        sender = sender_file.read().strip()
                else:
                    # if sender isn't found then the next line fails
                    # and it is ok
                    lines = msg.splitlines()
                    for line in lines:
                        match = re.match('From:(.*)', line)
                        if match:
                            sender = match.group(1)
                            break
    return (sender, msg)
def build_detection_class(folder, dataset_filename,
                          label, sender_known=True):
    """Builds signature detection class.

    Signature detection dataset includes patterns for two classes:

    * class for positive patterns (goes with label 1)
    * class for negative patterns (goes with label -1)

    The patterns are build of emails from `folder` and appended to
    dataset file.

    >>> build_signature_detection_class('emails/P', 'train.data', 1)
    """
    with open(dataset_filename, 'a') as dataset:
        for filename in os.listdir(folder):
            filename = os.path.join(folder, filename)
            sender, msg = parse_msg_sender(filename, sender_known)
            if sender is None or msg is None:
                continue
            # strip the human annotations (#sig#, ...) before featurising
            msg = re.sub('|'.join(ANNOTATIONS), '', msg)
            X = build_pattern(msg, features(sender))
            # the class label becomes the last CSV column
            X.append(label)
            labeled_pattern = ','.join([str(e) for e in X])
            dataset.write(labeled_pattern + '\n')
def build_detection_dataset(folder, dataset_filename,
                            sender_known=True):
    """Builds signature detection dataset using emails from folder.

    folder should have the following structure:
    x-- folder
    |    x-- P
    |    |    | -- positive sample email 1
    |    |    | -- positive sample email 2
    |    |    | -- ...
    |    x-- N
    |    |    | -- negative sample email 1
    |    |    | -- negative sample email 2
    |    |    | -- ...

    If the dataset file already exist it is rewritten.
    """
    if os.path.exists(dataset_filename):
        os.remove(dataset_filename)
    # forward sender_known so callers that pass False are honoured
    # (it was previously accepted but silently ignored)
    build_detection_class(os.path.join(folder, u'P'),
                          dataset_filename, 1, sender_known)
    build_detection_class(os.path.join(folder, u'N'),
                          dataset_filename, -1, sender_known)
def build_extraction_dataset(folder, dataset_filename,
                             sender_known=True):
    """Builds signature extraction dataset using emails in the `folder`.

    The emails in the `folder` should be annotated i.e. signature lines
    should be marked with `#sig#`.

    Each of the last SIGNATURE_MAX_LINES lines of every message becomes one
    labeled pattern row: label 1 for annotated signature lines, -1 otherwise.
    """
    if os.path.exists(dataset_filename):
        os.remove(dataset_filename)
    with open(dataset_filename, 'a') as dataset:
        for filename in os.listdir(folder):
            filename = os.path.join(folder, filename)
            sender, msg = parse_msg_sender(filename, sender_known)
            if not sender or not msg:
                continue
            lines = msg.splitlines()
            # walk the message bottom-up, at most SIGNATURE_MAX_LINES deep
            for i in range(1, min(SIGNATURE_MAX_LINES,
                                  len(lines)) + 1):
                line = lines[-i]
                label = -1
                if line[:len(SIGNATURE_ANNOTATION)] == \
                        SIGNATURE_ANNOTATION:
                    label = 1
                    line = line[len(SIGNATURE_ANNOTATION):]
                elif line[:len(REPLY_ANNOTATION)] == REPLY_ANNOTATION:
                    # reply lines keep label -1 but lose their annotation
                    line = line[len(REPLY_ANNOTATION):]
                X = build_pattern(line, features(sender))
                X.append(label)
                labeled_pattern = ','.join([str(e) for e in X])
                dataset.write(labeled_pattern + '\n')
def add_checkpoint(html_note, counter):
    """Recursively adds checkpoints to html tree.

    Appends a checkpoint marker to the node's text, recurses into every
    child, then appends a marker to the node's tail. Returns the next
    unused counter value.
    """
    html_note.text = (html_note.text or '') + (
        CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX)
    counter += 1

    for child in html_note.iterchildren():
        counter = add_checkpoint(child, counter)

    html_note.tail = (html_note.tail or '') + (
        CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX)
    counter += 1
    return counter
def delete_quotation_tags(html_note, counter, quotation_checkpoints):
    """Deletes tags with quotation checkpoints from html tree.

    :param html_note: current element of the tree (recursion root).
    :param counter: index of the next checkpoint to inspect.
    :param quotation_checkpoints: truthy entries mark checkpoints that
        belong to the quotation and must be removed.
    :return: (next counter value, whether this whole tag is quotation).
    """
    tag_in_quotation = True
    if quotation_checkpoints[counter]:
        html_note.text = ''
    else:
        tag_in_quotation = False
    counter += 1

    quotation_children = []  # Children tags which are in quotation.
    for child in html_note.iterchildren():
        counter, child_tag_in_quotation = delete_quotation_tags(
            child, counter,
            quotation_checkpoints
        )
        if child_tag_in_quotation:
            quotation_children.append(child)

    if quotation_checkpoints[counter]:
        html_note.tail = ''
    else:
        tag_in_quotation = False
    counter += 1
    if tag_in_quotation:
        # both text and tail were quotation: let the parent delete this tag
        return counter, tag_in_quotation
    else:
        # Remove quotation children.
        for child in quotation_children:
            html_note.remove(child)
        return counter, tag_in_quotation
def cut_gmail_quote(html_message):
    ''' Cuts the outermost block element with class gmail_quote.

    Returns True when a quote block was removed, otherwise None.
    '''
    gmail_quote = cssselect('div.gmail_quote', html_message)
    # skip blocks whose leading text is a forward marker: forwarded
    # messages must be preserved, not stripped as quotation
    if gmail_quote and (gmail_quote[0].text is None or not RE_FWD.match(gmail_quote[0].text)):
        gmail_quote[0].getparent().remove(gmail_quote[0])
        return True
def cut_microsoft_quote(html_message):
    ''' Cuts splitter block and all following blocks.

    Handles the splitter markup produced by several Outlook / Windows Mail
    versions. Returns True when a splitter was found and everything from it
    to the end of its parent was removed, False otherwise.
    '''
    #use EXSLT extensions to have a regex match() function with lxml
    ns = {"re": "http://exslt.org/regular-expressions"}

    #general pattern: @style='border:none;border-top:solid <color> 1.0pt;padding:3.0pt 0<unit> 0<unit> 0<unit>'
    #outlook 2007, 2010 (international) <color=#B5C4DF> <unit=cm>
    #outlook 2007, 2010 (american)      <color=#B5C4DF> <unit=pt>
    #outlook 2013 (international)       <color=#E1E1E1> <unit=cm>
    #outlook 2013 (american)            <color=#E1E1E1> <unit=pt>
    #also handles a variant with a space after the semicolon
    splitter = html_message.xpath(
        #outlook 2007, 2010, 2013 (international, american)
        "//div[@style[re:match(., 'border:none; ?border-top:solid #(E1E1E1|B5C4DF) 1.0pt; ?"
        "padding:3.0pt 0(in|cm) 0(in|cm) 0(in|cm)')]]|"
        #windows mail
        "//div[@style='padding-top: 5px; "
        "border-top-color: rgb(229, 229, 229); "
        "border-top-width: 1px; border-top-style: solid;']"
        , namespaces=ns
    )

    if splitter:
        splitter = splitter[0]
        #outlook 2010
        if splitter == splitter.getparent().getchildren()[0]:
            splitter = splitter.getparent()
    else:
        #outlook 2003
        splitter = html_message.xpath(
            "//div"
            "/div[@class='MsoNormal' and @align='center' "
            "and @style='text-align:center']"
            "/font"
            "/span"
            "/hr[@size='3' and @width='100%' and @align='center' "
            "and @tabindex='-1']"
        )
        if len(splitter):
            splitter = splitter[0]
            # climb from the <hr> back up to the wrapping <div>
            splitter = splitter.getparent().getparent()
            splitter = splitter.getparent().getparent()

    if len(splitter):
        parent = splitter.getparent()
        after_splitter = splitter.getnext()
        # drop every sibling after the splitter, then the splitter itself
        while after_splitter is not None:
            parent.remove(after_splitter)
            after_splitter = splitter.getnext()
        parent.remove(splitter)
        return True

    return False
def cut_blockquote(html_message):
    ''' Cuts the last non-nested blockquote with wrapping elements.'''
    matches = html_message.xpath(
        '(.//blockquote)'
        '[not(@class="gmail_quote") and not(ancestor::blockquote)]'
        '[last()]')
    if not matches:
        return None
    target = matches[0]
    target.getparent().remove(target)
    return True
def make_commkey(key, session_id, ticks=50):
    """
    take a password and session_id and scramble them to send to the machine.
    copied from commpro.c - MakeKey

    Steps: reverse the 32 bits of the key, add the session id, XOR the four
    bytes with 'ZKSO', swap the two 16-bit halves, then XOR with the low
    byte of `ticks` (that byte itself replaces the third position).
    """
    key = int(key)
    session_id = int(session_id)

    # bit-reverse the 32-bit key (LSB becomes MSB)
    reversed_key = 0
    for bit in range(32):
        reversed_key = (reversed_key << 1) | ((key >> bit) & 1)
    mixed = reversed_key + session_id

    raw = pack(b'I', mixed)
    b0, b1, b2, b3 = unpack(b'BBBB', raw)
    raw = pack(
        b'BBBB',
        b0 ^ ord('Z'),
        b1 ^ ord('K'),
        b2 ^ ord('S'),
        b3 ^ ord('O'))

    # swap the 16-bit halves
    lo, hi = unpack(b'HH', raw)
    raw = pack(b'HH', hi, lo)

    tick_byte = 0xff & ticks
    c0, c1, c2, c3 = unpack(b'BBBB', raw)
    return pack(
        b'BBBB',
        c0 ^ tick_byte,
        c1 ^ tick_byte,
        tick_byte,
        c3 ^ tick_byte)
def __create_tcp_top(self, packet):
    """
    Prefix the complete packet with the TCP transport header
    (the two machine magic constants plus the payload length).
    """
    length = len(packet)
    top = pack('<HHI', const.MACHINE_PREPARE_DATA_1, const.MACHINE_PREPARE_DATA_2, length)
    return top + packet
def __create_header(self, command, command_string, session_id, reply_id):
    """
    Puts a the parts that make up a packet together and packs them into a byte string

    The header is built twice: first with a zero checksum so the checksum
    can be computed over the full packet, then again with the real
    checksum and the incremented (wrapped) reply id.
    """
    buf = pack('<4H', command, 0, session_id, reply_id) + command_string
    buf = unpack('8B' + '%sB' % len(command_string), buf)
    checksum = unpack('H', self.__create_checksum(buf))[0]
    reply_id += 1
    # reply ids are 16-bit and wrap around at USHRT_MAX
    if reply_id >= const.USHRT_MAX:
        reply_id -= const.USHRT_MAX

    buf = pack('<4H', command, checksum, session_id, reply_id)
    return buf + command_string
def __create_checksum(self, p):
    """
    Calculates the checksum of the packet to be sent to the time clock
    Copied from zkemsdk.c

    Sums the packet as 16-bit words (plus a trailing odd byte, if any),
    keeps the running total below USHRT_MAX, then returns the inverted
    sum packed as an unsigned short.
    """
    l = len(p)
    checksum = 0
    while l > 1:
        # combine two consecutive bytes into one little-endian word
        checksum += unpack('H', pack('BB', p[0], p[1]))[0]
        p = p[2:]
        if checksum > const.USHRT_MAX:
            checksum -= const.USHRT_MAX
        l -= 2
    if l:
        # odd-length packet: add the last byte on its own
        checksum = checksum + p[-1]

    while checksum > const.USHRT_MAX:
        checksum -= const.USHRT_MAX

    checksum = ~checksum

    while checksum < 0:
        checksum += const.USHRT_MAX

    return pack('H', checksum)
def __send_command(self, command, command_string=b'', response_size=8):
    """
    send command to the terminal

    :param command: protocol command code (const.CMD_*).
    :param command_string: extra payload bytes for the command.
    :param response_size: how many payload bytes to read back.
    :return: dict with 'status' (bool, True on ACK/data replies) and
        'code' (the raw response command code).
    :raises ZKErrorConnection: when used before connect().
    :raises ZKNetworkError: on socket errors or an invalid TCP packet.
    """
    # only CONNECT and AUTH are allowed before a session is established
    if command not in [const.CMD_CONNECT, const.CMD_AUTH] and not self.is_connect:
        raise ZKErrorConnection("instance are not connected.")

    buf = self.__create_header(command, command_string, self.__session_id, self.__reply_id)
    try:
        if self.tcp:
            # TCP transport wraps the packet in an extra 8-byte header
            top = self.__create_tcp_top(buf)
            self.__sock.send(top)
            self.__tcp_data_recv = self.__sock.recv(response_size + 8)
            self.__tcp_length = self.__test_tcp_top(self.__tcp_data_recv)
            if self.__tcp_length == 0:
                raise ZKNetworkError("TCP packet invalid")
            self.__header = unpack('<4H', self.__tcp_data_recv[8:16])
            self.__data_recv = self.__tcp_data_recv[8:]
        else:
            self.__sock.sendto(buf, self.__address)
            self.__data_recv = self.__sock.recv(response_size)
            self.__header = unpack('<4H', self.__data_recv[:8])
    except Exception as e:
        raise ZKNetworkError(str(e))

    self.__response = self.__header[0]
    self.__reply_id = self.__header[3]
    self.__data = self.__data_recv[8:]
    if self.__response in [const.CMD_ACK_OK, const.CMD_PREPARE_DATA, const.CMD_DATA]:
        return {
            'status': True,
            'code': self.__response
        }
    return {
        'status': False,
        'code': self.__response
    }
def __ack_ok(self):
    """
    Send a CMD_ACK_OK packet to the device (fire-and-forget; no reply
    is read here).
    """
    buf = self.__create_header(const.CMD_ACK_OK, b'', self.__session_id, const.USHRT_MAX - 1)
    try:
        if self.tcp:
            top = self.__create_tcp_top(buf)
            self.__sock.send(top)
        else:
            self.__sock.sendto(buf, self.__address)
    except Exception as e:
        raise ZKNetworkError(str(e))
def __get_data_size(self):
    """
    Checks a returned packet to see if it returned CMD_PREPARE_DATA,
    indicating that data packets are to be sent

    Returns the amount of bytes that are going to be sent
    """
    if self.__response == const.CMD_PREPARE_DATA:
        # the announced size is the first little-endian uint of the payload
        return unpack('I', self.__data[:4])[0]
    return 0
def __decode_time(self, t):
    """
    Decode a timestamp retrieved from the timeclock

    copied from zkemsdk.c - DecodeTime
    """
    stamp = unpack("<I", t)[0]
    # the device packs the date/time as a mixed-radix integer
    # (60 seconds / 60 minutes / 24 hours / 31 days / 12 months)
    stamp, second = divmod(stamp, 60)
    stamp, minute = divmod(stamp, 60)
    stamp, hour = divmod(stamp, 24)
    stamp, day = divmod(stamp, 31)
    stamp, month = divmod(stamp, 12)
    return datetime(stamp + 2000, month + 1, day + 1, hour, minute, second)
def __decode_timehex(self, timehex):
    """
    timehex string of six bytes

    Fields are single bytes: year-2000, month, day, hour, minute, second.
    """
    fields = unpack("6B", timehex)
    return datetime(fields[0] + 2000, *fields[1:])
def __encode_time(self, t):
    """
    Encode a timestamp so that it can be read on the timeclock

    Inverse of __decode_time: packs the datetime into the device's
    mixed-radix integer (12 months / 31 days / 24 hours / 60 / 60).
    """
    # formula taken from zkemsdk.c - EncodeTime
    # can also be found in the technical manual
    d = (
        ((t.year % 100) * 12 * 31 + ((t.month - 1) * 31) + t.day - 1) *
        (24 * 60 * 60) + (t.hour * 60 + t.minute) * 60 + t.second
    )
    return d
def connect(self):
    """
    connect to the device

    Pings the device (unless disabled), picks TCP vs UDP, performs the
    CONNECT handshake and, if the device answers CMD_ACK_UNAUTH,
    authenticates with a commkey built from the password and session id.

    :return: self (connected instance)
    :raises ZKNetworkError: when the device is unreachable.
    :raises ZKErrorResponse: on authentication failure or bad response.
    """
    self.end_live_capture = False
    if not self.ommit_ping and not self.helper.test_ping():
        raise ZKNetworkError("can't reach device (ping %s)" % self.__address[0])
    if not self.force_udp and self.helper.test_tcp() == 0:
        self.user_packet_size = 72  # default zk8
    self.__create_socket()
    self.__session_id = 0
    self.__reply_id = const.USHRT_MAX - 1
    cmd_response = self.__send_command(const.CMD_CONNECT)
    # the device assigns the session id in the CONNECT reply header
    self.__session_id = self.__header[2]
    if cmd_response.get('code') == const.CMD_ACK_UNAUTH:
        if self.verbose: print ("try auth")
        command_string = make_commkey(self.__password, self.__session_id)
        cmd_response = self.__send_command(const.CMD_AUTH, command_string)
    if cmd_response.get('status'):
        self.is_connect = True
        return self
    else:
        if cmd_response["code"] == const.CMD_ACK_UNAUTH:
            raise ZKErrorResponse("Unauthenticated")
        if self.verbose: print ("connect err response {} ".format(cmd_response["code"]))
        raise ZKErrorResponse("Invalid response: Can't connect")
def disconnect(self):
    """
    diconnect from the connected device

    :return: bool
    """
    reply = self.__send_command(const.CMD_EXIT)
    if not reply.get('status'):
        raise ZKErrorResponse("can't disconnect")
    self.is_connect = False
    if self.__sock:
        self.__sock.close()
    return True
def enable_device(self):
    """
    re-enable the connected device and allow user activity in device again

    :return: bool
    """
    if not self.__send_command(const.CMD_ENABLEDEVICE).get('status'):
        raise ZKErrorResponse("Can't enable device")
    self.is_enabled = True
    return True
def get_device_name(self):
    """
    return the device name

    :return: str (empty string when the query fails)
    """
    reply = self.__send_command(const.CMD_OPTIONS_RRQ, b'~DeviceName\x00', 1024)
    if not reply.get('status'):
        return ""
    # payload looks like b'~DeviceName=<name>\x00...'
    raw = self.__data.split(b'=', 1)[-1].split(b'\x00')[0]
    return raw.decode()
def get_network_params(self):
    """
    get network params

    :return: dict with 'ip', 'mask' and 'gateway' string values.
    """
    ip = self.__address[0]
    mask = b''
    gate = b''
    # NOTE(review): if the IPAddress query fails, `ip` keeps the str value
    # from self.__address[0] and ip.decode() below would raise
    # AttributeError -- confirm whether that path can occur in practice.
    cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'IPAddress\x00', 1024)
    if cmd_response.get('status'):
        ip = (self.__data.split(b'=', 1)[-1].split(b'\x00')[0])
    cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'NetMask\x00', 1024)
    if cmd_response.get('status'):
        mask = (self.__data.split(b'=', 1)[-1].split(b'\x00')[0])
    cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'GATEIPAddress\x00', 1024)
    if cmd_response.get('status'):
        gate = (self.__data.split(b'=', 1)[-1].split(b'\x00')[0])
    return {'ip': ip.decode(), 'mask': mask.decode(), 'gateway': gate.decode()}
def read_sizes(self):
    """
    Read the device's memory usage / capacity counters into instance
    attributes (users, fingers, records, cards, capacities, faces, ...).

    :return: True on success.
    :raises ZKErrorResponse: if the device rejects the request.
    """
    command = const.CMD_GET_FREE_SIZES
    response_size = 1024
    cmd_response = self.__send_command(command,b'', response_size)
    if cmd_response.get('status'):
        if self.verbose: print(codecs.encode(self.__data,'hex'))
        # (the unused `size = len(self.__data)` assignment was removed)
        if len(self.__data) >= 80:
            fields = unpack('20i', self.__data[:80])
            self.users = fields[4]
            self.fingers = fields[6]
            self.records = fields[8]
            self.dummy = fields[10] #???
            self.cards = fields[12]
            self.fingers_cap = fields[14]
            self.users_cap = fields[15]
            self.rec_cap = fields[16]
            self.fingers_av = fields[17]
            self.users_av = fields[18]
            self.rec_av = fields[19]
            self.__data = self.__data[80:]
        if len(self.__data) >= 12: #face info
            fields = unpack('3i', self.__data[:12]) #dirty hack! we need more information
            self.faces = fields[0]
            self.faces_cap = fields[2]
        return True
    else:
        raise ZKErrorResponse("can't read sizes")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.