scalene | scalene-master/scalene/replacement_poll_selector.py |
import selectors
import sys
import threading
import time
from typing import List, Optional, Tuple
from scalene.scalene_profiler import Scalene
@Scalene.shim
def replacement_poll_selector(scalene: Scalene) -> None:
"""
A replacement for selectors.PollSelector that
periodically wakes up to accept signals
"""
class ReplacementPollSelector(selectors.PollSelector):
def select(
self, timeout: Optional[float] = -1
) -> List[Tuple[selectors.SelectorKey, int]]:
tident = threading.get_ident()
start_time = time.perf_counter()
if not timeout or timeout < 0:
interval = sys.getswitchinterval()
else:
interval = min(timeout, sys.getswitchinterval())
while True:
scalene.set_thread_sleeping(tident)
selected = super().select(interval)
scalene.reset_thread_sleeping(tident)
if selected or timeout == 0:
return selected
end_time = time.perf_counter()
if timeout and timeout != -1:
if end_time - start_time >= timeout:
return [] # None
selectors.PollSelector = ReplacementPollSelector # type: ignore
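A blocked `select` call would otherwise postpone delivery of Scalene's profiling signals, so the replacement sleeps in short slices instead. Below is a minimal, hypothetical sketch of the resulting behavior, assuming the shim above has already run; the socketpair is purely illustrative, and `PollSelector` exists only on platforms that provide `select.poll`.

import selectors
import socket

sel = selectors.PollSelector()  # actually ReplacementPollSelector after patching
rd, wr = socket.socketpair()
sel.register(rd, selectors.EVENT_READ)
# Blocks for ~2 s total, but wakes roughly every sys.getswitchinterval()
# seconds internally, so pending signals can be handled promptly.
events = sel.select(timeout=2.0)
print(events)  # [] -- nothing became readable within the timeout
rd.close()
wr.close()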
scalene | scalene-master/scalene/scalene_preload.py |
import argparse
import contextlib
import os
import platform
import signal
import struct
import subprocess
import sys
from typing import Dict
import scalene
class ScalenePreload:
@staticmethod
def get_preload_environ(args: argparse.Namespace) -> Dict[str, str]:
env = dict()
# Set allocation sampling window (sync environment variable
# name with src/include/sampleheap.hpp).
env["SCALENE_ALLOCATION_SAMPLING_WINDOW"] = str(
args.allocation_sampling_window
)
# Set environment variables for loading the Scalene dynamic library,
# which interposes on allocation and copying functions.
if sys.platform == "linux":
if not args.cpu_only:
env["LD_PRELOAD"] = os.path.join(
scalene.__path__[0], "libscalene.so"
)
# Disable command-line specified PYTHONMALLOC.
if "PYTHONMALLOC" in env:
del env["PYTHONMALLOC"]
elif sys.platform == "darwin":
if not args.cpu_only:
env["DYLD_INSERT_LIBRARIES"] = os.path.join(
scalene.__path__[0], "libscalene.dylib"
)
# Disable command-line specified PYTHONMALLOC.
if "PYTHONMALLOC" in env:
del env["PYTHONMALLOC"]
# required for multiprocessing support, even without libscalene
env["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
elif sys.platform == "win32":
# Force CPU only on Windows for now.
args.cpu_only = True
return env
@staticmethod
def setup_preload(args: argparse.Namespace) -> bool:
"""
Ensures that Scalene runs with libscalene preloaded, if necessary,
as well as any other required environment variables.
Returns true iff we had to run another process.
"""
# First, check that we are on a supported platform.
# (x86-64 and ARM only for now.)
if not args.cpu_only and (
(
platform.machine() != "x86_64"
and platform.machine() != "AMD64"
and platform.machine() != "arm64"
and platform.machine() != "aarch64"
)
or struct.calcsize("P") * 8 != 64
):
args.cpu_only = True
print(
"Scalene warning: currently only 64-bit x86-64 and ARM platforms are supported for memory and copy profiling."
)
        with contextlib.suppress(Exception):
            from IPython import get_ipython
            if get_ipython():
                # In IPython / Jupyter, make exits raise a clean exception
                # instead of printing a backtrace.
                from scalene.scalene_parseargs import ScaleneParseArgs
                sys.exit = ScaleneParseArgs.clean_exit  # type: ignore
                sys._exit = ScaleneParseArgs.clean_exit  # type: ignore
# Start a subprocess with the required environment variables,
# which may include preloading libscalene
req_env = ScalenePreload.get_preload_environ(args)
if not all(k_v in os.environ.items() for k_v in req_env.items()):
os.environ.update(req_env)
new_args = [
sys.executable,
"-m",
"scalene",
] + sys.argv[1:]
result = subprocess.Popen(new_args, close_fds=True, shell=False)
with contextlib.suppress(Exception):
# If running in the background, print the PID.
if os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
# In the background.
print(f"Scalene now profiling process {result.pid}")
print(
f" to disable profiling: python3 -m scalene.profile --off --pid {result.pid}"
)
print(
f" to resume profiling: python3 -m scalene.profile --on --pid {result.pid}"
)
try:
result.wait()
except KeyboardInterrupt:
result.returncode = 0
if result.returncode < 0:
print(
"Scalene error: received signal",
signal.Signals(-result.returncode).name,
)
sys.exit(result.returncode)
return True
return False
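The core trick in setup_preload is "fix the environment, then re-exec": if the interposition variables are missing, Scalene restarts itself as a child that inherits them. A simplified, hypothetical sketch of that pattern follows; `respawn_with_env` and `required` are illustrative stand-ins for setup_preload and get_preload_environ's result.

import os
import subprocess
import sys

def respawn_with_env(required: dict) -> bool:
    """Return True iff a child was spawned to pick up `required`."""
    if all(item in os.environ.items() for item in required.items()):
        return False  # environment already satisfied; keep running in-process
    os.environ.update(required)
    child = subprocess.Popen(
        [sys.executable, "-m", "scalene"] + sys.argv[1:],
        close_fds=True,
        shell=False,
    )
    child.wait()
    sys.exit(child.returncode)  # the parent's job is done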
scalene | scalene-master/scalene/runningstats.py |
# Translated from C++ by Emery Berger from https://www.johndcook.com/blog/skewness_kurtosis/
import math
class RunningStats:
"""Incrementally compute statistics"""
def __init__(self) -> None:
self.clear()
def __add__(self: "RunningStats", other: "RunningStats") -> "RunningStats":
s = RunningStats()
if other._n > 0:
s._m1 = (self._m1 * self._n + other._m1 * other._n) / (
self._n + other._n
)
# TBD: Fix s._m2 and friends
# For now, leave at zero.
s._n = self._n + other._n
s._peak = max(self._peak, other._peak)
else:
s = self
return s
def clear(self) -> None:
"""Reset for new samples"""
self._n = 0
self._m1 = self._m2 = self._m3 = self._m4 = 0.0
self._peak = 0.0
def push(self, x: float) -> None:
"""Add a sample"""
if x > self._peak:
self._peak = x
n1 = self._n
self._n += 1
delta = x - self._m1
delta_n = delta / self._n
delta_n2 = delta_n * delta_n
term1 = delta * delta_n * n1
self._m1 += delta_n
self._m4 += (
term1 * delta_n2 * (self._n * self._n - 3 * self._n + 3)
+ 6 * delta_n2 * self._m2
- 4 * delta_n * self._m3
)
self._m3 += term1 * delta_n * (self._n - 2) - 3 * delta_n * self._m2
self._m2 += term1
def peak(self) -> float:
"""The maximum sample seen."""
return self._peak
def size(self) -> int:
"""The number of samples"""
return self._n
def mean(self) -> float:
"""Arithmetic mean, a.k.a. average"""
return self._m1
def var(self) -> float:
"""Variance"""
return self._m2 / (self._n - 1.0)
def std(self) -> float:
"""Standard deviation"""
return math.sqrt(self.var())
def sem(self) -> float:
"""Standard error of the mean"""
return self.std() / math.sqrt(self._n)
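A hypothetical usage sketch: the class keeps only running moments, so summary statistics are available at any point without storing the sample series.

from scalene.runningstats import RunningStats

rs = RunningStats()
for x in [1.0, 2.0, 3.0, 4.0]:
    rs.push(x)
print(rs.size())  # 4
print(rs.mean())  # 2.5
print(rs.var())   # 1.666... (sample variance: n - 1 in the denominator)
print(rs.peak())  # 4.0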
scalene | scalene-master/scalene/replacement_get_context.py |
import multiprocessing
from typing import Any
from scalene.scalene_profiler import Scalene
@Scalene.shim
def replacement_mp_get_context(scalene: Scalene) -> None:
old_get_context = multiprocessing.get_context
    def replacement_get_context(method: Any = None) -> Any:
        # Ignore the requested start method and always return the "fork"
        # context, so children inherit Scalene's state and signal handlers.
        return old_get_context("fork")
multiprocessing.get_context = replacement_get_context
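A hypothetical sketch of the effect: once the shim runs, callers receive a fork context no matter what they request. Note that the fork start method is unavailable on Windows, so this is POSIX-only.

import multiprocessing

ctx = multiprocessing.get_context("spawn")  # the request is ignored after patching
print(type(ctx).__name__)  # ForkContext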
scalene | scalene-master/scalene/scalene_mapfile.py |
import mmap
import os
import sys
from typing import Any, NewType, TextIO
if sys.platform != "win32":
from scalene import get_line_atomic # type: ignore
Filename = NewType("Filename", str)
class ScaleneMapFile:
# Things that need to be in sync with the C++ side
# (see include/sampleheap.hpp, include/samplefile.hpp)
MAX_BUFSIZE = 256 # Must match SampleFile::MAX_BUFSIZE
def __init__(self, name: str) -> None:
self._name = name
self._buf = bytearray(ScaleneMapFile.MAX_BUFSIZE)
# file to communicate samples (+ PID)
self._signal_filename = Filename(
f"/tmp/scalene-{name}-signal{os.getpid()}"
)
self._lock_filename = Filename(
f"/tmp/scalene-{name}-lock{os.getpid()}"
)
self._init_filename = Filename(
f"/tmp/scalene-{name}-init{os.getpid()}"
)
self._signal_position = 0
self._lastpos = bytearray(8)
self._signal_mmap = None
self._lock_mmap: mmap.mmap
self._signal_fd: TextIO
self._lock_fd: TextIO
self._signal_fd = open(self._signal_filename, "r")
os.unlink(self._signal_fd.name)
self._lock_fd = open(self._lock_filename, "r+")
os.unlink(self._lock_fd.name)
self._signal_mmap = mmap.mmap(
self._signal_fd.fileno(),
0,
mmap.MAP_SHARED,
mmap.PROT_READ,
)
self._lock_mmap = mmap.mmap(
self._lock_fd.fileno(),
0,
mmap.MAP_SHARED,
mmap.PROT_READ | mmap.PROT_WRITE,
)
def close(self) -> None:
"""Close the map file."""
self._signal_fd.close()
self._lock_fd.close()
def cleanup(self) -> None:
"""Remove all map files."""
try:
os.remove(self._init_filename)
os.remove(self._signal_filename)
except FileNotFoundError:
pass
def read(self) -> Any:
"""Read a line from the map file."""
if sys.platform == "win32":
return False
if not self._signal_mmap:
return False
return get_line_atomic.get_line_atomic(
self._lock_mmap, self._signal_mmap, self._buf, self._lastpos
)
def get_str(self) -> str:
"""Get the string from the buffer."""
map_str = self._buf.rstrip(b"\x00").split(b"\n")[0].decode("ascii")
return map_str
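The __init__ above opens each /tmp file and immediately os.unlink's it: on POSIX the directory entry disappears, but the open descriptor (and any mmap built on it) stays valid, so the files vanish automatically when the process exits. A small, hypothetical demonstration of that idiom, with an illustrative temp path:

import os
import tempfile

path = tempfile.mktemp(prefix="scalene-demo-")
with open(path, "w+") as f:
    os.unlink(path)            # the name is gone; data lives on through the fd
    f.write("still usable")
    f.seek(0)
    print(f.read())            # still usable
print(os.path.exists(path))    # False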
scalene | scalene-master/scalene/scalene_client_timer.py |
from typing import Tuple
class ScaleneClientTimer:
"""
A class to wrap the logic of a timer running at
a different frequency than the Scalene timer. Can handle at most
one timer.
"""
seconds: float
interval: float
remaining_seconds: float
remaining_interval: float
delay_elapsed: bool
is_set: bool
def __init__(self) -> None:
self.seconds = 0.0
self.interval = 0.0
self.is_set = False
def set_itimer(self, seconds: float, interval: float) -> None:
self.seconds = seconds
self.interval = interval
self.remaining_seconds = seconds
self.remaining_interval = interval
self.delay_elapsed = False
self.is_set = True
def reset(self) -> None:
"""Reset the timer."""
self.seconds = 0.0
self.interval = 0.0
self.is_set = False
def get_itimer(self) -> Tuple[float, float]:
"""Returns a tuple of (seconds, interval)."""
return self.seconds, self.interval
def yield_next_delay(self, elapsed: float) -> Tuple[bool, float]:
"""
Updates remaining_interval or remaining_seconds, returning whether
the timer signal should be passed up to the client and
the next delay. If the second return <= 0, then
there is no interval and the delay has elapsed.
"""
if self.delay_elapsed:
self.remaining_interval -= elapsed
is_done = self.remaining_interval <= 0
if is_done:
self.remaining_interval = self.interval
return is_done, self.remaining_interval
self.remaining_seconds -= elapsed
is_done = self.remaining_seconds <= 0
if is_done:
self.delay_elapsed = True
return (
is_done,
self.remaining_interval if is_done else self.remaining_seconds,
)
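A hypothetical walk-through of yield_next_delay, driving a timer of seconds=1.0, interval=0.5 with 0.6-second Scalene ticks (values are approximate due to floating point):

from scalene.scalene_client_timer import ScaleneClientTimer

timer = ScaleneClientTimer()
timer.set_itimer(1.0, 0.5)
print(timer.yield_next_delay(0.6))  # (False, ~0.4): still inside the initial delay
print(timer.yield_next_delay(0.6))  # (True, 0.5): delay elapsed; interval armed
print(timer.yield_next_delay(0.6))  # (True, 0.5): interval fired and was re-armed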
scalene | scalene-master/scalene/scalene_output.py |
import random
import sys
import tempfile
from collections import OrderedDict, defaultdict
from operator import itemgetter
from pathlib import Path
from typing import Any, Callable, Dict, List, Union
from rich import box
from rich.console import Console
from rich.markdown import Markdown
from rich.syntax import Syntax
from rich.table import Table
from rich.text import Text
from scalene import sparkline
from scalene.scalene_json import ScaleneJSON
from scalene.scalene_statistics import Filename, LineNumber, ScaleneStatistics
from scalene.scalene_leak_analysis import ScaleneLeakAnalysis
from scalene.syntaxline import SyntaxLine
class ScaleneOutput:
# Maximum entries for sparklines, per file
max_sparkline_len_file = 27
# Maximum entries for sparklines, per line
max_sparkline_len_line = 9
# Threshold for highlighting lines of code in red.
highlight_percentage = 33
# Color for highlighted text (over the threshold of CPU time)
highlight_color = "bold red"
def __init__(self) -> None:
# where we write profile info
self.output_file = ""
# if we output HTML or not
self.html = False
# if we are on a GPU or not
self.gpu = False
# Profile output methods
def output_top_memory(
self, title: str, console: Console, mallocs: Dict[LineNumber, float]
) -> None:
# Print the top N lines by memory consumption, as long
# as they are above some threshold MB in size.
print_top_mallocs_count = 5
print_top_mallocs_threshold_mb = 1
if len(mallocs) > 0:
printed_header = False
number = 1
for malloc_lineno in mallocs:
# Don't print lines with less than the threshold MB allocated.
if mallocs[malloc_lineno] <= print_top_mallocs_threshold_mb:
break
# Only print the top N.
if number > print_top_mallocs_count:
break
# Print the header only if we are printing something (and only once).
if not printed_header:
console.print(title)
printed_header = True
output_str = f"({str(number)}) {malloc_lineno:5.0f}: {(mallocs[malloc_lineno]):5.0f} MB"
console.print(Markdown(output_str, style="dark_green"))
number += 1
def output_profile_line(
self,
json: ScaleneJSON,
fname: Filename,
line_no: LineNumber,
line: SyntaxLine,
console: Console,
tbl: Table,
stats: ScaleneStatistics,
profile_this_code: Callable[[Filename, LineNumber], bool],
force_print: bool = False,
suppress_lineno_print: bool = False,
is_function_summary: bool = False,
profile_memory: bool = False,
reduced_profile: bool = False,
) -> bool:
"""Print at most one line of the profile (true == printed one)."""
obj = json.output_profile_line(
fname=fname,
fname_print=fname,
line_no=line_no,
stats=stats,
profile_this_code=profile_this_code,
force_print=force_print,
)
if not obj:
return False
if -1 < obj["n_peak_mb"] < 1:
# Don't print out "-0" or anything below 1.
obj["n_peak_mb"] = 0
# Finally, print results.
n_cpu_percent_c_str: str = (
""
if obj["n_cpu_percent_c"] < 1
else f"{obj['n_cpu_percent_c']:5.0f}%"
)
n_gpu_percent_str: str = (
"" if obj["n_gpu_percent"] < 1 else f"{obj['n_gpu_percent']:3.0f}%"
)
n_cpu_percent_python_str: str = (
""
if obj["n_cpu_percent_python"] < 1
else f"{obj['n_cpu_percent_python']:5.0f}%"
)
n_growth_mem_str = ""
if obj["n_peak_mb"] < 1024:
n_growth_mem_str = (
""
if (not obj["n_peak_mb"] and not obj["n_usage_fraction"])
else f"{obj['n_peak_mb']:5.0f}M"
)
else:
n_growth_mem_str = (
""
if (not obj["n_peak_mb"] and not obj["n_usage_fraction"])
else f"{(obj['n_peak_mb'] / 1024):5.2f}G"
)
n_usage_fraction_str: str = (
""
if obj["n_usage_fraction"] < 0.01
else f"{(100 * obj['n_usage_fraction']):4.0f}%"
)
n_python_fraction_str: str = (
""
if obj["n_python_fraction"] < 0.01
else f"{(obj['n_python_fraction'] * 100):4.0f}%"
)
n_copy_mb_s_str: str = (
"" if obj["n_copy_mb_s"] < 0.5 else f"{obj['n_copy_mb_s']:6.0f}"
)
# Only report utilization where there is more than 1% CPU total usage.
sys_str: str = (
"" if obj["n_sys_percent"] < 1 else f"{obj['n_sys_percent']:4.0f}%"
)
if not is_function_summary:
print_line_no = "" if suppress_lineno_print else str(line_no)
else:
print_line_no = (
""
if fname not in stats.firstline_map
else str(stats.firstline_map[fname])
)
if profile_memory:
spark_str: str = ""
# Scale the sparkline by the usage fraction.
samples = obj["memory_samples"]
# Randomly downsample to ScaleneOutput.max_sparkline_len_line.
if len(samples) > ScaleneOutput.max_sparkline_len_line:
random_samples = sorted(
random.sample(
samples, ScaleneOutput.max_sparkline_len_line
)
)
else:
random_samples = samples
sparkline_samples = []
for i in range(0, len(random_samples)):
sparkline_samples.append(
random_samples[i][1] * obj["n_usage_fraction"]
)
if random_samples:
_, _, spark_str = sparkline.generate(
sparkline_samples, 0, stats.max_footprint
)
# Red highlight
ncpps: Any = ""
ncpcs: Any = ""
nufs: Any = ""
ngpus: Any = ""
if (
obj["n_usage_fraction"] >= self.highlight_percentage
or (
obj["n_cpu_percent_c"]
+ obj["n_cpu_percent_python"]
+ obj["n_gpu_percent"]
)
>= self.highlight_percentage
):
ncpps = Text.assemble(
(n_cpu_percent_python_str, self.highlight_color)
)
ncpcs = Text.assemble(
(n_cpu_percent_c_str, self.highlight_color)
)
nufs = Text.assemble(
(spark_str + n_usage_fraction_str, self.highlight_color)
)
ngpus = Text.assemble(
(n_gpu_percent_str, self.highlight_color)
)
else:
ncpps = n_cpu_percent_python_str
ncpcs = n_cpu_percent_c_str
ngpus = n_gpu_percent_str
nufs = spark_str + n_usage_fraction_str
if not reduced_profile or ncpps + ncpcs + nufs:
if self.gpu:
tbl.add_row(
print_line_no,
ncpps, # n_cpu_percent_python_str,
ncpcs, # n_cpu_percent_c_str,
sys_str,
ngpus,
n_python_fraction_str,
n_growth_mem_str,
nufs, # spark_str + n_usage_fraction_str,
n_copy_mb_s_str,
line,
)
else:
tbl.add_row(
print_line_no,
ncpps, # n_cpu_percent_python_str,
ncpcs, # n_cpu_percent_c_str,
sys_str,
n_python_fraction_str,
n_growth_mem_str,
nufs, # spark_str + n_usage_fraction_str,
n_copy_mb_s_str,
line,
)
return True
else:
return False
else:
# Red highlight
if (
obj["n_cpu_percent_c"]
+ obj["n_cpu_percent_python"]
+ obj["n_gpu_percent"]
) >= self.highlight_percentage:
ncpps = Text.assemble(
(n_cpu_percent_python_str, self.highlight_color)
)
ncpcs = Text.assemble(
(n_cpu_percent_c_str, self.highlight_color)
)
ngpus = Text.assemble(
(n_gpu_percent_str, self.highlight_color)
)
else:
ncpps = n_cpu_percent_python_str
ncpcs = n_cpu_percent_c_str
ngpus = n_gpu_percent_str
if not reduced_profile or ncpps + ncpcs:
if self.gpu:
tbl.add_row(
print_line_no,
ncpps, # n_cpu_percent_python_str,
ncpcs, # n_cpu_percent_c_str,
sys_str,
ngpus, # n_gpu_percent_str
line,
)
else:
tbl.add_row(
print_line_no,
ncpps, # n_cpu_percent_python_str,
ncpcs, # n_cpu_percent_c_str,
sys_str,
line,
)
return True
else:
return False
def output_profiles(
self,
column_width: int,
stats: ScaleneStatistics,
pid: int,
profile_this_code: Callable[[Filename, LineNumber], bool],
python_alias_dir: Path,
profile_memory: bool = True,
reduced_profile: bool = False,
) -> bool:
"""Write the profile out."""
# Get the children's stats, if any.
json = ScaleneJSON()
json.gpu = self.gpu
if not pid:
stats.merge_stats(python_alias_dir)
# If we've collected any samples, dump them.
if (
not stats.total_cpu_samples
and not stats.total_memory_malloc_samples
and not stats.total_memory_free_samples
):
# Nothing to output.
return False
# Collect all instrumented filenames.
all_instrumented_files: List[Filename] = list(
set(
list(stats.cpu_samples_python.keys())
+ list(stats.cpu_samples_c.keys())
+ list(stats.memory_free_samples.keys())
+ list(stats.memory_malloc_samples.keys())
)
)
if not all_instrumented_files:
# We didn't collect samples in source files.
return False
mem_usage_line: Union[Text, str] = ""
growth_rate = 0.0
if profile_memory:
samples = stats.memory_footprint_samples
if len(samples) > 0:
# Randomly downsample samples
if len(samples) > ScaleneOutput.max_sparkline_len_file:
random_samples = sorted(
random.sample(
samples, ScaleneOutput.max_sparkline_len_file
)
)
else:
random_samples = samples
sparkline_samples = [item[1] for item in random_samples]
# Output a sparkline as a summary of memory usage over time.
_, _, spark_str = sparkline.generate(
sparkline_samples[: ScaleneOutput.max_sparkline_len_file],
0,
stats.max_footprint,
)
# Compute growth rate (slope), between 0 and 1.
if stats.allocation_velocity[1] > 0:
growth_rate = (
100.0
* stats.allocation_velocity[0]
/ stats.allocation_velocity[1]
)
# If memory used is > 1GB, use GB as the unit.
if stats.max_footprint > 1024:
mem_usage_line = Text.assemble(
"Memory usage: ",
((spark_str, "dark_green")),
(
f" (max: {(stats.max_footprint / 1024):6.2f}GB, growth rate: {growth_rate:3.0f}%)\n"
),
)
else:
# Otherwise, use MB.
mem_usage_line = Text.assemble(
"Memory usage: ",
((spark_str, "dark_green")),
(
f" (max: {stats.max_footprint:6.2f}MB, growth rate: {growth_rate:3.0f}%)\n"
),
)
null = tempfile.TemporaryFile(mode="w+")
console = Console(
width=column_width,
record=True,
force_terminal=True,
file=null,
force_jupyter=False,
)
# Build a list of files we will actually report on.
report_files: List[Filename] = []
# Sort in descending order of CPU cycles, and then ascending order by filename
for fname in sorted(
all_instrumented_files,
key=lambda f: (-(stats.cpu_samples[f]), f),
):
fname = Filename(fname)
try:
percent_cpu_time = (
100 * stats.cpu_samples[fname] / stats.total_cpu_samples
)
except ZeroDivisionError:
percent_cpu_time = 0
# Ignore files responsible for less than some percent of execution time and fewer than a threshold # of mallocs.
if (
stats.malloc_samples[fname] < ScaleneJSON.malloc_threshold
and percent_cpu_time < ScaleneJSON.cpu_percent_threshold
):
continue
report_files.append(fname)
# Don't actually output the profile if we are a child process.
# Instead, write info to disk for the main process to collect.
if pid:
stats.output_stats(pid, python_alias_dir)
return True
if len(report_files) == 0:
return False
for fname in report_files:
# If the file was actually a Jupyter (IPython) cell,
# restore its name, as in "[12]".
fname_print = fname
import re
result = re.match("<ipython-input-([0-9]+)-.*>", fname_print)
if result:
fname_print = Filename("[" + result.group(1) + "]")
# Print header.
if not stats.total_cpu_samples:
percent_cpu_time = 0
else:
percent_cpu_time = (
100 * stats.cpu_samples[fname] / stats.total_cpu_samples
)
new_title = mem_usage_line + (
f"{fname_print}: % of time = {percent_cpu_time:6.2f} out of {stats.elapsed_time:6.2f}."
)
# Only display total memory usage once.
mem_usage_line = ""
tbl = Table(
box=box.MINIMAL_HEAVY_HEAD,
title=new_title,
collapse_padding=True,
width=column_width - 1,
)
tbl.add_column(
Markdown("Line", style="dim"),
style="dim",
justify="right",
no_wrap=True,
width=4,
)
tbl.add_column(
Markdown("Time " + "\n" + "_Python_", style="blue"),
style="blue",
no_wrap=True,
width=6,
)
tbl.add_column(
Markdown("–––––– \n_native_", style="blue"),
style="blue",
no_wrap=True,
width=6,
)
tbl.add_column(
Markdown("–––––– \n_system_", style="blue"),
style="blue",
no_wrap=True,
width=6,
)
if self.gpu:
tbl.add_column(
Markdown("–––––– \n_GPU_", style="yellow4"),
style="yellow4",
no_wrap=True,
width=6,
)
other_columns_width = 0 # Size taken up by all columns BUT code
if profile_memory:
tbl.add_column(
Markdown("Memory \n_Python_", style="dark_green"),
style="dark_green",
no_wrap=True,
width=7,
)
tbl.add_column(
Markdown("–––––– \n_peak_", style="dark_green"),
style="dark_green",
no_wrap=True,
width=6,
)
tbl.add_column(
Markdown(
"––––––––––– \n_timeline_/%", style="dark_green"
),
style="dark_green",
no_wrap=True,
width=15,
)
tbl.add_column(
Markdown("Copy \n_(MB/s)_", style="yellow4"),
style="yellow4",
no_wrap=True,
width=6,
)
other_columns_width = 75 + (6 if self.gpu else 0)
tbl.add_column(
"\n" + fname_print,
width=column_width - other_columns_width,
no_wrap=True,
)
else:
other_columns_width = 37 + (5 if self.gpu else 0)
tbl.add_column(
"\n" + fname_print,
width=column_width - other_columns_width,
no_wrap=True,
)
            # Skip bogus or empty filenames.
if fname == "<BOGUS>":
continue
if not fname:
continue
# Print out the profile for the source, line by line.
with open(fname, "r", encoding="utf-8") as source_file:
# We track whether we should put in ellipsis (for reduced profiles)
# or not.
did_print = True # did we print a profile line last time?
code_lines = source_file.read()
# Generate syntax highlighted version for the whole file,
# which we will consume a line at a time.
# See https://github.com/willmcgugan/rich/discussions/965#discussioncomment-314233
syntax_highlighted = Syntax(
code_lines,
"python",
theme="default" if self.html else "vim",
line_numbers=False,
code_width=None,
)
capture_console = Console(
width=column_width - other_columns_width,
force_terminal=True,
)
formatted_lines = [
SyntaxLine(segments)
for segments in capture_console.render_lines(
syntax_highlighted
)
]
for line_no, line in enumerate(formatted_lines, start=1):
old_did_print = did_print
did_print = self.output_profile_line(
json=json,
fname=fname,
line_no=LineNumber(line_no),
line=line,
console=console,
tbl=tbl,
stats=stats,
profile_this_code=profile_this_code,
profile_memory=profile_memory,
force_print=False,
suppress_lineno_print=False,
is_function_summary=False,
reduced_profile=reduced_profile,
)
if old_did_print and not did_print:
# We are skipping lines, so add an ellipsis.
tbl.add_row("...")
old_did_print = did_print
# Potentially print a function summary.
fn_stats = stats.build_function_stats(fname)
print_fn_summary = False
# Check CPU samples and memory samples.
all_samples = set()
all_samples |= set(fn_stats.cpu_samples_python.keys())
all_samples |= set(fn_stats.cpu_samples_c.keys())
all_samples |= set(fn_stats.memory_malloc_samples.keys())
all_samples |= set(fn_stats.memory_free_samples.keys())
for fn_name in all_samples:
if fn_name == fname:
continue
print_fn_summary = True
break
if print_fn_summary:
try:
tbl.add_row(None, end_section=True)
except TypeError: # rich < 9.4.0 compatibility
tbl.add_row(None)
txt = Text.assemble(
f"function summary for {fname}", style="bold italic"
)
if profile_memory:
if self.gpu:
tbl.add_row("", "", "", "", "", "", "", "", "", txt)
else:
tbl.add_row("", "", "", "", "", "", "", "", txt)
else:
if self.gpu:
tbl.add_row("", "", "", "", "", txt)
else:
tbl.add_row("", "", "", "", txt)
for fn_name in sorted(
fn_stats.cpu_samples_python,
key=lambda k: stats.firstline_map[k],
):
if fn_name == fname:
continue
syntax_highlighted = Syntax(
fn_name,
"python",
theme="default" if self.html else "vim",
line_numbers=False,
code_width=None,
)
# force print, suppress line numbers
self.output_profile_line(
json=json,
fname=fn_name,
line_no=LineNumber(1),
line=syntax_highlighted, # type: ignore
console=console,
tbl=tbl,
stats=fn_stats,
profile_this_code=profile_this_code,
profile_memory=profile_memory,
force_print=True,
suppress_lineno_print=True,
is_function_summary=True,
reduced_profile=reduced_profile,
)
console.print(tbl)
# Compute AVERAGE memory consumption.
avg_mallocs: Dict[LineNumber, float] = defaultdict(float)
for line_no in stats.bytei_map[fname]:
n_malloc_mb = stats.memory_aggregate_footprint[fname][line_no]
count = stats.memory_malloc_count[fname][line_no]
if count:
avg_mallocs[line_no] = n_malloc_mb / count
else:
# Setting to n_malloc_mb addresses the edge case where this allocation is the last line executed.
avg_mallocs[line_no] = n_malloc_mb
avg_mallocs = OrderedDict(
sorted(avg_mallocs.items(), key=itemgetter(1), reverse=True)
)
# Compute (really, aggregate) PEAK memory consumption.
peak_mallocs: Dict[LineNumber, float] = defaultdict(float)
for line_no in stats.bytei_map[fname]:
peak_mallocs[line_no] = stats.memory_max_footprint[fname][
line_no
]
peak_mallocs = OrderedDict(
sorted(peak_mallocs.items(), key=itemgetter(1), reverse=True)
)
# Print the top N lines by AVERAGE memory consumption, as long
# as they are above some threshold MB in size.
self.output_top_memory(
"Top AVERAGE memory consumption, by line:",
console,
avg_mallocs,
)
# Print the top N lines by PEAK memory consumption, as long
# as they are above some threshold MB in size.
self.output_top_memory(
"Top PEAK memory consumption, by line:", console, peak_mallocs
)
# Only report potential leaks if the allocation velocity (growth rate) is above some threshold.
leaks = ScaleneLeakAnalysis.compute_leaks(
growth_rate, stats, avg_mallocs, fname
)
if len(leaks) > 0:
# Report in descending order by least likelihood
for leak in sorted(leaks, key=itemgetter(1), reverse=True):
output_str = f"Possible memory leak identified at line {str(leak[0])} (estimated likelihood: {(leak[1] * 100):3.0f}%, velocity: {(leak[2] / stats.elapsed_time):3.0f} MB/s)"
console.print(output_str)
if self.html:
# Write HTML file.
md = Markdown(
"generated by the [scalene](https://github.com/plasma-umass/scalene) profiler"
)
console.print(md)
if not self.output_file:
self.output_file = "/dev/stdout"
console.save_html(self.output_file, clear=False)
else:
if not self.output_file:
# No output file specified: write to stdout.
sys.stdout.write(console.export_text(styles=True))
else:
# Don't output styles to text file.
console.save_text(self.output_file, styles=False, clear=False)
return True
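The rendering pipeline above syntax-highlights a whole file once, slices the result into per-line renderables, and feeds those to a rich Table. A trimmed-down, hypothetical sketch of the same flow, with illustrative widths and source text:

from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table
from scalene.syntaxline import SyntaxLine

code = "x = 1\ny = x + 1\n"
highlighted = Syntax(code, "python", line_numbers=False)
# Render at the width reserved for the code column, as output_profiles does.
capture = Console(width=40, force_terminal=True)
lines = [SyntaxLine(segs) for segs in capture.render_lines(highlighted)]
tbl = Table()
tbl.add_column("Line", justify="right", width=4)
tbl.add_column("Source")
for no, line in enumerate(lines, start=1):
    tbl.add_row(str(no), line)
Console(width=60).print(tbl)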
scalene | scalene-master/scalene/scalene_statistics.py |
import os
import pathlib
import pickle
import time
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, NewType, Set, Tuple, TypeVar
import cloudpickle
# from scalene.adaptive import Adaptive
from scalene.runningstats import RunningStats
Address = NewType("Address", str)
Filename = NewType("Filename", str)
LineNumber = NewType("LineNumber", int)
ByteCodeIndex = NewType("ByteCodeIndex", int)
T = TypeVar("T")
class ScaleneStatistics:
# Statistics counters:
#
def __init__(self) -> None:
# time the profiling started
self.start_time: float = 0
# total time spent in program being profiled
self.elapsed_time: float = 0
# CPU samples for each location in the program
# spent in the interpreter
self.cpu_samples_python: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# CPU samples for each location in the program
# spent in C / libraries / system calls
self.cpu_samples_c: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# GPU samples for each location in the program
self.gpu_samples: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# GPU memory samples for each location in the program
self.gpu_mem_samples: DefaultDict[
Filename, DefaultDict[LineNumber, RunningStats]
] = defaultdict(lambda: defaultdict(RunningStats))
# Running stats for the fraction of time running on the CPU.
self.cpu_utilization: Dict[
Filename, Dict[LineNumber, RunningStats]
] = defaultdict(lambda: defaultdict(RunningStats))
# Running count of total CPU samples per file. Used to prune reporting.
self.cpu_samples: Dict[Filename, float] = defaultdict(float)
# Running count of malloc samples per file. Used to prune reporting.
self.malloc_samples: Dict[Filename, float] = defaultdict(float)
# malloc samples for each location in the program
self.memory_malloc_samples: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# number of times samples were added for the above
self.memory_malloc_count: Dict[
Filename, Dict[LineNumber, int]
] = defaultdict(lambda: defaultdict(int))
# the current footprint for this line
self.memory_current_footprint: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# the max footprint for this line
self.memory_max_footprint: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# the current high watermark for this line
self.memory_current_highwater_mark: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# the aggregate footprint for this line (sum of all final "current"s)
self.memory_aggregate_footprint: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# the last malloc to trigger a sample (used for leak detection)
self.last_malloc_triggered: Tuple[Filename, LineNumber, Address] = (
Filename(""),
LineNumber(0),
Address("0x0"),
)
# mallocs attributable to Python, for each location in the program
self.memory_python_samples: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# free samples for each location in the program
self.memory_free_samples: Dict[
Filename, Dict[LineNumber, float]
] = defaultdict(lambda: defaultdict(float))
# number of times samples were added for the above
self.memory_free_count: Dict[
Filename, Dict[LineNumber, int]
] = defaultdict(lambda: defaultdict(int))
# memcpy samples for each location in the program
self.memcpy_samples: Dict[
Filename, Dict[LineNumber, int]
] = defaultdict(lambda: defaultdict(int))
# leak score tracking
self.leak_score: Dict[
Filename, Dict[LineNumber, Tuple[int, int]]
] = defaultdict(lambda: defaultdict(lambda: ((0, 0))))
self.allocation_velocity: Tuple[float, float] = (0.0, 0.0)
# how many CPU samples have been collected
self.total_cpu_samples: float = 0.0
# how many GPU samples have been collected
self.total_gpu_samples: float = 0.0
# " " malloc " " " "
self.total_memory_malloc_samples: float = 0.0
# " " free " " " "
self.total_memory_free_samples: float = 0.0
# the current memory footprint
self.current_footprint: float = 0.0
# the peak memory footprint
self.max_footprint: float = 0.0
# memory footprint samples (time, footprint)
self.memory_footprint_samples: List[List[float]] = []
# same, but per line
self.per_line_footprint_samples: Dict[
Filename, Dict[LineNumber, List[Any]]
] = defaultdict(lambda: defaultdict(list))
# maps byte indices to line numbers (collected at runtime)
# [filename][lineno] -> set(byteindex)
self.bytei_map: Dict[
Filename, Dict[LineNumber, Set[ByteCodeIndex]]
] = defaultdict(lambda: defaultdict(lambda: set()))
# maps filenames and line numbers to functions (collected at runtime)
# [filename][lineno] -> function name
self.function_map: Dict[
Filename, Dict[LineNumber, Filename]
] = defaultdict(lambda: defaultdict(lambda: Filename("")))
self.firstline_map: Dict[Filename, LineNumber] = defaultdict(
lambda: LineNumber(1)
)
def clear(self) -> None:
self.start_time = 0
self.elapsed_time = 0
self.cpu_samples_python.clear()
self.cpu_samples_c.clear()
self.cpu_utilization.clear()
self.cpu_samples.clear()
self.gpu_samples.clear()
self.malloc_samples.clear()
self.memory_malloc_samples.clear()
self.memory_malloc_count.clear()
self.memory_current_footprint.clear()
self.memory_max_footprint.clear()
self.memory_current_highwater_mark.clear()
self.memory_aggregate_footprint.clear()
self.memory_python_samples.clear()
self.memory_free_samples.clear()
self.memory_free_count.clear()
self.memcpy_samples.clear()
self.total_cpu_samples = 0.0
self.total_gpu_samples = 0.0
self.total_memory_malloc_samples = 0.0
self.total_memory_free_samples = 0.0
self.current_footprint = 0.0
self.leak_score.clear()
self.last_malloc_triggered = (
Filename(""),
LineNumber(0),
Address("0x0"),
)
self.allocation_velocity = (0.0, 0.0)
self.per_line_footprint_samples.clear()
self.bytei_map.clear()
# Not clearing current footprint
# Not clearing max footprint
# FIXME: leak score, leak velocity
def clear_all(self) -> None:
self.clear()
self.current_footprint = 0
self.max_footprint = 0
self.per_line_footprint_samples.clear()
def start_clock(self) -> None:
self.start_time = time.time()
def stop_clock(self) -> None:
if self.start_time > 0:
self.elapsed_time += time.time() - self.start_time
self.start_time = 0
def build_function_stats(self, filename: Filename): # type: ignore
fn_stats = ScaleneStatistics()
fn_stats.elapsed_time = self.elapsed_time
fn_stats.total_cpu_samples = self.total_cpu_samples
fn_stats.total_gpu_samples = self.total_gpu_samples
fn_stats.total_memory_malloc_samples = self.total_memory_malloc_samples
first_line_no = LineNumber(1)
fn_stats.function_map = self.function_map
fn_stats.firstline_map = self.firstline_map
for line_no in self.function_map[filename]:
fn_name = self.function_map[filename][line_no]
if fn_name == "<module>":
continue
fn_stats.cpu_samples_c[fn_name][
first_line_no
] += self.cpu_samples_c[filename][line_no]
fn_stats.cpu_samples_python[fn_name][
first_line_no
] += self.cpu_samples_python[filename][line_no]
fn_stats.gpu_samples[fn_name][first_line_no] += self.gpu_samples[
filename
][line_no]
fn_stats.gpu_mem_samples[fn_name][
first_line_no
] += self.gpu_mem_samples[filename][line_no]
fn_stats.cpu_utilization[fn_name][
first_line_no
] += self.cpu_utilization[filename][line_no]
fn_stats.per_line_footprint_samples[fn_name][
first_line_no
] += self.per_line_footprint_samples[filename][line_no]
fn_stats.memory_malloc_count[fn_name][
first_line_no
] += self.memory_malloc_count[filename][line_no]
fn_stats.memory_free_count[fn_name][
first_line_no
] += self.memory_free_count[filename][line_no]
fn_stats.memory_malloc_samples[fn_name][
first_line_no
] += self.memory_malloc_samples[filename][line_no]
fn_stats.memory_python_samples[fn_name][
first_line_no
] += self.memory_python_samples[filename][line_no]
fn_stats.memory_free_samples[fn_name][
first_line_no
] += self.memory_free_samples[filename][line_no]
for index in self.bytei_map[filename][line_no]:
fn_stats.bytei_map[fn_name][first_line_no].add(
ByteCodeIndex(0)
)
fn_stats.memcpy_samples[fn_name][
first_line_no
] += self.memcpy_samples[filename][line_no]
fn_stats.leak_score[fn_name][first_line_no] = (
fn_stats.leak_score[fn_name][first_line_no][0]
+ self.leak_score[filename][line_no][0],
fn_stats.leak_score[fn_name][first_line_no][1]
+ self.leak_score[filename][line_no][1],
)
fn_stats.memory_max_footprint[fn_name][first_line_no] = max(
fn_stats.memory_max_footprint[fn_name][first_line_no],
self.memory_max_footprint[filename][line_no],
)
fn_stats.memory_aggregate_footprint[fn_name][
first_line_no
] += self.memory_aggregate_footprint[filename][line_no]
return fn_stats
payload_contents = [
"max_footprint",
"current_footprint",
"elapsed_time",
"total_cpu_samples",
"cpu_samples_c",
"cpu_samples_python",
"bytei_map",
"cpu_samples",
"cpu_utilization",
"memory_malloc_samples",
"memory_python_samples",
"memory_free_samples",
"memcpy_samples",
"memory_max_footprint",
"per_line_footprint_samples",
"total_memory_free_samples",
"total_memory_malloc_samples",
"memory_footprint_samples",
"function_map",
"firstline_map",
"gpu_samples",
"total_gpu_samples",
"memory_malloc_count",
"memory_free_count",
]
# To be added: __malloc_samples
def output_stats(self, pid: int, dir_name: pathlib.Path) -> None:
"""Output statistics for a particular process to a given directory."""
payload: List[Any] = []
for n in ScaleneStatistics.payload_contents:
payload.append(getattr(self, n))
# Create a file in the Python alias directory with the relevant info.
out_filename = os.path.join(
dir_name,
"scalene" + str(pid) + "-" + str(os.getpid()),
)
with open(out_filename, "wb") as out_file:
cloudpickle.dump(payload, out_file)
@staticmethod
def increment_per_line_samples(
dest: Dict[Filename, Dict[LineNumber, T]],
src: Dict[Filename, Dict[LineNumber, T]],
) -> None:
"""Increment single-line dest samples by their value in src."""
for filename in src:
for lineno in src[filename]:
v = src[filename][lineno]
dest[filename][lineno] += v # type: ignore
@staticmethod
def increment_cpu_utilization(
dest: Dict[Filename, Dict[LineNumber, RunningStats]],
src: Dict[Filename, Dict[LineNumber, RunningStats]],
) -> None:
for filename in src:
for lineno in src[filename]:
dest[filename][lineno] += src[filename][lineno]
def merge_stats(self, the_dir_name: pathlib.Path) -> None:
"""Merge all statistics in a given directory."""
the_dir = pathlib.Path(the_dir_name)
for f in list(the_dir.glob("**/scalene*")):
# Skip empty files.
if os.path.getsize(f) == 0:
continue
with open(f, "rb") as file:
unpickler = pickle.Unpickler(file)
value = unpickler.load()
x = ScaleneStatistics()
for i, n in enumerate(ScaleneStatistics.payload_contents):
setattr(x, n, value[i])
self.max_footprint = max(self.max_footprint, x.max_footprint)
self.current_footprint = max(
self.current_footprint, x.current_footprint
)
self.increment_cpu_utilization(
self.cpu_utilization, x.cpu_utilization
)
self.elapsed_time = max(self.elapsed_time, x.elapsed_time)
self.total_cpu_samples += x.total_cpu_samples
self.total_gpu_samples += x.total_gpu_samples
self.increment_per_line_samples(
self.cpu_samples_c, x.cpu_samples_c
)
self.increment_per_line_samples(
self.cpu_samples_python, x.cpu_samples_python
)
self.increment_per_line_samples(
self.gpu_samples, x.gpu_samples
)
self.increment_per_line_samples(
self.memcpy_samples, x.memcpy_samples
)
self.increment_per_line_samples(
self.per_line_footprint_samples,
x.per_line_footprint_samples,
)
self.increment_per_line_samples(
self.memory_malloc_count, x.memory_malloc_count
)
self.increment_per_line_samples(
self.memory_malloc_samples, x.memory_malloc_samples
)
self.increment_per_line_samples(
self.memory_python_samples, x.memory_python_samples
)
self.increment_per_line_samples(
self.memory_free_samples, x.memory_free_samples
)
self.increment_per_line_samples(
self.memory_free_count, x.memory_free_count
)
for filename in x.bytei_map:
for lineno in x.bytei_map[filename]:
v = x.bytei_map[filename][lineno]
self.bytei_map[filename][lineno] |= v
self.memory_max_footprint[filename][lineno] = max(
self.memory_max_footprint[filename][lineno],
x.memory_max_footprint[filename][lineno],
)
for filename in x.cpu_samples:
self.cpu_samples[filename] += x.cpu_samples[filename]
self.total_memory_free_samples += x.total_memory_free_samples
self.total_memory_malloc_samples += (
x.total_memory_malloc_samples
)
self.memory_footprint_samples += x.memory_footprint_samples
for k, val in x.function_map.items():
if k in self.function_map:
self.function_map[k].update(val)
else:
self.function_map[k] = val
self.firstline_map.update(x.firstline_map)
os.remove(f)
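A hypothetical sketch of the per-line merge helper: the nested filename-to-line-number maps are summed element-wise, which is how child-process statistics fold into the parent's. Plain strings and ints stand in for the Filename and LineNumber NewTypes here.

from collections import defaultdict
from scalene.scalene_statistics import ScaleneStatistics

dest = defaultdict(lambda: defaultdict(float))
src = defaultdict(lambda: defaultdict(float))
dest["a.py"][10] = 1.5
src["a.py"][10] = 2.0
src["b.py"][3] = 0.25
ScaleneStatistics.increment_per_line_samples(dest, src)
print(dest["a.py"][10])  # 3.5
print(dest["b.py"][3])   # 0.25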
scalene | scalene-master/scalene/adaptive.py |
from typing import List
class Adaptive:
"""Implements sampling to achieve the effect of a uniform random sample."""
def __init__(self, size: int):
# size must be a power of two
self.max_samples = size
self.current_index = 0
self.sample_array = [0.0] * size
def __add__(self: "Adaptive", other: "Adaptive") -> "Adaptive":
n = Adaptive(self.max_samples)
for i in range(0, self.max_samples):
n.sample_array[i] = self.sample_array[i] + other.sample_array[i]
n.current_index = max(self.current_index, other.current_index)
return n
def __iadd__(self: "Adaptive", other: "Adaptive") -> "Adaptive":
for i in range(0, self.max_samples):
self.sample_array[i] += other.sample_array[i]
self.current_index = max(self.current_index, other.current_index)
return self
def add(self, value: float) -> None:
if self.current_index >= self.max_samples:
# Decimate
new_array = [0.0] * self.max_samples
for i in range(0, self.max_samples // 3):
arr = [self.sample_array[i * 3 + j] for j in range(0, 3)]
arr.sort()
new_array[i] = arr[1] # Median
self.current_index = self.max_samples // 3
self.sample_array = new_array
self.sample_array[self.current_index] = value
self.current_index += 1
def get(self) -> List[float]:
return self.sample_array
def len(self) -> int:
return self.current_index
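A hypothetical sketch of the decimation behavior: once the reservoir fills, the next add() replaces each group of three samples with its median, then appends the new value.

from scalene.adaptive import Adaptive

a = Adaptive(9)
for v in [5.0, 1.0, 3.0, 9.0, 7.0, 8.0, 2.0, 6.0, 4.0]:
    a.add(v)
print(a.len())      # 9 -- the reservoir is now full
a.add(10.0)         # triggers decimation before appending
print(a.len())      # 4 -- three medians plus the new sample
print(a.get()[:4])  # [3.0, 8.0, 4.0, 10.0]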
scalene | scalene-master/scalene/profile.py |
import argparse
import os
import sys
from textwrap import dedent
from scalene.scalene_signals import ScaleneSignals
usage = dedent("""Turn Scalene profiling on or off for a specific process.""")
parser = argparse.ArgumentParser(
prog="scalene.profile",
description=usage,
formatter_class=argparse.RawTextHelpFormatter,
allow_abbrev=False,
)
parser.add_argument(
"--pid", dest="pid", type=int, default=0, help="process ID"
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--on", action="store_true", help="turn profiling on")
group.add_argument("--off", action="store_false", help="turn profiling off")
args, left = parser.parse_known_args()
if len(sys.argv) == 1 or args.pid == 0:
parser.print_help(sys.stderr)
sys.exit(-1)
try:
if args.on:
os.kill(args.pid, ScaleneSignals().start_profiling_signal)
print("Scalene: profiling turned on.")
else:
os.kill(args.pid, ScaleneSignals().stop_profiling_signal)
print("Scalene: profiling turned off.")
except ProcessLookupError:
print("Process " + str(args.pid) + " not found.")
scalene | scalene-master/scalene/scalene_json.py |
import copy
import linecache
import random
import re
from collections import OrderedDict, defaultdict
from operator import itemgetter
from pathlib import Path
from typing import Any, Callable, Dict, List
from scalene.scalene_leak_analysis import ScaleneLeakAnalysis
from scalene.scalene_statistics import Filename, LineNumber, ScaleneStatistics
class ScaleneJSON:
# Default threshold for percent of CPU time to report a file.
cpu_percent_threshold = 1
# Default threshold for number of mallocs to report a file.
malloc_threshold = 1 # 100
# Fraction of the maximum footprint to use as granularity for memory timelines
# (used for compression). E.g., 10 => 1/10th of the max.
memory_granularity_fraction = 10
# Maximum number of sparkline samples.
max_sparkline_samples = 100
def __init__(self) -> None:
# where we write profile info
self.output_file = ""
# if we are on a GPU or not
self.gpu = False
def compress_samples(
self, uncompressed_samples: List[Any], max_footprint: float
) -> List[Any]:
# Compress the samples so that the granularity is at least
# a certain fraction of the maximum footprint.
samples = []
granularity = max_footprint / self.memory_granularity_fraction
last_mem = 0
for (t, mem) in uncompressed_samples:
if abs(mem - last_mem) >= granularity:
# We're above the granularity.
# Force all memory amounts to be positive.
mem = max(0, mem)
# Add a tiny bit of random noise to force different values (for the GUI).
mem += abs(random.gauss(0.01, 0.01))
# Now we append it and set the last amount to be the
# current footprint.
samples.append([t, mem])
last_mem = mem
if len(samples) > self.max_sparkline_samples:
# Too many samples. We randomly downsample.
samples = sorted(
random.sample(samples, self.max_sparkline_samples)
)
return samples
# Profile output methods
def output_profile_line(
self,
*,
fname: Filename,
fname_print: Filename,
line_no: LineNumber,
stats: ScaleneStatistics,
profile_this_code: Callable[[Filename, LineNumber], bool],
profile_memory: bool = False,
force_print: bool = False,
) -> Dict[str, Any]:
"""Print at most one line of the profile (true == printed one)."""
if not force_print and not profile_this_code(fname, line_no):
return {}
# Prepare output values.
n_cpu_samples_c = stats.cpu_samples_c[fname][line_no]
# Correct for negative CPU sample counts. This can happen
# because of floating point inaccuracies, since we perform
# subtraction to compute it.
n_cpu_samples_c = max(0, n_cpu_samples_c)
n_cpu_samples_python = stats.cpu_samples_python[fname][line_no]
n_gpu_samples = stats.gpu_samples[fname][line_no]
n_gpu_mem_samples = stats.gpu_mem_samples[fname][line_no]
# Compute percentages of CPU time.
if stats.total_cpu_samples:
n_cpu_percent_c = n_cpu_samples_c * 100 / stats.total_cpu_samples
n_cpu_percent_python = (
n_cpu_samples_python * 100 / stats.total_cpu_samples
)
else:
n_cpu_percent_c = 0
n_cpu_percent_python = 0
if stats.total_gpu_samples:
n_gpu_percent = n_gpu_samples * 100 / stats.total_gpu_samples
else:
n_gpu_percent = 0
# Now, memory stats.
# Total volume of memory allocated.
n_malloc_mb = stats.memory_malloc_samples[fname][line_no]
# Number of distinct allocation calls (those from the same line are counted as 1).
n_mallocs = stats.memory_malloc_count[fname][line_no]
# Total volume of memory allocated by Python (not native code).
n_python_malloc_mb = stats.memory_python_samples[fname][line_no]
n_usage_fraction = (
0
if not stats.total_memory_malloc_samples
else n_malloc_mb / stats.total_memory_malloc_samples
)
n_python_fraction = (
0
if not n_malloc_mb
else n_python_malloc_mb / stats.total_memory_malloc_samples
)
# Average memory consumed by this line.
n_avg_mb = (
stats.memory_aggregate_footprint[fname][line_no]
if n_mallocs == 0
else stats.memory_aggregate_footprint[fname][line_no] / n_mallocs
)
# Peak memory consumed by this line.
n_peak_mb = stats.memory_max_footprint[fname][line_no]
# Force the reporting of average to be no more than peak.
# In principle, this should never happen, but...
# assert n_avg_mb <= n_peak_mb
if n_avg_mb > n_peak_mb:
n_avg_mb = n_peak_mb
n_cpu_percent = n_cpu_percent_c + n_cpu_percent_python
# Adjust CPU time by utilization.
mean_cpu_util = stats.cpu_utilization[fname][line_no].mean()
n_sys_percent = n_cpu_percent * (1.0 - mean_cpu_util)
n_cpu_percent_python *= mean_cpu_util
n_cpu_percent_c *= mean_cpu_util
del mean_cpu_util
n_copy_b = stats.memcpy_samples[fname][line_no]
if stats.elapsed_time:
n_copy_mb_s = n_copy_b / (1024 * 1024 * stats.elapsed_time)
else:
n_copy_mb_s = 0
samples = self.compress_samples(
stats.per_line_footprint_samples[fname][line_no],
stats.max_footprint,
)
return {
"lineno": line_no,
"line": linecache.getline(fname, line_no),
"n_cpu_percent_c": n_cpu_percent_c,
"n_cpu_percent_python": n_cpu_percent_python,
"n_sys_percent": n_sys_percent,
"n_gpu_percent": n_gpu_percent,
"n_gpu_avg_memory_mb": n_gpu_mem_samples.mean() / 1048576,
"n_gpu_peak_memory_mb": n_gpu_mem_samples.peak() / 1048576,
"n_peak_mb": n_peak_mb,
"n_growth_mb": n_peak_mb, # For backwards compatibility
"n_avg_mb": n_avg_mb,
"n_mallocs": n_mallocs,
"n_malloc_mb": n_malloc_mb,
"n_usage_fraction": n_usage_fraction,
"n_python_fraction": n_python_fraction,
"n_copy_mb_s": n_copy_mb_s,
"memory_samples": samples,
}
def output_profiles(
self,
program: Filename,
stats: ScaleneStatistics,
pid: int,
profile_this_code: Callable[[Filename, LineNumber], bool],
python_alias_dir: Path,
profile_memory: bool = True,
) -> Dict[str, Any]:
"""Write the profile out."""
# Get the children's stats, if any.
if not pid:
stats.merge_stats(python_alias_dir)
# If we've collected any samples, dump them.
if (
not stats.total_cpu_samples
and not stats.total_memory_malloc_samples
and not stats.total_memory_free_samples
):
# Nothing to output.
return {}
# Collect all instrumented filenames.
all_instrumented_files: List[Filename] = list(
set(
list(stats.cpu_samples_python.keys())
+ list(stats.cpu_samples_c.keys())
+ list(stats.memory_free_samples.keys())
+ list(stats.memory_malloc_samples.keys())
)
)
if not all_instrumented_files:
# We didn't collect samples in source files.
return {}
growth_rate = 0.0
if profile_memory:
samples = self.compress_samples(
stats.memory_footprint_samples, stats.max_footprint
)
# Compute growth rate (slope), between 0 and 1.
if stats.allocation_velocity[1] > 0:
growth_rate = (
100.0
* stats.allocation_velocity[0]
/ stats.allocation_velocity[1]
)
else:
samples = []
output: Dict[str, Any] = {
"program": program,
"elapsed_time_sec": stats.elapsed_time,
"growth_rate": growth_rate,
"max_footprint_mb": stats.max_footprint,
"files": {},
"gpu": self.gpu,
"memory": profile_memory,
"samples": samples,
}
# Build a list of files we will actually report on.
report_files: List[Filename] = []
# Sort in descending order of CPU cycles, and then ascending order by filename
for fname in sorted(
all_instrumented_files,
key=lambda f: (-(stats.cpu_samples[f]), f),
):
fname = Filename(fname)
try:
percent_cpu_time = (
100 * stats.cpu_samples[fname] / stats.total_cpu_samples
)
except ZeroDivisionError:
percent_cpu_time = 0
# Ignore files responsible for less than some percent of execution time and fewer than a threshold # of mallocs.
if (
stats.malloc_samples[fname] < self.malloc_threshold
and percent_cpu_time < self.cpu_percent_threshold
):
continue
report_files.append(fname)
# Don't actually output the profile if we are a child process.
# Instead, write info to disk for the main process to collect.
if pid:
stats.output_stats(pid, python_alias_dir)
return {}
if len(report_files) == 0:
return {}
for fname in report_files:
# If the file was actually a Jupyter (IPython) cell,
# restore its name, as in "[12]".
fname_print = fname
result = re.match("<ipython-input-([0-9]+)-.*>", fname_print)
if result:
fname_print = Filename("[" + result.group(1) + "]")
# Leak analysis
# First, compute AVERAGE memory consumption.
avg_mallocs: Dict[LineNumber, float] = defaultdict(float)
for line_no in stats.bytei_map[fname]:
n_malloc_mb = stats.memory_aggregate_footprint[fname][line_no]
count = stats.memory_malloc_count[fname][line_no]
if count:
avg_mallocs[line_no] = n_malloc_mb / count
else:
# Setting to n_malloc_mb addresses the edge case where this allocation is the last line executed.
avg_mallocs[line_no] = n_malloc_mb
avg_mallocs = OrderedDict(
sorted(avg_mallocs.items(), key=itemgetter(1), reverse=True)
)
# Now only report potential leaks if the allocation
# velocity (growth rate) is above some threshold.
leaks = ScaleneLeakAnalysis.compute_leaks(
growth_rate, stats, avg_mallocs, fname
)
# Sort in descending order by least likelihood
leaks = sorted(leaks, key=itemgetter(1), reverse=True)
reported_leaks = {}
for (leak_lineno, leak_likelihood, leak_velocity) in leaks:
reported_leaks[str(leak_lineno)] = {
"likelihood": leak_likelihood,
"velocity_mb_s": leak_velocity / stats.elapsed_time,
}
# Print header.
if not stats.total_cpu_samples:
percent_cpu_time = 0
else:
percent_cpu_time = (
100 * stats.cpu_samples[fname] / stats.total_cpu_samples
)
            # Print out the profile for the source, line by line.
with open(fname, "r", encoding="utf-8") as source_file:
code_lines = source_file.readlines()
output["files"][fname_print] = {
"percent_cpu_time": percent_cpu_time,
"lines": [],
"leaks": reported_leaks,
}
for line_no, _line in enumerate(code_lines, start=1):
profile_line = self.output_profile_line(
fname=fname,
fname_print=fname_print,
line_no=LineNumber(line_no),
stats=stats,
profile_this_code=profile_this_code,
profile_memory=profile_memory,
force_print=False,
)
# Only output if the payload for the line is non-zero.
if profile_line:
profile_line_copy = copy.copy(profile_line)
del profile_line_copy["line"]
del profile_line_copy["lineno"]
if any(profile_line_copy.values()):
output["files"][fname_print]["lines"].append(
profile_line
)
fn_stats = stats.build_function_stats(fname)
# Check CPU samples and memory samples.
print_fn_summary = False
all_samples = set()
all_samples |= set(fn_stats.cpu_samples_python.keys())
all_samples |= set(fn_stats.cpu_samples_c.keys())
all_samples |= set(fn_stats.memory_malloc_samples.keys())
all_samples |= set(fn_stats.memory_free_samples.keys())
print_fn_summary = any(fn != fname for fn in all_samples)
output["files"][fname_print]["functions"] = []
if print_fn_summary:
for fn_name in sorted(
all_samples,
key=lambda k: stats.firstline_map[k],
):
if fn_name == fname:
continue
profile_line = self.output_profile_line(
fname=fn_name,
fname_print=fn_name,
# line 1 is where function stats are
# accumulated; see
# ScaleneStatistics.build_function_stats
line_no=LineNumber(1),
stats=fn_stats,
profile_this_code=profile_this_code,
profile_memory=profile_memory,
force_print=True,
)
if profile_line:
# Change the source code to just the function name.
profile_line["line"] = fn_name
# Fix the line number to point to the first line of the function.
profile_line["lineno"] = stats.firstline_map[fn_name]
output["files"][fname_print]["functions"].append(
profile_line
)
return output
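A hypothetical walk-through of compress_samples with max_footprint=100, so the granularity is 10 MB: only samples that move the footprint by at least 10 MB relative to the last kept sample survive, and a pinch of noise is added to each kept value.

from scalene.scalene_json import ScaleneJSON

j = ScaleneJSON()
raw = [[0.0, 0.0], [1.0, 4.0], [2.0, 15.0], [3.0, 18.0], [4.0, 60.0]]
kept = j.compress_samples(raw, max_footprint=100.0)
# The 4 MB and 3 MB moves fell below the 10 MB granularity and were dropped.
print([t for t, _ in kept])  # [2.0, 4.0]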
scalene | scalene-master/scalene/sparkline.py |
"""Produces a sparkline, as in ▁▁▁▁▁▂▃▂▄▅▄▆█▆█▆
From https://rosettacode.org/wiki/Sparkline_in_unicode#Python
"""
import os
from typing import List, Optional, Tuple
def generate(
arr: List[float],
minimum: Optional[float] = None,
maximum: Optional[float] = None,
) -> Tuple[float, float, str]:
all_zeros = all(i == 0 for i in arr)
if all_zeros:
return 0, 0, ""
# Prevent negative memory output due to sampling error.
samples = [i if i > 0 else 0.0 for i in arr]
return _create(samples[0 : len(arr)], minimum, maximum)
def _create(
numbers: List[float],
fixed_min: Optional[float] = None,
fixed_max: Optional[float] = None,
) -> Tuple[float, float, str]:
min_ = fixed_min if fixed_min is not None else float(min(numbers))
max_ = fixed_max if fixed_max is not None else float(max(numbers))
extent = _get_extent(max_, min_)
spark = "".join(
__bars[
min(
[
__bar_count - 1,
int((n - min_) / extent * __bar_count),
]
)
]
for n in numbers
)
return min_, max_, spark
def _get_extent(max_: float, min_: float) -> float:
extent = max_ - min_
if extent == 0:
extent = 1
return extent
def _in_wsl() -> bool:
"""Are we in Windows Subsystem for Linux?"""
return "WSL_DISTRO_NAME" in os.environ
def _in_windows_terminal() -> bool:
"""Are we in Windows Terminal?
https://aka.ms/windowsterminal
"""
return "WT_PROFILE_ID" in os.environ
def _get_bars() -> str:
if _in_wsl() and not _in_windows_terminal():
# We are running in the Windows Subsystem for Linux Display, a
# crappy version of the sparkline because the Windows console
# *still* only properly displays IBM Code page 437 by default.
# ▄▄■■■■▀▀
return chr(0x2584) * 2 + chr(0x25A0) * 3 + chr(0x2580) * 3
else:
# Reasonable system. Use Unicode characters.
# Unicode: 9601, 9602, 9603, 9604, 9605, 9606, 9607, 9608
# ▁▂▃▄▅▆▇█
return "".join([chr(i) for i in range(9601, 9609)])
__bars = _get_bars()
__bar_count = len(__bars)
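A hypothetical usage sketch: generate() returns the observed minimum and maximum alongside the rendered sparkline string (exact glyphs depend on the terminal; a coarser glyph set is used under the plain WSL console).

from scalene import sparkline

lo, hi, spark = sparkline.generate([0.0, 1.0, 2.0, 4.0, 8.0, 6.0, 3.0])
print(lo, hi)  # 0.0 8.0
print(spark)   # ▁▂▃▅█▇▄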
scalene | scalene-master/scalene/syntaxline.py |
from typing import Any, Iterator, List
from rich.console import Console
from rich.segment import Segment
class SyntaxLine:
def __init__(self, segments: List[Segment]):
self.segments = segments
def __rich_console__(
self, console: Console, _options: Any
) -> Iterator[Segment]:
yield from self.segments
scalene | scalene-master/scalene/scalene_parseargs.py |
import argparse
import contextlib
import sys
from textwrap import dedent
from typing import Any, List, NoReturn, Optional, Tuple
from scalene.scalene_arguments import ScaleneArguments
from scalene.scalene_version import scalene_version
class RichArgParser(argparse.ArgumentParser):
def __init__(self, *args: Any, **kwargs: Any):
from rich.console import Console
self.console = Console()
super().__init__(*args, **kwargs)
def _print_message(self, message: Optional[str], file: Any = None) -> None:
if message:
self.console.print(message)
class StopJupyterExecution(Exception):
"""NOP exception to enable clean exits from within Jupyter notebooks."""
def _render_traceback_(self) -> None:
pass
class ScaleneParseArgs:
@staticmethod
def clean_exit(code: object = 0) -> NoReturn:
"""Replacement for sys.exit that exits cleanly from within Jupyter notebooks."""
raise StopJupyterExecution
@staticmethod
def parse_args() -> Tuple[argparse.Namespace, List[str]]:
# In IPython, intercept exit cleanly (because sys.exit triggers a backtrace).
with contextlib.suppress(BaseException):
from IPython import get_ipython
if get_ipython():
sys.exit = ScaleneParseArgs.clean_exit
sys._exit = ScaleneParseArgs.clean_exit # type: ignore
defaults = ScaleneArguments()
usage = dedent(
f"""[b]Scalene[/b]: a high-precision CPU and memory profiler, version {scalene_version}
[link=https://github.com/plasma-umass/scalene]https://github.com/plasma-umass/scalene[/link]
command-line:
% [b]scalene \[options] yourprogram.py[/b]
or
% [b]python3 -m scalene \[options] yourprogram.py[/b]
in Jupyter, line mode:
[b] %scrun \[options] statement[/b]
in Jupyter, cell mode:
[b] %%scalene \[options]
your code here
[/b]
"""
)
epilog = dedent(
"""When running Scalene in the background, you can suspend/resume profiling
for the process ID that Scalene reports. For example:
% python3 -m scalene [options] yourprogram.py &
Scalene now profiling process 12345
to suspend profiling: python3 -m scalene.profile --off --pid 12345
to resume profiling: python3 -m scalene.profile --on --pid 12345
"""
)
parser = RichArgParser( # argparse.ArgumentParser(
prog="scalene",
description=usage,
epilog=epilog,
formatter_class=argparse.RawTextHelpFormatter,
allow_abbrev=False,
)
parser.add_argument(
"--version",
dest="version",
action="store_const",
const=True,
help="prints the version number for this release of Scalene and exits",
)
parser.add_argument(
"--column-width",
dest="column_width",
type=int,
default=defaults.column_width,
help=f"Column width for profile output (default: [blue]{defaults.column_width}[/blue])",
)
parser.add_argument(
"--outfile",
type=str,
default=defaults.outfile,
help="file to hold profiler output (default: [blue]"
+ ("stdout" if not defaults.outfile else defaults.outfile)
+ "[/blue])",
)
parser.add_argument(
"--html",
dest="html",
action="store_const",
const=True,
default=defaults.html,
help="output as HTML (default: [blue]"
+ str("html" if defaults.html else "text")
+ "[/blue])",
)
parser.add_argument(
"--json",
dest="json",
action="store_const",
const=True,
default=defaults.json,
help="output as JSON (default: [blue]"
+ str("json" if defaults.json else "text")
+ "[/blue])",
)
parser.add_argument(
"--cli",
dest="cli",
action="store_const",
const=True,
default=defaults.cli,
help="forces use of the command-line",
)
parser.add_argument(
"--web",
dest="web",
action="store_const",
const=True,
default=defaults.web,
help="writes 'profile.json' and opens the web UI (http://plasma-umass.org/scalene-gui/)",
)
parser.add_argument(
"--port",
dest="port",
type=int,
default=defaults.port,
help=f"binds the web UI server to this port (default: {defaults.port})",
)
parser.add_argument(
"--reduced-profile",
dest="reduced_profile",
action="store_const",
const=True,
default=defaults.reduced_profile,
help=f"generate a reduced profile, with non-zero lines only (default: [blue]{defaults.reduced_profile}[/blue])",
)
parser.add_argument(
"--profile-interval",
type=float,
default=defaults.profile_interval,
help=f"output profiles every so many seconds (default: [blue]{defaults.profile_interval}[/blue])",
)
parser.add_argument(
"--cpu-only",
dest="cpu_only",
action="store_const",
const=True,
default=defaults.cpu_only,
help="only profile CPU+GPU time (default: [blue]profile "
+ (
"CPU only"
if defaults.cpu_only
else "CPU+GPU, memory, and copying"
)
+ "[/blue])",
)
parser.add_argument(
"--profile-all",
dest="profile_all",
action="store_const",
const=True,
default=defaults.profile_all,
help="profile all executed code, not just the target program (default: [blue]"
+ (
"all code"
if defaults.profile_all
else "only the target program"
)
+ "[/blue])",
)
parser.add_argument(
"--profile-only",
dest="profile_only",
type=str,
default=defaults.profile_only,
help="profile only code in filenames that contain the given strings, separated by commas (default: [blue]"
+ (
"no restrictions"
if not defaults.profile_only
else defaults.profile_only
)
+ "[/blue])",
)
parser.add_argument(
"--profile-exclude",
dest="profile_exclude",
type=str,
default=defaults.profile_exclude,
help="do not profile code in filenames that contain the given strings, separated by commas (default: [blue]"
+ (
"no restrictions"
if not defaults.profile_exclude
else defaults.profile_exclude
)
+ "[/blue])",
)
parser.add_argument(
"--use-virtual-time",
dest="use_virtual_time",
action="store_const",
const=True,
default=defaults.use_virtual_time,
help=f"measure only CPU time, not time spent in I/O or blocking (default: [blue]{defaults.use_virtual_time}[/blue])",
)
parser.add_argument(
"--cpu-percent-threshold",
dest="cpu_percent_threshold",
type=int,
default=defaults.cpu_percent_threshold,
help=f"only report profiles with at least this percent of CPU time (default: [blue]{defaults.cpu_percent_threshold}%%[/blue])",
)
parser.add_argument(
"--cpu-sampling-rate",
dest="cpu_sampling_rate",
type=float,
default=defaults.cpu_sampling_rate,
help=f"CPU sampling rate (default: every [blue]{defaults.cpu_sampling_rate}s[/blue])",
)
parser.add_argument(
"--allocation-sampling-window",
dest="allocation_sampling_window",
type=int,
default=defaults.allocation_sampling_window,
help=f"Allocation sampling window size, in bytes (default: [blue]{defaults.allocation_sampling_window} bytes[/blue])",
)
parser.add_argument(
"--malloc-threshold",
dest="malloc_threshold",
type=int,
default=defaults.malloc_threshold,
help=f"only report profiles with at least this many allocations (default: [blue]{defaults.malloc_threshold}[/blue])",
)
parser.add_argument(
"--program-path",
dest="program_path",
type=str,
default="",
help="The directory containing the code to profile (default: [blue]the path to the profiled program[/blue])",
)
parser.add_argument(
"--memory-leak-detector",
dest="memory_leak_detector",
action="store_true",
default=defaults.memory_leak_detector,
help="EXPERIMENTAL: report likely memory leaks (default: [blue]"
+ (str(defaults.memory_leak_detector))
+ "[/blue])",
)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"--on",
action="store_true",
help="start with profiling on (default)",
)
group.add_argument(
"--off", action="store_true", help="start with profiling off"
)
# the PID of the profiling process (for internal use only)
parser.add_argument(
"--pid", type=int, default=0, help=argparse.SUPPRESS
)
# collect all arguments after "---", which Scalene will ignore
parser.add_argument(
"---",
dest="unused_args",
default=[],
help=argparse.SUPPRESS,
nargs=argparse.REMAINDER,
)
# Parse out all Scalene arguments.
# https://stackoverflow.com/questions/35733262/is-there-any-way-to-instruct-argparse-python-2-7-to-remove-found-arguments-fro
args, left = parser.parse_known_args()
left += args.unused_args
import re
in_jupyter_notebook = len(sys.argv) >= 1 and re.match(
"<ipython-input-([0-9]+)-.*>", sys.argv[0]
)
# If the user did not enter any commands (just `scalene` or `python3 -m scalene`),
# print the usage information and bail.
if not in_jupyter_notebook and (len(sys.argv) + len(left) == 1):
parser.print_help(sys.stderr)
sys.exit(-1)
if args.version:
print(f"Scalene version {scalene_version}")
sys.exit(-1)
return args, left
| 10,968 | 34.498382 | 139 | py |
scalene | scalene-master/scalene/scalene_magics.py | import contextlib
import sys
import textwrap
from typing import Any
with contextlib.suppress(Exception):
from IPython.core.magic import (
Magics,
line_cell_magic,
line_magic,
magics_class,
)
from scalene import scalene_profiler
from scalene.scalene_arguments import ScaleneArguments
from scalene.scalene_parseargs import ScaleneParseArgs
@magics_class
class ScaleneMagics(Magics): # type: ignore
"""IPython (Jupyter) support for magics for Scalene (%scrun and %%scalene)."""
def run_code(self, args: ScaleneArguments, code: str) -> None:
import IPython
# Create a file to hold the supplied code.
# We encode the cell number in the string for later recovery.
# The length of the history buffer lets us find the most recent string (this one).
filename = f"<ipython-input-{len(IPython.get_ipython().history_manager.input_hist_raw)-1}-profile>"
        # The cell body arrives without its first line (%%scalene);
        # prepend a newline so line numbers match the original cell.
        newcode = "\n" + code
with open(filename, "w+") as tmpfile:
tmpfile.write(newcode)
        args.cpu_only = True  # full Scalene does not yet work in Jupyter; force CPU-only mode
scalene_profiler.Scalene.set_initialized()
scalene_profiler.Scalene.run_profiler(
args, [filename], is_jupyter=True
)
@line_cell_magic
def scalene(self, line: str, cell: str = "") -> None:
"""%%scalene magic: see https://github.com/plasma-umass/scalene for usage info."""
if line:
sys.argv = ["scalene"]
sys.argv.extend(line.split(" "))
(args, _left) = ScaleneParseArgs.parse_args()
else:
args = ScaleneArguments()
if cell:
self.run_code(args, cell) # type: ignore
@line_magic
def scrun(self, line: str = "") -> None:
"""%scrun magic: see https://github.com/plasma-umass/scalene for usage info."""
if line:
sys.argv = ["scalene"]
sys.argv.extend(line.split(" "))
(args, left) = ScaleneParseArgs.parse_args()
self.run_code(args, " ".join(left)) # type: ignore
def load_ipython_extension(ip: Any) -> None:
ip.register_magics(ScaleneMagics)
with contextlib.suppress(Exception):
# For some reason, this isn't loading correctly on the web.
with open("scalene-usage.txt", "r") as usage:
usage_str = usage.read()
ScaleneMagics.scrun.__doc__ = usage_str
ScaleneMagics.scalene.__doc__ = usage_str
print(
"\n".join(
textwrap.wrap(
"Scalene extension successfully loaded. Note: Scalene currently only supports CPU+GPU profiling inside Jupyter notebooks. For full Scalene profiling, use the command line version."
)
)
)
if sys.platform == "darwin":
print()
print(
"\n".join(
            textwrap.wrap(
                "NOTE: in Jupyter notebooks on MacOS, Scalene cannot profile child processes. Do not try to run Scalene with multiprocessing in a Jupyter notebook."
)
)
)
| 3,403 | 38.126437 | 200 | py |
scalene | scalene-master/scalene/replacement_pjoin.py | import multiprocessing
import os
import sys
import threading
import time
from scalene.scalene_profiler import Scalene
minor_version = sys.version_info.minor
@Scalene.shim
def replacement_pjoin(scalene: Scalene) -> None:
def replacement_process_join(self, timeout: float = -1) -> None: # type: ignore
"""
A drop-in replacement for multiprocessing.Process.join
that periodically yields to handle signals
"""
# print(multiprocessing.process.active_children())
if minor_version >= 7:
self._check_closed()
assert self._parent_pid == os.getpid(), "can only join a child process"
assert self._popen is not None, "can only join a started process"
tident = threading.get_ident()
if timeout < 0:
interval = sys.getswitchinterval()
else:
interval = min(timeout, sys.getswitchinterval())
start_time = time.perf_counter()
while True:
scalene.set_thread_sleeping(tident)
res = self._popen.wait(interval)
if res is not None:
from multiprocessing.process import _children # type: ignore
scalene.remove_child_pid(self.pid)
_children.discard(self)
return
scalene.reset_thread_sleeping(tident)
            # Note: `interval` is the per-iteration sleep time, while
            # `timeout` determines when the join finally returns.
if timeout != -1:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
from multiprocessing.process import (
_children,
) # type: ignore
_children.discard(self)
return
multiprocessing.Process.join = replacement_process_join # type: ignore
| 1,906 | 34.314815 | 84 | py |
scalene | scalene-master/scalene/scalene_leak_analysis.py | from typing import Any, List, OrderedDict
from scalene.scalene_statistics import Filename, LineNumber, ScaleneStatistics
class ScaleneLeakAnalysis:
# Only report potential leaks if the allocation velocity is above this threshold
growth_rate_threshold = 0.01
    # Only report leaks whose estimated likelihood of being a leak is at
    # least 1 minus this threshold (i.e., at least 95%).
    leak_reporting_threshold = 0.05
@staticmethod
def compute_leaks(
growth_rate: float,
stats: ScaleneStatistics,
avg_mallocs: OrderedDict[LineNumber, float],
fname: Filename,
) -> List[Any]:
if growth_rate / 100 < ScaleneLeakAnalysis.growth_rate_threshold:
return []
leaks = []
keys = list(stats.leak_score[fname].keys())
for index, item in enumerate(stats.leak_score[fname].values()):
# See https://en.wikipedia.org/wiki/Rule_of_succession
frees = item[1]
allocs = item[0]
expected_leak = (frees + 1) / (frees + allocs + 2)
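            # Worked example (rule of succession): with 9 mallocs and no
            # frees, expected_leak = (0 + 1) / (0 + 9 + 2) = 1/11 ~= 0.091,
            # which is above the 0.05 reporting threshold, so the line is
            # not reported; with 19 mallocs and no frees, 1/21 ~= 0.048
            # falls below the threshold and the line is reported.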
if expected_leak <= ScaleneLeakAnalysis.leak_reporting_threshold:
if keys[index] in avg_mallocs:
leaks.append(
(
keys[index],
1 - expected_leak,
avg_mallocs[keys[index]],
)
)
return leaks
| 1,398 | 33.975 | 84 | py |
scalene | scalene-master/scalene/scalene_gpu.py | import contextlib
from typing import Tuple
import pynvml
class ScaleneGPU:
"""A wrapper around the nvidia device driver library (nvidia-ml-py)."""
def __init__(self) -> None:
self.__ngpus = 0
self.__has_gpu = False
self.__handle = []
with contextlib.suppress(Exception):
pynvml.nvmlInit()
self.__has_gpu = True
self.__ngpus = pynvml.nvmlDeviceGetCount()
for i in range(self.__ngpus):
self.__handle.append(pynvml.nvmlDeviceGetHandleByIndex(i))
def has_gpu(self) -> bool:
"""True iff the system has a detected GPU."""
return self.__has_gpu
def nvml_reinit(self) -> None:
"""Reinitialize the nvidia wrapper."""
self.__handle = []
with contextlib.suppress(Exception):
pynvml.nvmlInit()
self.__ngpus = pynvml.nvmlDeviceGetCount()
for i in range(self.__ngpus):
self.__handle.append(pynvml.nvmlDeviceGetHandleByIndex(i))
    def get_stats(self) -> Tuple[float, float]:
        """Return (average GPU utilization as a fraction in [0, 1], GPU memory in use, in bytes)."""
if self.__has_gpu:
total_load = 0.0
mem_used = 0
for i in range(self.__ngpus):
with contextlib.suppress(Exception):
total_load += pynvml.nvmlDeviceGetUtilizationRates(
self.__handle[i]
).gpu
mem_info = pynvml.nvmlDeviceGetMemoryInfo(self.__handle[i])
mem_used += mem_info.used
total_load = (total_load / self.__ngpus) / 100.0
return (total_load, mem_used)
return (0.0, 0.0)
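# A minimal usage sketch (hypothetical; assumes an NVIDIA GPU and the
# nvidia-ml-py package installed):
if __name__ == "__main__":
    gpu = ScaleneGPU()
    if gpu.has_gpu():
        load, mem_used = gpu.get_stats()
        print(f"GPU load: {load:.0%}, memory in use: {mem_used} bytes")
    else:
        print("No GPU detected.")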
| 1,708 | 33.877551 | 79 | py |
scalene | scalene-master/scalene/replacement_exit.py | import os
import sys
from scalene.scalene_profiler import Scalene
@Scalene.shim
def replacement_exit(scalene: Scalene) -> None:
"""
Shims out the unconditional exit with
the "neat exit" (which raises the SystemExit error and
allows Scalene to exit neatly)
"""
# Note: MyPy doesn't like this, but it works because passing an int
# to sys.exit does the right thing
os._exit = sys.exit # type: ignore
| 434 | 24.588235 | 71 | py |
scalene | scalene-master/scalene/__init__.py | # Jupyter support
from scalene.scalene_magics import *
| 57 | 10.6 | 36 | py |
scalene | scalene-master/scalene/replacement_thread_join.py | import sys
import threading
import time
from typing import Optional
from scalene.scalene_profiler import Scalene
@Scalene.shim
def replacement_thread_join(scalene: Scalene) -> None:
orig_thread_join = threading.Thread.join
def thread_join_replacement(
self: threading.Thread, timeout: Optional[float] = None
    ) -> None:
        """We replace threading.Thread.join with this method, which
        periodically yields so that signals can be delivered."""
start_time = time.perf_counter()
interval = sys.getswitchinterval()
while self.is_alive():
scalene.set_thread_sleeping(threading.get_ident())
orig_thread_join(self, interval)
scalene.reset_thread_sleeping(threading.get_ident())
# If a timeout was specified, check to see if it's expired.
if timeout:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
return None
return None
threading.Thread.join = thread_join_replacement # type: ignore
| 1,054 | 31.96875 | 73 | py |
scalene | scalene-master/scalene/scalene_signals.py | import signal
import sys
from typing import List, Tuple
class ScaleneSignals:
    """Maps Scalene's profiling events (CPU timer, malloc, free, memcpy, start/stop) onto signals."""
def __init__(self) -> None:
self.start_profiling_signal = signal.SIGILL
self.set_timer_signals(True)
if sys.platform != "win32":
self.stop_profiling_signal = signal.SIGBUS
self.memcpy_signal = signal.SIGPROF
# Malloc and free signals are generated by include/sampleheap.hpp.
self.malloc_signal = signal.SIGXCPU
self.free_signal = signal.SIGXFSZ
# Set these by default to virtual time (changeable with `set_timer_signals`)
self.cpu_timer_signal = signal.ITIMER_VIRTUAL
self.cpu_signal = signal.SIGVTALRM
else:
self.stop_profiling_signal = signal.SIGTERM
# TO DO - not yet activated for Windows
self.memcpy_signal = None
self.malloc_signal = None
self.free_signal = None
self.cpu_signal = signal.SIGBREAK
self.cpu_timer_signal = None
def set_timer_signals(self, use_virtual_time: bool) -> None:
"""Set up timer signals for CPU profiling."""
if sys.platform == "win32":
self.cpu_signal = signal.SIGBREAK
self.cpu_timer_signal = None
else:
if use_virtual_time:
self.cpu_timer_signal = signal.ITIMER_VIRTUAL
self.cpu_signal = signal.SIGVTALRM
else:
self.cpu_timer_signal = signal.ITIMER_REAL
self.cpu_signal = signal.SIGALRM
    def get_timer_signals(self) -> Tuple[int, signal.Signals]:
        """Return the CPU profiling timer type and its associated signal."""
        return self.cpu_timer_signal, self.cpu_signal
    def get_all_signals(self) -> List[int]:
        """Return all of the signals Scalene uses."""
        return [
self.start_profiling_signal,
self.stop_profiling_signal,
self.memcpy_signal,
self.malloc_signal,
self.free_signal,
self.cpu_signal,
]
| 1,951 | 35.830189 | 88 | py |
scalene | scalene-master/scalene/scalene_profiler.py | """Scalene: a scripting-language aware profiler for Python.
https://github.com/plasma-umass/scalene
See the paper "docs/scalene-paper.pdf" in this repository for technical
details on an earlier version of Scalene's design; note that a
number of these details have changed.
by Emery Berger
https://emeryberger.com
usage: scalene test/testme.py
usage help: scalene --help
"""
import argparse
import atexit
import builtins
import contextlib
from copy import copy
import functools
import gc
import http.server
import inspect
import json
import math
import multiprocessing
import os
import pathlib
import platform
import random
import re
import signal
import socketserver
import stat
import sys
import tempfile
import threading
import time
import traceback
import webbrowser
from collections import defaultdict
from functools import lru_cache
from signal import Handlers, Signals
from types import FrameType
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union, cast
from scalene.scalene_arguments import ScaleneArguments
from scalene.scalene_client_timer import ScaleneClientTimer
from scalene.scalene_funcutils import ScaleneFuncUtils
from scalene.scalene_json import ScaleneJSON
from scalene.scalene_mapfile import ScaleneMapFile
from scalene.scalene_output import ScaleneOutput
from scalene.scalene_preload import ScalenePreload
from scalene.scalene_signals import ScaleneSignals
from scalene.scalene_statistics import (
Address,
ByteCodeIndex,
Filename,
LineNumber,
ScaleneStatistics,
)
if sys.platform != "win32":
import resource
if platform.system() == "Darwin":
from scalene.scalene_apple_gpu import ScaleneAppleGPU as ScaleneGPU
else:
from scalene.scalene_gpu import ScaleneGPU # type: ignore
from scalene.scalene_parseargs import ScaleneParseArgs, StopJupyterExecution
from scalene.scalene_sigqueue import ScaleneSigQueue
def require_python(version: Tuple[int, int]) -> None:
assert (
sys.version_info >= version
), f"Scalene requires Python version {version[0]}.{version[1]} or above."
require_python((3, 8))
# Scalene fully supports Unix-like operating systems, in particular
# Linux, Mac OS X, and WSL 2 (Windows Subsystem for Linux 2, e.g., Ubuntu).
# It also has partial support for Windows.
# Install our profile decorator.
def scalene_redirect_profile(func: Any) -> Any:
return Scalene.profile(func)
builtins.profile = scalene_redirect_profile # type: ignore
# Must equal src/include/sampleheap.hpp NEWLINE *minus 1*
NEWLINE_TRIGGER_LENGTH = 98820 # SampleHeap<...>::NEWLINE-1
def start() -> None:
Scalene.start()
def stop() -> None:
Scalene.stop()
class Scalene:
"""The Scalene profiler itself."""
__in_jupyter = False # are we running inside a Jupyter notebook
__start_time = 0 # start of profiling, in nanoseconds
# Whether the current profiler is a child
__is_child = -1
# the pid of the primary profiler
__parent_pid = -1
__initialized: bool = False
__last_profiled = (Filename("NADA"), LineNumber(0), ByteCodeIndex(0))
__last_profiled_invalidated = False
# Support for @profile
# decorated files
__files_to_profile: Dict[Filename, bool] = defaultdict(bool)
# decorated functions
__functions_to_profile: Dict[Filename, Dict[Any, bool]] = defaultdict(
lambda: {}
)
# Cache the original thread join function, which we replace with our own version.
__original_thread_join = threading.Thread.join
    # As above; we cache the original lock and replace it.
__original_lock = threading.Lock
__args = ScaleneArguments()
__signals = ScaleneSignals()
__stats = ScaleneStatistics()
__output = ScaleneOutput()
__json = ScaleneJSON()
__gpu = ScaleneGPU()
__output.gpu = __gpu.has_gpu()
__json.gpu = __gpu.has_gpu()
@staticmethod
def get_original_lock() -> threading.Lock:
return Scalene.__original_lock()
# Likely names for the Python interpreter.
__all_python_names = [
os.path.basename(sys.executable),
os.path.basename(sys.executable) + str(sys.version_info.major),
os.path.basename(sys.executable)
+ str(sys.version_info.major)
+ "."
+ str(sys.version_info.minor),
]
# when did we last receive a signal?
__last_signal_time_virtual: float = 0
__last_signal_time_wallclock: float = 0
__last_signal_time_sys: float = 0
__last_signal_time_user: float = 0
# path for the program being profiled
__program_path: str = ""
# temporary directory to hold aliases to Python
__python_alias_dir: pathlib.Path
# Profile output parameters
# when we output the next profile
__next_output_time: float = float("inf")
# pid for tracking child processes
__pid: int = 0
__malloc_mapfile: ScaleneMapFile
__memcpy_mapfile: ScaleneMapFile
# Program-specific information:
# the name of the program being profiled
__program_being_profiled = Filename("")
# Is the thread sleeping? (We use this to properly attribute CPU time.)
__is_thread_sleeping: Dict[int, bool] = defaultdict(
bool
) # False by default
child_pids: Set[
int
] = set() # Needs to be unmangled to be accessed by shims
# Signal queues for CPU timers, allocations, and memcpy
__cpu_sigq: ScaleneSigQueue[Any]
__alloc_sigq: ScaleneSigQueue[Any]
__memcpy_sigq: ScaleneSigQueue[Any]
__sigqueues: List[ScaleneSigQueue[Any]]
client_timer: ScaleneClientTimer = ScaleneClientTimer()
__orig_signal = signal.signal
if sys.version_info < (3, 8):
__orig_raise_signal = lambda s: os.kill(os.getpid(), s)
else:
__orig_raise_signal = signal.raise_signal
__orig_kill = os.kill
if sys.platform != "win32":
__orig_setitimer = signal.setitimer
__orig_siginterrupt = signal.siginterrupt
@staticmethod
def get_all_signals_set() -> Set[int]:
return set(Scalene.__signals.get_all_signals())
@staticmethod
def get_timer_signals() -> Tuple[int, signal.Signals]:
return Scalene.__signals.get_timer_signals()
@staticmethod
def set_in_jupyter() -> None:
Scalene.__in_jupyter = True
@staticmethod
def in_jupyter() -> bool:
return Scalene.__in_jupyter
@staticmethod
def interruption_handler(
signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
this_frame: Optional[FrameType],
) -> None:
raise KeyboardInterrupt
@staticmethod
def on_stack(
frame: FrameType, fname: Filename, lineno: LineNumber
    ) -> Optional[FrameType]:
        """Return the frame on the stack (starting at frame) matching the given filename and line number, or None if there is none."""
found_frame = None
f = frame
while f:
if (f.f_code.co_filename, f.f_lineno) == (fname, lineno):
found_frame = f
break
f = cast(FrameType, f.f_back)
return found_frame
@staticmethod
def update_line() -> None:
# Mark a new line by allocating the trigger number of bytes.
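        # CPython's bytearray allocates one extra byte, so this shows up in
        # the sampling heap's log as an allocation of
        # NEWLINE_TRIGGER_LENGTH + 1 bytes, which alloc_sigqueue_processor
        # treats as a line-change marker rather than an ordinary malloc.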
bytearray(NEWLINE_TRIGGER_LENGTH)
@staticmethod
def invalidate_lines(frame: FrameType, _event: str, _arg: str) -> Any:
"""Mark the last_profiled information as invalid as soon as we execute a different line of code."""
try:
# If we are still on the same line, return.
ff = frame.f_code.co_filename
fl = frame.f_lineno
(fname, lineno, lasti) = Scalene.__last_profiled
if (ff == fname) and (fl == lineno):
return None
# Different line: stop tracing this frame.
frame.f_trace = None
frame.f_trace_lines = False
# If we are not in a file we should be tracing, return.
if not Scalene.should_trace(ff):
return None
# Check if we are still executing the same line of code or
# not (whether in this frame or one above it).
f = Scalene.on_stack(frame, fname, lineno)
if f:
# Still the same line, but somewhere up the stack
# (since we returned when it was the same line in this
# frame). Stop tracing in this frame.
return None
# We are on a different line; stop tracing and increment the count.
sys.settrace(None)
Scalene.update_line()
Scalene.__last_profiled_invalidated = False
Scalene.__last_profiled = (
Filename(ff),
LineNumber(fl),
ByteCodeIndex(frame.f_lasti),
)
return None
except AttributeError:
# This can happen when Scalene shuts down.
return None
except Exception as e:
print("Error in program being profiled:\n", e)
traceback.print_exc()
return None
@classmethod
def clear_metrics(cls) -> None:
"""
Clears the various states so that each forked process
can start with a clean slate
"""
cls.__stats.clear()
cls.child_pids.clear()
@classmethod
def add_child_pid(cls, pid: int) -> None:
cls.child_pids.add(pid)
@classmethod
def remove_child_pid(cls, pid: int) -> None:
try:
cls.child_pids.remove(pid)
except KeyError:
# Defensive programming: this should never happen.
pass
# Replacement @profile decorator function.
# We track which functions - in which files - have been decorated,
# and only report stats for those.
@staticmethod
def profile(func: Any) -> Any:
# Record the file and function name
Scalene.__files_to_profile[func.__code__.co_filename] = True
Scalene.__functions_to_profile[func.__code__.co_filename][func] = True
@functools.wraps(func)
def wrapper_profile(*args: Any, **kwargs: Any) -> Any:
value = func(*args, **kwargs)
return value
return wrapper_profile
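    # Example (hypothetical) use in profiled code; Scalene installs this
    # decorator as the built-in `profile`, so no import is required:
    #
    #     @profile
    #     def hot_function():
    #         ...
    #
    # Only decorated functions (and the files containing them) are reported.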
@staticmethod
def shim(func: Callable[[Any], Any]) -> Any:
"""
Provides a decorator that, when used, calls the wrapped function with the Scalene type
Wrapped function must be of type (s: Scalene) -> Any
This decorator allows for marking a function in a separate file as a drop-in replacement for an existing
library function. The intention is for these functions to replace a function that indefinitely blocks (which
interferes with Scalene) with a function that awakens periodically to allow for signals to be delivered
"""
func(Scalene)
        # Return the function itself to the caller, so that any later
        # attempt to call it does not produce unusual errors.
@functools.wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
return func(*args, **kwargs)
return wrapped
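    # Example (hypothetical) shim, mirroring the replacement_* modules:
    #
    #     @Scalene.shim
    #     def replacement_example(scalene: Scalene) -> None:
    #         ...  # monkey-patch a blocking library call here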
@staticmethod
def cleanup_files() -> None:
Scalene.__malloc_mapfile.cleanup()
Scalene.__memcpy_mapfile.cleanup()
@staticmethod
def set_thread_sleeping(tid: int) -> None:
Scalene.__is_thread_sleeping[tid] = True
@staticmethod
def reset_thread_sleeping(tid: int) -> None:
Scalene.__is_thread_sleeping[tid] = False
timer_signals = True
@staticmethod
def windows_timer_loop() -> None:
"""For Windows, send periodic timer signals; launch as a background thread."""
Scalene.timer_signals = True
while Scalene.timer_signals:
time.sleep(Scalene.__args.cpu_sampling_rate)
Scalene.__orig_raise_signal(Scalene.__signals.cpu_signal)
@staticmethod
def start_signal_queues() -> None:
"""Starts the signal processing queues (i.e., their threads)"""
for sigq in Scalene.__sigqueues:
sigq.start()
@staticmethod
def stop_signal_queues() -> None:
"""Stops the signal processing queues (i.e., their threads)"""
for sigq in Scalene.__sigqueues:
sigq.stop()
@staticmethod
def malloc_signal_handler(
signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
this_frame: Optional[FrameType],
) -> None:
invalidated = Scalene.__last_profiled_invalidated
(fname, lineno, lasti) = Scalene.__last_profiled
if this_frame:
Scalene.enter_function_meta(this_frame, Scalene.__stats)
# Walk the stack till we find a line of code in a file we are tracing.
found_frame = False
f = this_frame
while f:
if Scalene.should_trace(f.f_code.co_filename):
found_frame = True
break
f = cast(FrameType, f.f_back)
if not found_frame:
return
assert f
# Start tracing until we execute a different line of
# code in a file we are tracking.
# First, see if we have now executed a different line of code.
# If so, increment.
if invalidated or not (
fname == Filename(f.f_code.co_filename)
and lineno == LineNumber(f.f_lineno)
):
Scalene.update_line()
Scalene.__last_profiled_invalidated = False
Scalene.__last_profiled = (
Filename(f.f_code.co_filename),
LineNumber(f.f_lineno),
ByteCodeIndex(f.f_lasti),
)
Scalene.__alloc_sigq.put([0])
# Start tracing.
sys.settrace(Scalene.invalidate_lines)
f.f_trace = Scalene.invalidate_lines
f.f_trace_lines = True
del this_frame
@staticmethod
def free_signal_handler(
signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
this_frame: Optional[FrameType],
) -> None:
if this_frame:
Scalene.enter_function_meta(this_frame, Scalene.__stats)
Scalene.__alloc_sigq.put([0])
del this_frame
@staticmethod
def memcpy_signal_handler(
signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
this_frame: Optional[FrameType],
) -> None:
Scalene.__memcpy_sigq.put((signum, this_frame))
del this_frame
@staticmethod
def enable_signals() -> None:
"""Set up the signal handlers to handle interrupts for profiling and start the
timer interrupts."""
if sys.platform == "win32":
Scalene.timer_signals = True
Scalene.__orig_signal(
Scalene.__signals.cpu_signal,
Scalene.cpu_signal_handler,
)
            # On Windows, we simulate timer signals by running a background thread.
t = threading.Thread(target=Scalene.windows_timer_loop)
t.start()
Scalene.start_signal_queues()
return
Scalene.start_signal_queues()
# Set signal handlers for memory allocation and memcpy events.
Scalene.__orig_signal(
Scalene.__signals.malloc_signal, Scalene.malloc_signal_handler
)
Scalene.__orig_signal(
Scalene.__signals.free_signal, Scalene.free_signal_handler
)
Scalene.__orig_signal(
Scalene.__signals.memcpy_signal, Scalene.memcpy_signal_handler
)
# Set every signal to restart interrupted system calls.
for s in Scalene.__signals.get_all_signals():
Scalene.__orig_siginterrupt(s, False)
# Turn on the CPU profiling timer to run at the sampling rate (exactly once).
Scalene.__orig_signal(
Scalene.__signals.cpu_signal,
Scalene.cpu_signal_handler,
)
if sys.platform != "win32":
Scalene.__orig_setitimer(
Scalene.__signals.cpu_timer_signal,
Scalene.__args.cpu_sampling_rate,
)
def __init__(
self,
arguments: argparse.Namespace,
program_being_profiled: Optional[Filename] = None,
) -> None:
import scalene.replacement_exit
import scalene.replacement_get_context
# Hijack lock, poll, thread_join, fork, and exit.
import scalene.replacement_lock
import scalene.replacement_mp_lock
import scalene.replacement_pjoin
import scalene.replacement_signal_fns
import scalene.replacement_thread_join
if sys.platform != "win32":
import scalene.replacement_fork
import scalene.replacement_poll_selector
Scalene.__args = cast(ScaleneArguments, arguments)
Scalene.__cpu_sigq = ScaleneSigQueue(Scalene.cpu_sigqueue_processor)
Scalene.__alloc_sigq = ScaleneSigQueue(
Scalene.alloc_sigqueue_processor
)
Scalene.__memcpy_sigq = ScaleneSigQueue(
Scalene.memcpy_sigqueue_processor
)
Scalene.__sigqueues = [
Scalene.__cpu_sigq,
Scalene.__alloc_sigq,
Scalene.__memcpy_sigq,
]
# Initialize the malloc related files; if for whatever reason
# the files don't exist and we are supposed to be profiling
# memory, exit.
try:
Scalene.__malloc_mapfile = ScaleneMapFile("malloc")
Scalene.__memcpy_mapfile = ScaleneMapFile("memcpy")
except:
# Ignore if we aren't profiling memory; otherwise, exit.
if not arguments.cpu_only:
sys.exit(1)
Scalene.__signals.set_timer_signals(arguments.use_virtual_time)
if arguments.pid:
# Child process.
# We need to use the same directory as the parent.
# The parent always puts this directory as the first entry in the PATH.
# Extract the alias directory from the path.
dirname = os.environ["PATH"].split(os.pathsep)[0]
Scalene.__python_alias_dir = pathlib.Path(dirname)
Scalene.__pid = arguments.pid
else:
# Parent process.
Scalene.__python_alias_dir = pathlib.Path(
tempfile.mkdtemp(prefix="scalene")
)
# Create a temporary directory to hold aliases to the Python
# executable, so scalene can handle multiple processes; each
# one is a shell script that redirects to Scalene.
Scalene.__pid = 0
cmdline = ""
# Pass along commands from the invoking command line.
cmdline += f" --cpu-sampling-rate={arguments.cpu_sampling_rate}"
if arguments.use_virtual_time:
cmdline += " --use-virtual-time"
if "off" in arguments and arguments.off:
cmdline += " --off"
if arguments.cpu_only:
cmdline += " --cpu-only"
environ = ScalenePreload.get_preload_environ(arguments)
preface = " ".join(
"=".join((k, str(v))) for (k, v) in environ.items()
)
# Add the --pid field so we can propagate it to the child.
cmdline += f" --pid={os.getpid()} ---"
payload = """#!/bin/bash
echo $$
%s %s -m scalene %s $@
""" % (
preface,
sys.executable,
cmdline,
)
# Now create all the files.
for name in Scalene.__all_python_names:
fname = os.path.join(Scalene.__python_alias_dir, name)
with open(fname, "w") as file:
file.write(payload)
os.chmod(fname, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR)
# Finally, insert this directory into the path.
sys.path.insert(0, str(Scalene.__python_alias_dir))
os.environ["PATH"] = (
str(Scalene.__python_alias_dir)
+ os.pathsep
+ os.environ["PATH"]
)
# Force the executable (if anyone invokes it later) to point to one of our aliases.
sys.executable = Scalene.__all_python_names[0]
# Register the exit handler to run when the program terminates or we quit.
atexit.register(Scalene.exit_handler)
# Store relevant names (program, path).
if program_being_profiled:
Scalene.__program_being_profiled = Filename(
# os.path.abspath(program_being_profiled)
program_being_profiled
)
@staticmethod
def cpu_signal_handler(
signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
this_frame: Optional[FrameType],
) -> None:
"""Wrapper for CPU signal handlers."""
# Get current time stats.
if sys.platform != "win32":
# On Linux/Mac, use getrusage, which provides higher
# resolution values than os.times() for some reason.
ru = resource.getrusage(resource.RUSAGE_SELF)
now_sys = ru.ru_stime
now_user = ru.ru_utime
else:
time_info = os.times()
now_sys = time_info.system
now_user = time_info.user
now_virtual = time.process_time()
now_wallclock = time.perf_counter()
if (
Scalene.__last_signal_time_virtual == 0
or Scalene.__last_signal_time_wallclock == 0
):
# Initialization: store values and update on the next pass.
Scalene.__last_signal_time_virtual = now_virtual
Scalene.__last_signal_time_wallclock = now_wallclock
Scalene.__last_signal_time_sys = now_sys
Scalene.__last_signal_time_user = now_user
if sys.platform != "win32":
Scalene.__orig_setitimer(
Scalene.__signals.cpu_timer_signal,
Scalene.__args.cpu_sampling_rate,
)
return
        # Periodically (about one in ten samples) sample the GPU load as well.
if random.randint(0, 9) == 0:
(gpu_load, gpu_mem_used) = Scalene.__gpu.get_stats()
else:
(gpu_load, gpu_mem_used) = (0.0, 0.0)
# Pass on to the signal queue.
Scalene.__cpu_sigq.put(
(
signum,
this_frame,
now_virtual,
now_wallclock,
now_sys,
now_user,
gpu_load,
gpu_mem_used,
Scalene.__last_signal_time_virtual,
Scalene.__last_signal_time_wallclock,
Scalene.__last_signal_time_sys,
Scalene.__last_signal_time_user,
copy(Scalene.__is_thread_sleeping),
)
)
elapsed = now_wallclock - Scalene.__last_signal_time_wallclock
# Store the latest values as the previously recorded values.
Scalene.__last_signal_time_virtual = now_virtual
Scalene.__last_signal_time_wallclock = now_wallclock
Scalene.__last_signal_time_sys = now_sys
Scalene.__last_signal_time_user = now_user
if sys.platform != "win32":
if Scalene.client_timer.is_set:
(
should_raise,
remaining_time,
) = Scalene.client_timer.yield_next_delay(elapsed)
if should_raise:
Scalene.__orig_raise_signal(signal.SIGUSR1)
                # NOTE: 0 is returned only if the 'seconds' have elapsed
                # and there is no interval.
if remaining_time > 0:
to_wait = min(
remaining_time, Scalene.__args.cpu_sampling_rate
)
else:
to_wait = Scalene.__args.cpu_sampling_rate
Scalene.client_timer.reset()
Scalene.__orig_setitimer(
Scalene.__signals.cpu_timer_signal,
to_wait,
)
else:
Scalene.__orig_setitimer(
Scalene.__signals.cpu_timer_signal,
Scalene.__args.cpu_sampling_rate,
)
@staticmethod
def output_profile() -> bool:
if Scalene.__args.json:
json_output = Scalene.__json.output_profiles(
Scalene.__program_being_profiled,
Scalene.__stats,
Scalene.__pid,
Scalene.profile_this_code,
Scalene.__python_alias_dir,
profile_memory=not Scalene.__args.cpu_only,
)
if json_output:
if not Scalene.__output.output_file:
Scalene.__output.output_file = "/dev/stdout"
with open(Scalene.__output.output_file, "w") as f:
f.write(
json.dumps(json_output, sort_keys=True, indent=4)
+ "\n"
)
return True
else:
return False
else:
output = Scalene.__output
column_width = Scalene.__args.column_width
if not Scalene.__args.html:
# Get column width of the terminal and adjust to fit.
try:
# If we are in a Jupyter notebook, stick with 132
if "ipykernel" in sys.modules:
column_width = 132
else:
import shutil
column_width = shutil.get_terminal_size().columns
except:
pass
did_output: bool = output.output_profiles(
column_width,
Scalene.__stats,
Scalene.__pid,
Scalene.profile_this_code,
Scalene.__python_alias_dir,
profile_memory=not Scalene.__args.cpu_only,
reduced_profile=Scalene.__args.reduced_profile,
)
return did_output
@staticmethod
def profile_this_code(fname: Filename, lineno: LineNumber) -> bool:
"""When using @profile, only profile files & lines that have been decorated."""
if not Scalene.__files_to_profile:
return True
if fname not in Scalene.__files_to_profile:
return False
# Now check to see if it's the right line range.
line_info = (
inspect.getsourcelines(fn)
for fn in Scalene.__functions_to_profile[fname]
)
found_function = any(
line_start <= lineno < line_start + len(lines)
for (lines, line_start) in line_info
)
return found_function
@staticmethod
def cpu_sigqueue_processor(
_signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
this_frame: FrameType,
now_virtual: float,
now_wallclock: float,
now_sys: float,
now_user: float,
gpu_load: float,
gpu_mem_used: float,
prev_virtual: float,
prev_wallclock: float,
_prev_sys: float,
prev_user: float,
is_thread_sleeping: Dict[int, bool],
) -> None:
"""Handle interrupts for CPU profiling."""
        # We have recorded how long it has been since we last received a
        # timer signal; see the attribution logic below.
# If it's time to print some profiling info, do so.
if now_wallclock >= Scalene.__next_output_time:
# Print out the profile. Set the next output time, stop
# signals, print the profile, and then start signals
# again.
Scalene.__next_output_time += Scalene.__args.profile_interval
stats = Scalene.__stats
# pause (lock) all the queues to prevent updates while we output
with contextlib.ExitStack() as stack:
locks = [
stack.enter_context(s.lock) for s in Scalene.__sigqueues
]
stats.stop_clock()
Scalene.output_profile()
stats.start_clock()
# Here we take advantage of an ostensible limitation of Python:
# it only delivers signals after the interpreter has given up
# control. This seems to mean that sampling is limited to code
# running purely in the interpreter, and in fact, that was a limitation
# of the first version of Scalene, meaning that native code was entirely ignored.
#
# (cf. https://docs.python.org/3.9/library/signal.html#execution-of-python-signal-handlers)
#
# However: lemons -> lemonade: this "problem" is in fact
# an effective way to separate out time spent in
# Python vs. time spent in native code "for free"! If we get
# the signal immediately, we must be running in the
# interpreter. On the other hand, if it was delayed, that means
# we are running code OUTSIDE the interpreter, e.g.,
# native code (be it inside of Python or in a library). We
        # account for this time by tracking the elapsed (process) time,
        # comparing it to the interval, and adding any computed delay
        # (as if it were sampled) to the C counter.
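        # For example, with a 0.01s sampling interval: if 0.05s of virtual
        # time elapsed since the last signal, we attribute 0.01s to Python
        # and the remaining 0.04s to native code, since the signal could
        # only be delivered once the interpreter regained control.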
elapsed_virtual = now_virtual - prev_virtual
elapsed_wallclock = now_wallclock - prev_wallclock
# CPU utilization is the fraction of time spent on the CPU
# over the total time.
elapsed_user = now_user - prev_user
try:
cpu_utilization = elapsed_user / elapsed_wallclock
except ZeroDivisionError:
cpu_utilization = 0.0
# On multicore systems running multi-threaded native code, CPU
# utilization can exceed 1; that is, elapsed user time is
# longer than elapsed wallclock time. If this occurs, set
# wall clock time to user time and set CPU utilization to 100%.
if cpu_utilization > 1.0:
cpu_utilization = 1.0
elapsed_wallclock = elapsed_user
# Deal with an odd case reported here: https://github.com/plasma-umass/scalene/issues/124
# (Note: probably obsolete now that Scalene is using the nvidia wrappers, but just in case...)
# We don't want to report 'nan', so turn the load into 0.
if math.isnan(gpu_load):
gpu_load = 0.0
gpu_time = gpu_load * Scalene.__args.cpu_sampling_rate
Scalene.__stats.total_gpu_samples += gpu_time
python_time = Scalene.__args.cpu_sampling_rate
c_time = elapsed_virtual - python_time
if c_time < 0:
c_time = 0
# Update counters for every running thread.
new_frames = Scalene.compute_frames_to_record(this_frame)
# Now update counters (weighted) for every frame we are tracking.
total_time = python_time + c_time
# First, find out how many frames are not sleeping. We need
# to know this number so we can parcel out time appropriately
# (equally to each running thread).
total_frames = sum(
1
for (frame, tident, orig_frame) in new_frames
if not is_thread_sleeping[tident]
)
if total_frames == 0:
normalized_time = total_time
else:
normalized_time = total_time / total_frames
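        # For example, if 10ms elapsed and two of the three recorded
        # threads are awake, each awake thread is charged 10ms / 2 = 5ms.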
# Now attribute execution time.
for (frame, tident, orig_frame) in new_frames:
fname = Filename(frame.f_code.co_filename)
lineno = LineNumber(frame.f_lineno)
Scalene.enter_function_meta(frame, Scalene.__stats)
if frame == new_frames[0][0]:
# Main thread.
if not is_thread_sleeping[tident]:
Scalene.__stats.cpu_samples_python[fname][lineno] += (
python_time / total_frames
)
Scalene.__stats.cpu_samples_c[fname][lineno] += (
c_time / total_frames
)
Scalene.__stats.cpu_samples[fname] += (
python_time + c_time
) / total_frames
Scalene.__stats.cpu_utilization[fname][lineno].push(
cpu_utilization
)
Scalene.__stats.gpu_samples[fname][lineno] += (
gpu_time / total_frames
)
Scalene.__stats.gpu_mem_samples[fname][lineno].push(
gpu_mem_used
)
else:
# We can't play the same game here of attributing
# time, because we are in a thread, and threads don't
# get signals in Python. Instead, we check if the
# bytecode instruction being executed is a function
# call. If so, we attribute all the time to native.
# NOTE: for now, we don't try to attribute GPU time to threads.
if not is_thread_sleeping[tident]:
# Check if the original caller is stuck inside a call.
if ScaleneFuncUtils.is_call_function(
orig_frame.f_code,
ByteCodeIndex(orig_frame.f_lasti),
):
# It is. Attribute time to native.
Scalene.__stats.cpu_samples_c[fname][
lineno
] += normalized_time
else:
# Not in a call function so we attribute the time to Python.
Scalene.__stats.cpu_samples_python[fname][
lineno
] += normalized_time
Scalene.__stats.cpu_samples[fname] += normalized_time
Scalene.__stats.cpu_utilization[fname][lineno].push(
cpu_utilization
)
# Clean up all the frames
del new_frames[:]
del new_frames
del this_frame
del is_thread_sleeping
Scalene.__stats.total_cpu_samples += total_time
# Returns final frame (up to a line in a file we are profiling), the thread identifier, and the original frame.
@staticmethod
def compute_frames_to_record(
_this_frame: FrameType,
) -> List[Tuple[FrameType, int, FrameType]]:
"""Collects all stack frames that Scalene actually processes."""
frames: List[Tuple[FrameType, int]] = [
(
cast(
FrameType,
sys._current_frames().get(cast(int, t.ident), None),
),
cast(int, t.ident),
)
for t in threading.enumerate()
if t != threading.main_thread()
]
# Put the main thread in the front.
tid = cast(int, threading.main_thread().ident)
frames.insert(
0,
(
sys._current_frames().get(tid, cast(FrameType, None)),
tid,
),
)
# Process all the frames to remove ones we aren't going to track.
new_frames: List[Tuple[FrameType, int, FrameType]] = []
for (frame, tident) in frames:
orig_frame = frame
if not frame:
continue
fname = frame.f_code.co_filename
# Record samples only for files we care about.
if not fname:
# 'eval/compile' gives no f_code.co_filename. We have
# to look back into the outer frame in order to check
# the co_filename.
back = cast(FrameType, frame.f_back)
fname = Filename(back.f_code.co_filename)
while not Scalene.should_trace(fname):
                # Walk the stack backwards until we hit a frame that
                # IS one we should trace (if there is one); this handles
                # profiled code that is calling deep into library code.
if frame:
frame = cast(FrameType, frame.f_back)
if frame:
fname = frame.f_code.co_filename
else:
break
if frame:
new_frames.append((frame, tident, orig_frame))
del frames[:]
return new_frames
@staticmethod
def enter_function_meta(
frame: FrameType, stats: ScaleneStatistics
) -> None:
"""Update tracking info so we can correctly report line number info later."""
fname = Filename(frame.f_code.co_filename)
lineno = LineNumber(frame.f_lineno)
f = frame
try:
while "<" in Filename(f.f_code.co_name):
f = cast(FrameType, f.f_back)
                # Handle the case where the function whose name is wrapped in angle brackets is at the bottom of the stack.
if f is None:
return
except:
return
if not Scalene.should_trace(f.f_code.co_filename):
return
fn_name = Filename(f.f_code.co_name)
firstline = f.f_code.co_firstlineno
# Prepend the class, if any
while (
f
and f.f_back
and f.f_back.f_code
# NOTE: next line disabled as it is interfering with name resolution for thread run methods
# and Scalene.should_trace(f.f_back.f_code.co_filename)
):
if "self" in f.f_locals:
prepend_name = f.f_locals["self"].__class__.__name__
if "Scalene" not in prepend_name:
fn_name = prepend_name + "." + fn_name
break
if "cls" in f.f_locals:
prepend_name = getattr(f.f_locals["cls"], "__name__", None)
if not prepend_name or "Scalene" in prepend_name:
break
fn_name = prepend_name + "." + fn_name
break
f = f.f_back
stats.function_map[fname][lineno] = fn_name
stats.firstline_map[fn_name] = LineNumber(firstline)
@staticmethod
def alloc_sigqueue_processor(x: Optional[List[int]]) -> None:
"""Handle interrupts for memory profiling (mallocs and frees)."""
stats = Scalene.__stats
curr_pid = os.getpid()
# Process the input array from where we left off reading last time.
arr: List[
Tuple[
int,
str,
float,
float,
str,
Filename,
LineNumber,
ByteCodeIndex,
]
] = []
with contextlib.suppress(FileNotFoundError):
while Scalene.__malloc_mapfile.read():
count_str = Scalene.__malloc_mapfile.get_str()
if count_str.strip() == "":
break
(
action,
alloc_time_str,
count_str,
python_fraction_str,
pid,
pointer,
reported_fname,
reported_lineno,
bytei_str,
) = count_str.split(",")
if int(curr_pid) == int(pid):
arr.append(
(
int(alloc_time_str),
action,
float(count_str),
float(python_fraction_str),
pointer,
Filename(reported_fname),
LineNumber(int(reported_lineno)),
ByteCodeIndex(int(bytei_str)),
)
)
# Iterate through the array to compute the new current footprint
# and update the global __memory_footprint_samples. Since on some systems,
# we get free events before mallocs, force `before` to always be at least 0.
before = max(stats.current_footprint, 0)
prevmax = stats.max_footprint
freed_last_trigger = 0
for (index, item) in enumerate(arr):
(
_alloc_time,
action,
count,
_python_fraction,
pointer,
fname,
lineno,
bytei,
) = item
is_malloc = action == "M"
count /= 1024 * 1024
if is_malloc:
stats.current_footprint += count
stats.max_footprint = max(
stats.current_footprint, stats.max_footprint
)
else:
assert action == "f" or action == "F"
stats.current_footprint -= count
# Force current footprint to be non-negative; this
# code is needed because Scalene can miss some initial
# allocations at startup.
stats.current_footprint = max(0, stats.current_footprint)
if action == "f":
# Check if pointer actually matches
if stats.last_malloc_triggered[2] == pointer:
freed_last_trigger += 1
timestamp = time.monotonic_ns() - Scalene.__start_time
if len(stats.memory_footprint_samples) > 2:
# Compress the footprints by discarding intermediate
# points along increases and decreases. For example:
# if the new point is an increase over the previous
# point, and that point was also an increase,
# eliminate the previous (intermediate) point.
(t1, prior_y) = stats.memory_footprint_samples[-2]
(t2, last_y) = stats.memory_footprint_samples[-1]
y = stats.current_footprint
if (prior_y < last_y and last_y < y) or (
prior_y > last_y and last_y > y
):
# Same direction.
# Replace the previous (intermediate) point.
stats.memory_footprint_samples[-1] = [timestamp, y]
else:
stats.memory_footprint_samples.append([timestamp, y])
else:
stats.memory_footprint_samples.append(
[
timestamp,
stats.current_footprint,
]
)
after = stats.current_footprint
if freed_last_trigger:
if freed_last_trigger > 1:
# Ignore the case where we have multiple last triggers in the sample file,
# since this can lead to false positives.
pass
else:
# We freed the last allocation trigger. Adjust scores.
this_fn, this_ln, _this_ptr = stats.last_malloc_triggered
if this_ln != 0:
mallocs, frees = stats.leak_score[this_fn][this_ln]
stats.leak_score[this_fn][this_ln] = (
mallocs,
frees + 1,
)
stats.last_malloc_triggered = (
Filename(""),
LineNumber(0),
Address("0x0"),
)
allocs = 0.0
last_malloc = (Filename(""), LineNumber(0), Address("0x0"))
malloc_pointer = "0x0"
curr = before
# Go through the array again and add each updated current footprint.
for item in arr:
(
_alloc_time,
action,
count,
python_fraction,
pointer,
fname,
lineno,
bytei,
) = item
is_malloc = action == "M"
if is_malloc and count == NEWLINE_TRIGGER_LENGTH + 1:
stats.memory_malloc_count[fname][lineno] += 1
stats.memory_aggregate_footprint[fname][
lineno
] += stats.memory_current_highwater_mark[fname][lineno]
stats.memory_current_footprint[fname][lineno] = 0
stats.memory_current_highwater_mark[fname][lineno] = 0
continue
# Add the byte index to the set for this line (if it's not there already).
stats.bytei_map[fname][lineno].add(bytei)
count /= 1024 * 1024
if is_malloc:
allocs += count
curr += count
malloc_pointer = pointer
stats.memory_malloc_samples[fname][lineno] += count
stats.memory_python_samples[fname][lineno] += (
python_fraction * count
)
stats.malloc_samples[fname] += 1
stats.total_memory_malloc_samples += count
# Update current and max footprints for this file & line.
stats.memory_current_footprint[fname][lineno] += count
                stats.memory_current_highwater_mark[fname][lineno] = max(
                    stats.memory_current_highwater_mark[fname][lineno],
                    stats.memory_current_footprint[fname][lineno],
                )
stats.memory_max_footprint[fname][lineno] = max(
stats.memory_current_footprint[fname][lineno],
stats.memory_max_footprint[fname][lineno],
)
else:
assert action == "f" or action == "F"
curr -= count
stats.memory_free_samples[fname][lineno] += count
stats.memory_free_count[fname][lineno] += 1
stats.total_memory_free_samples += count
stats.memory_current_footprint[fname][lineno] -= count
# Ensure that we never drop the current footprint below 0.
stats.memory_current_footprint[fname][lineno] = max(
0, stats.memory_current_footprint[fname][lineno]
)
stats.per_line_footprint_samples[fname][lineno].append(
[time.monotonic_ns() - Scalene.__start_time, curr]
)
# If we allocated anything, then mark this as the last triggering malloc
if allocs > 0:
last_malloc = (
Filename(fname),
LineNumber(lineno),
Address(malloc_pointer),
)
stats.allocation_velocity = (
stats.allocation_velocity[0] + (after - before),
stats.allocation_velocity[1] + allocs,
)
if Scalene.__args.memory_leak_detector:
            # Update the leak score if we just increased the max footprint (above a fixed threshold, currently 100MB).
if prevmax < stats.max_footprint and stats.max_footprint > 100:
stats.last_malloc_triggered = last_malloc
fname, lineno, _ = last_malloc
mallocs, frees = stats.leak_score[fname][lineno]
stats.leak_score[fname][lineno] = (mallocs + 1, frees)
@staticmethod
def before_fork() -> None:
"""Executed just before a fork."""
Scalene.stop_signal_queues()
@staticmethod
def after_fork_in_parent(child_pid: int) -> None:
"""Executed by the parent process after a fork."""
Scalene.add_child_pid(child_pid)
Scalene.start_signal_queues()
@staticmethod
def after_fork_in_child() -> None:
"""
Executed by a child process after a fork and mutates the
current profiler into a child.
"""
Scalene.__is_child = True
Scalene.clear_metrics()
if Scalene.__gpu.has_gpu():
Scalene.__gpu.nvml_reinit()
        # Note: __parent_pid of the topmost process is its own pid.
Scalene.__pid = Scalene.__parent_pid
        if "off" not in Scalene.__args or not Scalene.__args.off:
Scalene.enable_signals()
@staticmethod
def memcpy_sigqueue_processor(
_signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
frame: FrameType,
) -> None:
curr_pid = os.getpid()
arr: List[Tuple[str, int, int, int, int]] = []
# Process the input array.
with contextlib.suppress(ValueError):
while Scalene.__memcpy_mapfile.read():
count_str = Scalene.__memcpy_mapfile.get_str()
(
memcpy_time_str,
count_str2,
pid,
filename,
lineno,
bytei,
) = count_str.split(",")
if int(curr_pid) == int(pid):
arr.append(
(
filename,
int(lineno),
int(bytei),
int(memcpy_time_str),
int(count_str2),
)
)
arr.sort()
for item in arr:
filename, linenum, byteindex, _memcpy_time, count = item
fname = Filename(filename)
line_no = LineNumber(linenum)
byteidx = ByteCodeIndex(byteindex)
# Add the byte index to the set for this line.
Scalene.__stats.bytei_map[fname][line_no].add(byteidx)
Scalene.__stats.memcpy_samples[fname][line_no] += int(count)
@staticmethod
@lru_cache(None)
def should_trace(filename: str) -> bool:
"""Return true if the filename is one we should trace."""
if not filename:
return False
if "scalene/scalene" in filename:
# Don't profile the profiler.
return False
if "site-packages" in filename or "/lib/python" in filename:
# Don't profile Python internals by default.
if not Scalene.__args.profile_all:
return False
# Generic handling follows (when no @profile decorator has been used).
profile_exclude_list = Scalene.__args.profile_exclude.split(",")
if any(
prof in filename for prof in profile_exclude_list if prof != ""
):
return False
if filename[0] == "<":
if "<ipython" in filename:
# Profiling code created in a Jupyter cell:
# create a file to hold the contents.
import re
import IPython
# Find the input where the function was defined;
# we need this to properly annotate the code.
result = re.match("<ipython-input-([0-9]+)-.*>", filename)
if result:
# Write the cell's contents into the file.
with open(filename, "w+") as f:
f.write(
IPython.get_ipython().history_manager.input_hist_raw[
int(result.group(1))
]
)
return True
else:
# Not a real file and not a function created in Jupyter.
return False
# If (a) `profile-only` was used, and (b) the file matched
# NONE of the provided patterns, don't profile it.
profile_only_set = set(Scalene.__args.profile_only.split(","))
not_found_in_profile_only = profile_only_set and not any(
prof in filename for prof in profile_only_set
)
if not_found_in_profile_only:
return False
# Now we've filtered out any non matches to profile-only patterns.
# If `profile-all` is specified, profile this file.
if Scalene.__args.profile_all:
return True
# Profile anything in the program's directory or a child directory,
# but nothing else, unless otherwise specified.
filename = os.path.abspath(filename)
return Scalene.__program_path in filename
@staticmethod
def clear_mmap_data() -> None:
if not Scalene.__args.cpu_only:
while Scalene.__malloc_mapfile.read():
pass
while Scalene.__memcpy_mapfile.read():
pass
__done = False
@staticmethod
def start() -> None:
"""Initiate profiling."""
# Scalene.clear_mmap_data()
if not Scalene.__initialized:
print(
"ERROR: Do not try to invoke `start` when you have not called Scalene using one of the methods "
"in https://github.com/plasma-umass/scalene#using-scalene"
)
sys.exit(1)
Scalene.__stats.start_clock()
Scalene.enable_signals()
Scalene.__start_time = time.monotonic_ns()
Scalene.__done = False
@staticmethod
def stop() -> None:
"""Complete profiling."""
Scalene.__done = True
Scalene.disable_signals()
Scalene.__stats.stop_clock()
if Scalene.__args.web and not Scalene.__args.cli:
if Scalene.in_jupyter():
# Force JSON output to profile.json.
Scalene.__args.json = True
Scalene.__output.html = False
Scalene.__output.output_file = "profile.json"
else:
# Check for a browser.
try:
if (
not webbrowser.get()
or type(webbrowser.get()).__name__ == "GenericBrowser"
):
# Could not open a graphical web browser tab;
# act as if --web was not specified
# (GenericBrowser means text-based browsers like Lynx.)
Scalene.__args.web = False
else:
# Force JSON output to profile.json.
Scalene.__args.json = True
Scalene.__output.html = False
Scalene.__output.output_file = "profile.json"
                except Exception:
# Couldn't find a browser.
Scalene.__args.web = False
@staticmethod
def is_done() -> bool:
return Scalene.__done
@staticmethod
def start_signal_handler(
_signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
_this_frame: Optional[FrameType],
) -> None:
for pid in Scalene.child_pids:
Scalene.__orig_kill(pid, Scalene.__signals.start_profiling_signal)
Scalene.start()
@staticmethod
def stop_signal_handler(
_signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
_this_frame: Optional[FrameType],
) -> None:
for pid in Scalene.child_pids:
Scalene.__orig_kill(pid, Scalene.__signals.stop_profiling_signal)
Scalene.stop()
@staticmethod
def disable_signals(retry: bool = True) -> None:
"""Turn off the profiling signals."""
if sys.platform == "win32":
Scalene.timer_signals = False
return
try:
Scalene.__orig_setitimer(Scalene.__signals.cpu_timer_signal, 0)
Scalene.__orig_signal(
Scalene.__signals.malloc_signal, signal.SIG_IGN
)
Scalene.__orig_signal(
Scalene.__signals.free_signal, signal.SIG_IGN
)
Scalene.__orig_signal(
Scalene.__signals.memcpy_signal, signal.SIG_IGN
)
Scalene.stop_signal_queues()
        except Exception:
# Retry just in case we get interrupted by one of our own signals.
if retry:
Scalene.disable_signals(retry=False)
@staticmethod
def exit_handler() -> None:
"""When we exit, disable all signals."""
Scalene.disable_signals()
# Delete the temporary directory.
with contextlib.suppress(Exception):
if not Scalene.__pid:
Scalene.__python_alias_dir.cleanup() # type: ignore
with contextlib.suppress(Exception):
os.remove(f"/tmp/scalene-malloc-lock{os.getpid()}")
@staticmethod
def termination_handler(
_signum: Union[
Callable[[Signals, FrameType], None], int, Handlers, None
],
_this_frame: FrameType,
) -> None:
sys.exit(1)
def profile_code(
self,
code: str,
the_globals: Dict[str, str],
the_locals: Dict[str, str],
) -> int:
# If --off is set, tell all children to not profile and stop profiling before we even start.
if "off" not in Scalene.__args or not Scalene.__args.off:
self.start()
# Run the code being profiled.
exit_status = 0
try:
exec(code, the_globals, the_locals)
except SystemExit as se:
# Intercept sys.exit and propagate the error code.
exit_status = se.code
except KeyboardInterrupt:
# Cleanly handle keyboard interrupts (quits execution and dumps the profile).
print("Scalene execution interrupted.")
except Exception as e:
print("Error in program being profiled:\n", e)
traceback.print_exc()
exit_status = 1
finally:
self.stop()
sys.settrace(None)
# If we've collected any samples, dump them.
if not Scalene.output_profile():
print(
"Scalene: Program did not run for long enough to profile."
)
if Scalene.__args.web and not Scalene.__args.cli:
# Start up a web server (in a background thread) to host the GUI,
# and open a browser tab to the server. If this fails, fail-over
# to using the CLI.
try:
PORT = Scalene.__args.port
# Silence web server output by overriding logging messages.
class NoLogs(http.server.SimpleHTTPRequestHandler):
def log_message(
self, format: str, *args: List[Any]
) -> None:
return
def log_request(
self,
code: Union[int, str] = 0,
size: Union[int, str] = 0,
) -> None:
return
Handler = NoLogs
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer(("", PORT), Handler) as httpd:
import threading
t = threading.Thread(target=httpd.serve_forever)
# Copy files into a new directory and then point the tab there.
import shutil
webgui_dir = pathlib.Path(
tempfile.mkdtemp(prefix="scalene-gui")
)
shutil.copytree(
os.path.join(os.path.dirname(__file__), "scalene-gui"),
os.path.join(webgui_dir, "scalene-gui"),
)
shutil.copy(
"profile.json", os.path.join(webgui_dir, "scalene-gui")
)
os.chdir(os.path.join(webgui_dir, "scalene-gui"))
t.start()
if Scalene.in_jupyter():
from IPython.core.display import HTML, display
from IPython.display import IFrame
display(
IFrame(
src=f"http://localhost:{PORT}/profiler.html",
width=700,
height=600,
)
)
else:
webbrowser.open_new_tab(
f"http://localhost:{PORT}/profiler.html"
)
# Wait long enough for the server to serve the page, and then shut down the server.
time.sleep(5)
httpd.shutdown()
except OSError:
print(f"Scalene: unable to run the Scalene GUI on port {PORT}.")
print("Possible solutions:")
print("(1) Use a different port (with --port)")
print("(2) Use the text version (with --cli)")
print("(3) Upload a generated profile.json file to the web GUI: https://plasma-umass.org/scalene-gui/.")
return exit_status
@staticmethod
def process_args(args: argparse.Namespace) -> None:
Scalene.__args = cast(ScaleneArguments, args)
Scalene.__next_output_time = (
time.perf_counter() + Scalene.__args.profile_interval
)
Scalene.__output.html = args.html
Scalene.__output.output_file = args.outfile
Scalene.__is_child = args.pid != 0
# the pid of the primary profiler
Scalene.__parent_pid = args.pid if Scalene.__is_child else os.getpid()
@staticmethod
def set_initialized() -> None:
Scalene.__initialized = True
@staticmethod
def main() -> None:
(
args,
left,
) = ScaleneParseArgs.parse_args()
Scalene.set_initialized()
Scalene.run_profiler(args, left)
@staticmethod
def run_profiler(
args: argparse.Namespace, left: List[str], is_jupyter: bool = False
) -> None:
# Set up signal handlers for starting and stopping profiling.
if is_jupyter:
Scalene.set_in_jupyter()
if not Scalene.__initialized:
print(
"ERROR: Do not try to manually invoke `run_profiler`.\n"
"To invoke Scalene programmatically, see the usage noted in https://github.com/plasma-umass/scalene#using-scalene"
)
sys.exit(1)
Scalene.__orig_signal(
Scalene.__signals.start_profiling_signal,
Scalene.start_signal_handler,
)
Scalene.__orig_signal(
Scalene.__signals.stop_profiling_signal,
Scalene.stop_signal_handler,
)
if sys.platform != "win32":
Scalene.__orig_siginterrupt(
Scalene.__signals.start_profiling_signal, False
)
Scalene.__orig_siginterrupt(
Scalene.__signals.stop_profiling_signal, False
)
Scalene.__orig_signal(signal.SIGINT, Scalene.interruption_handler)
if not is_jupyter:
did_preload = ScalenePreload.setup_preload(args)
else:
did_preload = False
if not did_preload:
with contextlib.suppress(Exception):
# If running in the background, print the PID.
if os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
# In the background.
print(f"Scalene now profiling process {os.getpid()}")
print(
f" to disable profiling: python3 -m scalene.profile --off --pid {os.getpid()}"
)
print(
f" to resume profiling: python3 -m scalene.profile --on --pid {os.getpid()}"
)
Scalene.__stats.clear_all()
sys.argv = left
with contextlib.suppress(Exception):
if not is_jupyter:
multiprocessing.set_start_method("fork")
try:
Scalene.process_args(args)
progs = None
exit_status = 0
try:
# Look for something ending in '.py'. Treat the first one as our executable.
                progs = [x for x in sys.argv if re.match(r".*\.py$", x)]
# Just in case that didn't work, try sys.argv[0] and __file__.
with contextlib.suppress(Exception):
progs.append(sys.argv[0])
progs.append(__file__)
if not progs:
raise FileNotFoundError
with open(progs[0], "rb") as prog_being_profiled:
# Read in the code and compile it.
try:
code = compile(
prog_being_profiled.read(),
progs[0],
"exec",
)
except SyntaxError:
traceback.print_exc()
sys.exit(1)
# Push the program's path.
program_path = os.path.dirname(os.path.abspath(progs[0]))
sys.path.insert(0, program_path)
if len(args.program_path) > 0:
Scalene.__program_path = os.path.abspath(
args.program_path
)
else:
Scalene.__program_path = program_path
# Grab local and global variables.
if not Scalene.__args.cpu_only:
from scalene import pywhere # type: ignore
pywhere.register_files_to_profile(
list(Scalene.__files_to_profile.keys()),
Scalene.__program_path,
Scalene.__args.profile_all,
)
import __main__
the_locals = __main__.__dict__
the_globals = __main__.__dict__
# Splice in the name of the file being executed instead of the profiler.
the_globals["__file__"] = os.path.abspath(progs[0])
# Some mysterious module foo to make this work the same with -m as with `scalene`.
the_globals["__spec__"] = None
# Do a GC before we start.
gc.collect()
# Start the profiler.
profiler = Scalene(args, Filename(progs[0]))
try:
# We exit with this status (returning error code as appropriate).
                        exit_status = profiler.profile_code(
                            code, the_globals, the_locals
                        )
sys.exit(exit_status)
except StopJupyterExecution:
# Running in Jupyter notebooks
pass
except AttributeError:
# don't let the handler below mask programming errors
raise
except Exception as ex:
template = "Scalene: An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
print(traceback.format_exc())
except (FileNotFoundError, IOError):
if progs:
print("Scalene: could not find input file " + progs[0])
else:
print("Scalene: no input file specified.")
sys.exit(1)
except SystemExit:
pass
except StopJupyterExecution:
pass
except Exception:
print("Scalene failed to initialize.\n" + traceback.format_exc())
sys.exit(1)
finally:
with contextlib.suppress(Exception):
Scalene.__malloc_mapfile.close()
Scalene.__memcpy_mapfile.close()
if not Scalene.__is_child:
Scalene.cleanup_files()
sys.exit(exit_status)
if __name__ == "__main__":
Scalene.main()
| 69,267 | 37.482222 | 130 | py |
scalene | scalene-master/scalene/replacement_fork.py | import os
from scalene.scalene_profiler import Scalene
from scalene.scalene_signals import ScaleneSignals
@Scalene.shim
def replacement_fork(scalene: Scalene) -> None:
"""
    Replaces os.fork() with a version that runs Scalene's fork hooks.
    Works like os.register_at_fork(), but additionally provides the child PID
    to the parent-side hook.
"""
orig_fork = os.fork
def fork_replacement() -> int:
scalene.before_fork()
child_pid = orig_fork()
if child_pid == 0:
scalene.after_fork_in_child()
else:
scalene.after_fork_in_parent(child_pid)
return child_pid
os.fork = fork_replacement
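    # Call-order sketch (illustrative, not part of the shim):
    #   pid = os.fork()   # now fork_replacement
    #   parent: before_fork() -> real fork -> after_fork_in_parent(pid)
    #   child:  before_fork() -> real fork -> after_fork_in_child()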
| 629 | 22.333333 | 87 | py |
scalene | scalene-master/scalene/scalene_funcutils.py | import dis
import sys
from functools import lru_cache
from types import CodeType
from typing import FrozenSet
from scalene.scalene_statistics import ByteCodeIndex
class ScaleneFuncUtils:
"""Utility class to determine whether a bytecode corresponds to function calls."""
# We use these in is_call_function to determine whether a
# particular bytecode is a function call. We use this to
# distinguish between Python and native code execution when
# running in threads.
__call_opcodes: FrozenSet[int] = frozenset(
{
dis.opmap[op_name]
for op_name in dis.opmap
if op_name.startswith("CALL_FUNCTION")
or (sys.version_info >= (3, 11) and op_name.startswith("CALL"))
}
)
@staticmethod
@lru_cache(maxsize=None)
def is_call_function(code: CodeType, bytei: ByteCodeIndex) -> bool:
"""Returns true iff the bytecode at the given index is a function call."""
return any(
(
ins.offset == bytei
and ins.opcode in ScaleneFuncUtils.__call_opcodes
)
for ins in dis.get_instructions(code)
)
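    # Illustrative sketch (hypothetical helper code, not part of the module):
    #
    #   def f() -> None:
    #       print("hi")
    #
    #   call_offsets = [ins.offset for ins in dis.get_instructions(f.__code__)
    #                   if ins.opname.startswith("CALL")]
    #   # For each such offset,
    #   #   ScaleneFuncUtils.is_call_function(f.__code__, ByteCodeIndex(off))
    #   # should return True, while LOAD_*/RETURN_* offsets return False.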
| 1,174 | 30.756757 | 86 | py |
scalene | scalene-master/scalene/replacement_mp_lock.py | import multiprocessing.synchronize
import sys
import threading
from typing import Any
import _multiprocessing
from scalene.scalene_profiler import Scalene
# The _multiprocessing module is entirely undocumented-- the header of the
# acquire function is
# static PyObject * _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, PyObject *timeout_obj)
#
# timeout_obj is parsed as a double
@Scalene.shim
def replacement_mp_semlock(scalene: Scalene) -> None:
class ReplacementSemLock(multiprocessing.synchronize.Lock):
def __enter__(self) -> bool:
timeout = sys.getswitchinterval()
tident = threading.get_ident()
while True:
scalene.set_thread_sleeping(tident)
acquired = self._semlock.acquire(timeout=timeout) # type: ignore
scalene.reset_thread_sleeping(tident)
if acquired:
return True
def __exit__(self, *args: Any) -> None:
super().__exit__(*args)
multiprocessing.synchronize.Lock = ReplacementSemLock # type: ignore
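    # Illustrative note (sketch): the loop above swaps one blocking acquire
    # for repeated short waits, roughly equivalent to
    #
    #   while not semlock.acquire(timeout=sys.getswitchinterval()):
    #       pass  # Scalene can mark the thread sleeping between attempts
    #
    # so time spent waiting on locks is attributed as sleeping, not running.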
| 1,101 | 32.393939 | 115 | py |
scalene | scalene-master/scalene/scalene_apple_gpu.py | import platform
import re
import subprocess
from typing import Tuple
class ScaleneAppleGPU:
"""Wrapper class for Apple integrated GPU statistics."""
def __init__(self) -> None:
assert platform.system() == "Darwin"
self.cmd = (
'DYLD_INSERT_LIBRARIES="" ioreg -r -d 1 -w 0 -c "IOAccelerator"'
)
self.regex_util = re.compile(r'"Device Utilization %"=(\d+)')
self.regex_inuse = re.compile(r'"In use system memory"=(\d+)')
def has_gpu(self) -> bool:
"""Returns true: as far as I am aware, all Macs have had integrated GPUs for some time."""
return True
def nvml_reinit(self) -> None:
"""A NOP, here for compatibility with the nvidia wrapper."""
return
def get_stats(self) -> Tuple[float, float]:
"""Returns a tuple of (utilization%, memory in use)"""
if not self.has_gpu():
return (0.0, 0.0)
try:
in_use = 0.0
util = 0.0
read_process = subprocess.Popen(
self.cmd, shell=True, stdout=subprocess.PIPE
)
if read_process.stdout is not None:
read_process_return = read_process.stdout.readlines()
for line in read_process_return:
decoded_line = line.decode("utf-8")
if "In use system memory" in decoded_line:
in_use_re = self.regex_inuse.search(decoded_line)
if in_use_re:
in_use = float(in_use_re.group(1))
if "Device Utilization %" in decoded_line:
util_re = self.regex_util.search(decoded_line)
if util_re:
util = int(util_re.group(1)) / 1000
if util and in_use:
break
return (util, in_use)
        except Exception:
            pass
return (0.0, 0.0)
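# Illustrative sketch (assumed ioreg output shape, abridged): a matching line
# looks roughly like
#   ... "Device Utilization %"=27 ... "In use system memory"=123456789 ...
# so regex_util captures "27" and regex_inuse captures "123456789" (bytes).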
| 1,969 | 35.481481 | 98 | py |
scalene | scalene-master/scalene/scalene_sigqueue.py | import queue
import threading
from typing import Any, Generic, Optional, TypeVar
T = TypeVar("T")
class ScaleneSigQueue(Generic[T]):
def __init__(self, process: Any) -> None:
self.queue: queue.SimpleQueue[Optional[T]] = queue.SimpleQueue()
self.process = process
self.thread: Optional[threading.Thread] = None
self.lock = threading.RLock() # held while processing an item
def put(self, item: Optional[T]) -> None:
self.queue.put(item)
def get(self) -> Optional[T]:
return self.queue.get()
def start(self) -> None:
# We use a daemon thread to defensively avoid hanging if we never join with it
if not self.thread:
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
def stop(self) -> None:
if self.thread:
self.queue.put(None)
# We need to join all threads before a fork() to avoid an inconsistent
# state, locked mutexes, etc.
self.thread.join()
self.thread = None
def run(self) -> None:
while True:
item = self.queue.get()
if item is None: # None => stop request
break
with self.lock:
self.process(*item)
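# Illustrative usage sketch (`handle_item` is a hypothetical callable; this is
# not part of the original module):
#
#   q: ScaleneSigQueue[tuple] = ScaleneSigQueue(handle_item)
#   q.start()               # spawns the daemon worker thread
#   q.put(("malloc", 42))   # worker calls handle_item("malloc", 42) under lock
#   q.stop()                # enqueues the None sentinel and joins the thread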
| 1,301 | 30 | 86 | py |
scalene | scalene-master/scalene/old/leak_analysis.py | import math
from typing import Any, List, Tuple
import numpy as np
from numpy.random import default_rng
rng = default_rng()
def zlog(x: float) -> float:
"""Redefine log so that if x is <= 0, log x is 0."""
if x <= 0:
return 0
else:
return math.log(x)
def xform(i: float, n: int) -> float:
assert n > 0
return i / n * zlog(i / n)
import operator as op
from functools import reduce
def ncr(n: int, r: int) -> int:
r = min(r, n - r)
numerator = reduce(op.mul, range(n, n - r, -1), 1)
denominator = reduce(op.mul, range(1, r + 1), 1)
return numerator // denominator # or / in Python 2
def choose(n: int, k: int) -> int:
"""
A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in range(1, min(k, n - k) + 1):
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
def approx_binomial(total: int, observed: int, success: float) -> float:
n = total
p = success
q = 1 - success
k = observed
return (
1
/ math.sqrt(2 * math.pi * n * p * q)
* math.exp(-((k - n * p) ** 2) / (2 * n * p * q))
)
def exact_binomial(total: int, observed: int, success: float) -> float:
c = choose(total, observed)
return (
c
* (success**observed) # pow(success, observed)
* (1.0 - success)
** (total - observed) # pow(1.0 - success, total - observed)
)
def binomial(total: int, observed: int, success: float) -> float:
if total * success > 100 and total * (1.0 - success) > 100:
return approx_binomial(total, observed, success)
else:
return exact_binomial(total, observed, success)
def one_sided_binomial_test_ge(
total: int, observed: int, success: float
) -> float:
return sum(binomial(total, o, success) for o in range(observed, total + 1))
def one_sided_binomial_test_lt(
total: int, observed: int, success: float
) -> float:
return 1.0 - one_sided_binomial_test_ge(total, observed, success)
def normalized_entropy(v: List[Any]) -> float:
"""Returns a value between 0 (all mass concentrated in one item) and 1 (uniformly spread)."""
assert len(v) > 0
if len(v) == 1:
return 1
n = int(np.nansum(v))
assert n > 0
h = -sum([xform(i, n) for i in v])
return h / math.log(len(v))
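# Worked example (sketch): for v = [8, 8], n = 16 and
# h = -2 * (0.5 * log(0.5)) = log(2), so h / log(len(v)) == 1.0 (uniform);
# for v = [16, 0] every term of the sum is 0, so the result is 0.0
# (all mass concentrated in one bin).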
def multinomial_pvalue(vec: List[Any], trials: int = 2000) -> float:
"""Returns the empirical likelihood (via Monte Carlo trials) of randomly finding a vector with as low entropy as this one."""
n = np.nansum(vec)
newvec = list(filter(lambda x: not np.isnan(x), vec))
m = len(newvec)
ne = normalized_entropy(newvec)
sampled_vec = rng.multinomial(n, [1 / m for i in range(m)], trials)
# Return the fraction of times the sampled vector has no more entropy than the original vector
return sum(normalized_entropy(v) <= ne for v in sampled_vec) / trials
def argmax(vec: List[Any]) -> int:
"""Return the (first) index with the maximum value."""
m = np.nanmax(vec)
for (index, value) in enumerate(vec):
if value == m:
return index
return 0 # never reached
def harmonic_number(n: int) -> float:
"""Returns an approximate value of n-th harmonic number.
http://en.wikipedia.org/wiki/Harmonic_number
"""
if n < 100:
        return sum(1 / d for d in range(1, n + 1))
# Euler-Mascheroni constant
gamma = 0.57721566490153286060651209008240243104215933593992
return (
gamma
+ math.log(n)
+ 0.5 / n
- 1.0 / (12 * n**2)
+ 1.0 / (120 * n**4)
)
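# Worked example (sketch): H_4 = 1 + 1/2 + 1/3 + 1/4 = 25/12 ~ 2.0833, and the
# asymptotic branch gives gamma + log(4) + 1/8 - 1/192 + 1/30720 ~ 2.0833,
# so the two branches agree to several decimal places.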
def outliers(
vec: List[Any], alpha: float = 0.01, trials: int = 3000
) -> List[Tuple[int, float]]:
"""Returns the indices with values that are significant outliers, with their p-values"""
m = len(vec)
if m == 0:
return []
removed = 0
results = []
# pv = multinomial_pvalue(vec, trials)
# Hack: for now, set pv to alpha because computing exact multinomial p-values is too expensive
pv = alpha
# We use the Benjamin-Yekutieli procedure to control false-discovery rate.
# See https://en.wikipedia.org/wiki/False_discovery_rate#Benjamini%E2%80%93Yekutieli_procedure
c_m = harmonic_number(m)
if pv <= alpha:
while removed < m:
# While we remain below the threshold, remove (zero-out by
# setting to NaN) the max and add its index to the list of
# results with its p-value.
max_index = argmax(vec)
# See how unlikely this bin is to have occurred at random,
# assuming a uniform distribution into bins.
this_pvalue = one_sided_binomial_test_ge(
int(np.nansum(vec)), vec[max_index], 1 / (m - removed)
)
# print("max_index = ", max_index, "p-value = ", this_pvalue)
if this_pvalue <= (alpha * (removed + 1) / (m * c_m)):
results.append((max_index, this_pvalue))
vec[max_index] = np.nan
removed += 1
else:
break
return results
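# Worked example (sketch): with m = 5 bins and alpha = 0.01, c_m = H_5 ~ 2.2833,
# so the first removal requires p <= 0.01 * 1 / (5 * 2.2833) ~ 8.8e-4, the
# second p <= 0.01 * 2 / (5 * 2.2833), and so on: the acceptance threshold
# grows by alpha / (m * c_m) with each bin removed, as in the loop above.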
if __name__ == "__main__":
# Run a simple test.
print(outliers([1000, 8, 8, 1, 0], alpha=0.01, trials=10000))
print(outliers([8, 8, 1, 0], alpha=0.01, trials=10000))
print(outliers([8, 1, 0], alpha=0.01, trials=10000))
print(outliers([1, 0], alpha=0.01, trials=10000))
| 5,533 | 29.240437 | 129 | py |
WasabiDataset | WasabiDataset-master/preprocessing.py | from time import time
import spacy as spacy
from gensim.models.phrases import Phraser, Phrases
from gensim.utils import simple_preprocess
def flatten_list(lst):
return [item for sublist in lst for item in sublist]
def lemmatization(spacy_nlp, texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
texts_out = []
for sent in texts:
doc = spacy_nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
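# Illustrative sketch (assumes the en_core_web_sm model is installed; exact
# output may vary across spaCy versions):
#
#   nlp = spacy.load("en_core_web_sm", disable=['parser', 'ner'])
#   lemmatization(nlp, [["the", "cats", "ran", "quickly"]])
#   # -> roughly [['cat', 'run', 'quickly']]; 'the' is dropped because DET
#   #    is not among the allowed POS tags.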
def complex_preprocess(corpus):
t = time()
unigrams = list(map(lambda text: simple_preprocess(text, min_len=1, max_len=100), corpus))
print('Extracted', len(set(flatten_list(unigrams))), 'unigrams:', time() - t, '\t', unigrams[0][:10])
bigram_model = Phraser(Phrases(unigrams))
unigrams_bigrams = [bigram_model[text] for text in unigrams]
del unigrams
print('Extracted', len(set(flatten_list(unigrams_bigrams))), 'uni/bigrams:', time() - t, '\t', [b for b in unigrams_bigrams[0] if '_' in b][:10])
spacy_nlp = spacy.load("en_core_web_sm", disable=['parser', 'ner'])
lemmatized_tokens = lemmatization(spacy_nlp, unigrams_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
del spacy_nlp
del unigrams_bigrams
print('Extracted', len(set(flatten_list(lemmatized_tokens))), 'lemmas:', time() - t, '\t', lemmatized_tokens[0])
return lemmatized_tokens | 1,405 | 41.606061 | 149 | py |
muTable | muTable-main/src/main.py | import multiprocessing
from multiprocessing.managers import BaseManager
from mock_tap_receiver import start_tap_receiving
from calibration import ArucoBasedCalibration
from depth_calibration import DepthCalibration
from hand_location_detector import start_hand_tracking
from event_manager import start_receving_tap_events_with_location
from instruments.drums.drums import start_playing_drums, Drums
from utils import get_aruco_image
from projection import Projection, start_projecting
from ble_tap_receiver import left_tap_receiver, right_tap_receiver
import cv2
if __name__ == "__main__":
width = 1920
height = 1080
space_for_ui = 0.15
is_debug_mode = False
    # Create IPC pipes: raw tap events, sound triggers, and located taps.
tap_sender_conn, tap_receiver_conn = multiprocessing.Pipe()
sound_signal_sender_conn, sound_signal_receiver_conn = multiprocessing.Pipe()
tap_location_sender_conn, tap_location_receiver_conn = multiprocessing.Pipe()
# Do calibration step
aruco_image, aruco_dict = get_aruco_image(height, height)
if is_debug_mode:
cv2.imwrite("markers.jpg", aruco_image)
drums = Drums(width, height, space_for_ui)
drum_with_ui_image = drums.get_full_image_with_ui()
if is_debug_mode:
cv2.imwrite("./drums.jpg", drum_with_ui_image)
BaseManager.register('ProjectionData', Projection)
manager = BaseManager()
manager.start()
projectionData = manager.ProjectionData(aruco_image, "L")
projectionProcess = multiprocessing.Process(target=start_projecting, args=[projectionData])
projectionProcess.start()
calibration = ArucoBasedCalibration(aruco_image, aruco_dict)
calibration_matrix = calibration.start_calibrating()
calibration.release_resources()
if calibration_matrix is None:
print("Couldn't Perform Calibration")
exit()
print("Calibration Matrix = ", calibration_matrix)
projectionData.update_pic(drum_with_ui_image, "RGB")
depthCalibration = DepthCalibration()
surface_depth = depthCalibration.start_calibrating()
depthCalibration.release_resources()
print("Surface Depth = ", surface_depth)
# create tap detector object
# leftTapDetectorProcess = multiprocessing.Process(target=start_tap_receiving, args=(tap_sender_conn,))
leftTapDetectorProcess = multiprocessing.Process(target=left_tap_receiver, args=(tap_sender_conn,))
rightTapDetectorProcess = multiprocessing.Process(target=right_tap_receiver, args=(tap_sender_conn,))
# create hand location detector
handLocationDetectionProcess = multiprocessing.Process(target=start_hand_tracking, args=(
calibration_matrix, surface_depth, tap_receiver_conn, tap_location_sender_conn))
# start receiving tap events with location
tapLocationProcess = multiprocessing.Process(target=start_receving_tap_events_with_location, args=(
width, height, space_for_ui, tap_location_receiver_conn, sound_signal_sender_conn, projectionData))
# create musical instrument
instrumentProcess = multiprocessing.Process(target=start_playing_drums,
args=(width, height, sound_signal_receiver_conn, projectionData))
# running processes
leftTapDetectorProcess.start()
rightTapDetectorProcess.start()
handLocationDetectionProcess.start()
tapLocationProcess.start()
instrumentProcess.start()
# wait until processes finish
leftTapDetectorProcess.join()
rightTapDetectorProcess.join()
handLocationDetectionProcess.join()
tapLocationProcess.join()
instrumentProcess.join()
projectionProcess.terminate()
| 3,613 | 37.446809 | 113 | py |
muTable | muTable-main/src/sound_event.py | from dataclasses import dataclass
@dataclass
class SoundEvent:
"""Data Class for Sound Event Object"""
intensity: float
locationX: float
locationY: float
@dataclass
class TapLocationEvent:
"""Data Class for Sound Event Object"""
intensity: float
locationX: float
locationY: float
| 315 | 17.588235 | 43 | py |
muTable | muTable-main/src/camera.py | import pyrealsense2 as rs
import numpy as np
import time
import multiprocessing
class Camera:
def __init__(self):
self.pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# Start streaming
profile = self.pipeline.start(config)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
self.depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale = ", self.depth_scale)
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
self.align = rs.align(align_to)
def get_current_image(self):
print("Fetch an Image")
try:
frames = self.pipeline.wait_for_frames()
aligned_frames = self.align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
return color_image
except Exception as e:
raise e
def get_current_depth_image(self):
try:
frames = self.pipeline.wait_for_frames()
aligned_frames = self.align.process(frames)
depth_frame = aligned_frames.get_depth_frame()
depth_image = np.asanyarray(depth_frame.get_data())
return depth_image
except Exception as e:
raise e
def get_color_and_depth_image(self):
try:
frames = self.pipeline.wait_for_frames()
aligned_frames = self.align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
depth_frame = aligned_frames.get_depth_frame()
depth_image = np.asanyarray(depth_frame.get_data())
return color_image, depth_image
except Exception as e:
raise e
def stop_pipeline(self):
self.pipeline.stop()
def acquire_images():
camera = Camera()
while 1:
img = camera.get_current_image()
time.sleep(1)
if __name__ == '__main__':
p = multiprocessing.Process(target=acquire_images, args=())
p.start()
while 1:
time.sleep(1)
| 2,537 | 31.961039 | 87 | py |
muTable | muTable-main/src/ble_tap_receiver.py | import asyncio
from typing import Any
from tap import *
import multiprocessing
from bleak import BleakClient, discover
import struct
class Connection:
client: BleakClient = None
def __init__(
self,
loop: asyncio.AbstractEventLoop,
read_characteristic: str,
tap_sender_conn,
arduino_mac: str,
hand
):
self.loop = loop
self.read_characteristic = read_characteristic
self.connected = False
self.connected_device = None
self.tap_sender_conn = tap_sender_conn
self.arduino_mac = arduino_mac
self.hand = hand
def on_disconnect(self, client: BleakClient):
self.connected = False
# Put code here to handle what happens on disconnet.
print(f"Disconnected from {self.connected_device}!")
async def cleanup(self):
if self.client:
await self.client.stop_notify(self.read_characteristic)
await self.client.disconnect()
async def manager(self):
print("Starting connection manager.")
while True:
if self.client:
await self.connect()
else:
await self.select_device()
            await asyncio.sleep(15.0)
async def connect(self):
if self.connected:
return
try:
await self.client.connect()
self.connected = await self.client.is_connected()
if self.connected:
print(F"Connected to {self.connected_device}")
self.client.set_disconnected_callback(self.on_disconnect)
await self.client.start_notify(
self.read_characteristic, self.notification_handler,
)
while True:
if not self.connected:
break
                    await asyncio.sleep(3.0)
else:
print(f"Failed to connect to {self.connected_device}")
except Exception as e:
print(e)
async def select_device(self):
print("Bluetooh LE hardware warming up...")
        await asyncio.sleep(2.0)  # Wait for BLE to initialize.
devices = await discover()
discovered_devices_macs = []
for i, device in enumerate(devices):
discovered_devices_macs.append(device.address)
# print(f"{i}: {device.address}")
if str.upper(self.arduino_mac) not in discovered_devices_macs:
print("Device Not Found")
return
print(f"Connecting to {self.arduino_mac}")
self.connected_device = self.arduino_mac
self.client = BleakClient(self.arduino_mac, loop=self.loop)
def notification_handler(self, sender: str, data: Any):
tap_detected = struct.unpack('<f', data)
print("Data from - ", self.hand, " is = ", tap_detected)
self.tap_sender_conn.send(Tap(self.hand, tap_detected[0], True))
def left_tap_receiver(tap_sender_conn):
read_characteristic = "C8F88594-2217-0CA6-8F06-A4270B675D68"
left_arduino_mac = "5d:c8:38:fd:3c:2f"
# Create the event loop.
loop = asyncio.get_event_loop()
connection = Connection(
loop, read_characteristic, tap_sender_conn, left_arduino_mac, Hand.LEFT
)
try:
asyncio.ensure_future(connection.manager())
loop.run_forever()
except KeyboardInterrupt:
print("User stopped program.")
finally:
print("Disconnecting...")
loop.run_until_complete(connection.cleanup())
def right_tap_receiver(tap_sender_conn):
read_characteristic = "C8F88594-2217-0CA6-8F06-A4270B675D68"
right_arduino_mac = "24:55:06:86:c2:a3"
# Create the event loop.
loop = asyncio.get_event_loop()
connection = Connection(
loop, read_characteristic, tap_sender_conn, right_arduino_mac, Hand.RIGHT
)
try:
asyncio.ensure_future(connection.manager())
loop.run_forever()
except KeyboardInterrupt:
print("User stopped program.")
finally:
print("Disconnecting...")
loop.run_until_complete(connection.cleanup())
if __name__ == '__main__':
tap_sender_conn, tap_receiver_conn = multiprocessing.Pipe()
leftTapDetectorProcess = multiprocessing.Process(target=left_tap_receiver, args=(tap_sender_conn,))
rightTapDetectorProcess = multiprocessing.Process(target=right_tap_receiver, args=(tap_sender_conn,))
leftTapDetectorProcess.start()
rightTapDetectorProcess.start()
leftTapDetectorProcess.join()
rightTapDetectorProcess.join()
| 4,658 | 32.76087 | 105 | py |
muTable | muTable-main/src/event_manager.py | from sound_event import SoundEvent
from instruments.drums.drums import Drums
import time
def play_predefined_sound(width, height, projectionData):
drums = Drums(width, height)
pieces = drums.pieces
highlighed_images = drums.get_highlighted_images()
time_delay = 0.35
intensity = 1.8
for num_reps in range(1):
for i in range(2):
drums.play_sound_from_point(SoundEvent(intensity, pieces[2].shape.center[0], pieces[2].shape.center[1]))
projectionData.update_pic(highlighed_images["Piece3"], "RGB")
time.sleep(time_delay)
drums.play_sound_from_point(SoundEvent(intensity, pieces[0].shape.center[0], pieces[0].shape.center[1]))
projectionData.update_pic(highlighed_images["Piece1"], "RGB")
time.sleep(time_delay)
for i in range(2):
drums.play_sound_from_point(SoundEvent(intensity, pieces[1].shape.center[0], pieces[1].shape.center[1]))
projectionData.update_pic(highlighed_images["Piece2"], "RGB")
time.sleep(time_delay)
drums.play_sound_from_point(SoundEvent(intensity, pieces[0].shape.center[0], pieces[0].shape.center[1]))
projectionData.update_pic(highlighed_images["Piece1"], "RGB")
time.sleep(time_delay)
projectionData.update_pic(drums.get_full_image_with_ui(), "RGB")
def start_receving_tap_events_with_location(width, height, space_for_ui, tap_location_receiver_conn,
sound_signal_sender_conn, projectionData):
while 1:
tap_location_event = tap_location_receiver_conn.recv()
if tap_location_event.locationX > (1 - space_for_ui) * width:
play_predefined_sound(width, height, projectionData)
print("UI Event Detected")
else:
print("Instrument Event Detected")
sound_signal_sender_conn.send(
SoundEvent(tap_location_event.intensity, tap_location_event.locationX, tap_location_event.locationY))
| 2,026 | 46.139535 | 117 | py |
muTable | muTable-main/src/utils.py | import numpy as np
from cv2 import aruco
def get_aruco_image(width, height):
image_size = (height, width)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
aruco_image = np.ones(image_size, dtype=np.uint8) * 255
n = 9
for i in range(0, n):
row = i // 3
col = i % 3
img = aruco.drawMarker(aruco_dict, i + 1, int(0.3 * width))
x = int((0.025 * (col + 1) + 0.3 * col) * width)
y = int((0.025 * (row + 1) + 0.3 * row) * height)
aruco_image[y:y + img.shape[0], x:x + img.shape[1]] = img
return aruco_image, aruco_dict | 591 | 30.157895 | 67 | py |
muTable | muTable-main/src/depth_calibration.py | import numpy as np
from camera import Camera
class DepthCalibration:
def __init__(self):
# TODO: Camera object should be global
self.camera = Camera()
def start_calibrating(self, max_tries=60):
try_count = 0
sum_projection_depths = 0.0
while try_count < max_tries:
depth_image = self.camera.get_current_depth_image()
dmap = depth_image.reshape(-1)
unique, counts = np.unique(dmap[dmap > 1], return_counts=True)
projection_depth = unique[np.argmax(counts)] * self.camera.depth_scale
sum_projection_depths += projection_depth
try_count += 1
        return sum_projection_depths / try_count
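    # Worked example (sketch, hypothetical values): if the nonzero raw depths
    # occurred as {1200: 3 px, 1500: 90 px, 1510: 7 px}, then
    # np.unique(..., return_counts=True) gives unique=[1200, 1500, 1510] and
    # counts=[3, 90, 7]; argmax selects 1500, the modal depth, which times a
    # depth_scale of 0.001 m/unit yields a 1.5 m surface estimate.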
def release_resources(self):
self.camera.stop_pipeline()
| 780 | 30.24 | 82 | py |
muTable | muTable-main/src/calibration.py | import numpy as np
import cv2
from cv2 import aruco
import time
from camera import Camera
class ArucoBasedCalibration:
def __init__(self, aruco_image, aruco_dict):
# TODO: Camera object should be global
self.camera = Camera()
self.aruco_dict = aruco_dict
self.count_threshold = 6
self.parameters = aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(aruco_image, self.aruco_dict, parameters=self.parameters)
self.image_plane_coordinates = {}
for i in range(ids.shape[0]):
self.image_plane_coordinates[ids[i][0]] = corners[i][0]
print("Image Plane Coordinates - ", self.image_plane_coordinates)
def start_calibrating(self, max_tries=15):
try_count = 0
while try_count < max_tries:
curr_image = self.camera.get_current_image()
corners, ids, rejectedImgPoints = aruco.detectMarkers(curr_image, self.aruco_dict,
parameters=self.parameters)
projected_world_coordinates = np.zeros((0, 2))
image_world_coordinates = np.zeros((0, 2))
if ids is not None and ids.shape[0] >= self.count_threshold:
print(ids)
for i in range(ids.shape[0]):
image_world_coordinates = np.vstack([image_world_coordinates, self.image_plane_coordinates[ids[i][0]]])
projected_world_coordinates = np.vstack([projected_world_coordinates, corners[i][0]])
homo_matrix, _ = cv2.findHomography(projected_world_coordinates, image_world_coordinates)
return homo_matrix
time.sleep(0.5)
try_count += 1
print("Calibration Failed")
return None
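    # Illustrative sketch (not part of the class): applying the returned 3x3
    # homography H to a camera pixel (x, y) uses homogeneous coordinates:
    #
    #   p = np.dot(H, np.array([x, y, 1]))
    #   u, v = p[0] / p[2], p[1] / p[2]   # coordinates in the projected image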
def release_resources(self):
self.camera.stop_pipeline()
| 1,889 | 39.212766 | 123 | py |
muTable | muTable-main/src/mock_tap_receiver.py | import time
from tap import *
class MockTapReceiver:
def __init__(self, tap_sender_pipe_connection):
print("Initialize Bluetooth and all")
self.tap_sender_pipe_connection = tap_sender_pipe_connection
def start_receiving(self):
print("Start Receiving Tap Events")
while 1:
print("Sending Tap Event")
self.tap_sender_pipe_connection.send(Tap(Hand.LEFT, 1.4, checkHandLocation=True))
time.sleep(0.5)
def start_tap_receiving(tap_sender_conn):
tapDetector = MockTapReceiver(tap_sender_conn)
tapDetector.start_receiving()
| 606 | 26.590909 | 93 | py |
muTable | muTable-main/src/projection.py | import pyglet
from pyglet.canvas import Display
import cv2
class Projection:
    def __init__(self, img, channels="RGBA"):
        # The negative pitch tells pyglet the rows are stored top-to-bottom
        # (OpenCV order), so the image is not rendered upside down.
        self.pic = pyglet.image.ImageData(img.shape[1], img.shape[0], channels, img.tobytes(),
                                          -1 * img.shape[1] * len(channels))
def update_pic(self, img, channels="RGBA"):
self.pic = pyglet.image.ImageData(img.shape[1], img.shape[0], channels, img.tobytes(),
-1 * img.shape[1] * len(channels))
def get_pic(self):
return self.pic
def start_projecting(projection_data):
screens = Display().get_screens()
window = pyglet.window.Window(fullscreen=True, screen=screens[1])
def update(dt):
pass
@window.event
def on_draw():
window.clear()
projection_data.get_pic().blit(0, 0)
pyglet.clock.schedule_interval(update, 1 / 30.0)
pyglet.app.run()
| 931 | 26.411765 | 94 | py |
muTable | muTable-main/src/hand_location_detector.py | import time
import pyrealsense2 as rs
import numpy as np
import cv2
import mediapipe as mp
from sound_event import TapLocationEvent
from camera import Camera
from tap import *
class HandLocationDetector:
    def __init__(self, calibration_matrix, surface_depth, tap_receiver_conn, tap_location_sender_conn):
self.mp_hands = mp.solutions.hands
self.hands = self.mp_hands.Hands(model_complexity=0, min_detection_confidence=0.5, min_tracking_confidence=0.5)
self.tap_receiver_conn = tap_receiver_conn
self.tap_location_sender_conn = tap_location_sender_conn
        self.calibration_matrix = calibration_matrix
self.surface_depth = surface_depth
# TODO : Camera object should be shared as it is used by multiple modules
self.camera = Camera()
self.left_hand_location = None
self.right_hand_location = None
def start_hand_tracking(self):
while 1:
detected_tap = self.tap_receiver_conn.recv()
already_set_value = False
if detected_tap.hand == Hand.LEFT and self.left_hand_location is not None:
already_set_value = True
if detected_tap.hand == Hand.RIGHT and self.right_hand_location is not None:
already_set_value = True
if detected_tap.checkHandLocation or not already_set_value:
print("Received the message: {}".format(detected_tap))
start_time_hand = time.time()
curr_image, curr_depth_image = self.camera.get_color_and_depth_image()
curr_image.flags.writeable = False
curr_image = cv2.cvtColor(curr_image, cv2.COLOR_BGR2RGB)
results = self.hands.process(curr_image)
curr_image.flags.writeable = True
handedness = []
if results.multi_handedness:
handedness = [hdnss.classification[0].label for hdnss in results.multi_handedness]
print(handedness)
                index_found = -1
                # MediaPipe handedness labels assume a mirrored (selfie) view,
                # so in this unmirrored camera feed a physical left hand is
                # reported as "Right" and vice versa.
                if detected_tap.hand == Hand.LEFT and "Right" in handedness:
index_found = handedness.index("Right")
elif detected_tap.hand == Hand.RIGHT and "Left" in handedness:
index_found = handedness.index("Left")
if results.multi_hand_landmarks and index_found != -1:
# for hand_landmarks in results.multi_hand_landmarks:
hand_landmarks = results.multi_hand_landmarks[index_found]
hand_x = int(hand_landmarks.landmark[self.mp_hands.HandLandmark.MIDDLE_FINGER_TIP].x * 640)
hand_y = int(hand_landmarks.landmark[self.mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y * 480)
hand_coordinates = np.dot(self.calibration_matrix, np.array([hand_x, hand_y, 1]))
hand_coordinates = hand_coordinates / hand_coordinates[2]
# print("Hand Coordinates - ", hand_coordinates)
min_hand_x = 640
max_hand_x = 0
min_hand_y = 480
max_hand_y = 0
sum_depth_of_landmarks = 0
count_landmarks = 0
depth_hand = 0.0
for landmark in hand_landmarks.landmark:
x = min(int(landmark.x * 640), 639)
y = min(int(landmark.y * 480), 479)
min_hand_x = min(x, min_hand_x)
max_hand_x = max(x, max_hand_x)
min_hand_y = min(y, min_hand_y)
max_hand_y = max(y, max_hand_y)
if curr_depth_image[y, x] > 0:
sum_depth_of_landmarks += curr_depth_image[y, x]
count_landmarks += 1
if count_landmarks > 0:
depth_hand = sum_depth_of_landmarks * self.camera.depth_scale / count_landmarks
# print("Depth of Hand - ", depth_hand)
if detected_tap.hand == Hand.LEFT:
self.left_hand_location = (hand_coordinates[0], hand_coordinates[1])
else:
self.right_hand_location = (hand_coordinates[0], hand_coordinates[1])
if abs(depth_hand - self.surface_depth) < 0.1:
end_time_hand = time.time()
print("Time Taken in ", end_time_hand - start_time_hand)
self.tap_location_sender_conn.send(TapLocationEvent(detected_tap.intensity, hand_coordinates[0], hand_coordinates[1]))
print("Sound Produced Sent!")
else:
if detected_tap.hand == Hand.LEFT:
self.tap_location_sender_conn.send(TapLocationEvent(detected_tap.intensity, self.left_hand_location[0], self.left_hand_location[1]))
else:
self.tap_location_sender_conn.send(TapLocationEvent(detected_tap.intensity, self.right_hand_location[0], self.right_hand_location[1]))
def start_hand_tracking(calibration_matrix, surface_depth, tap_receiver_conn, tap_location_sender_conn):
handLocationDetector = HandLocationDetector(calibration_matrix, surface_depth, tap_receiver_conn, tap_location_sender_conn)
handLocationDetector.start_hand_tracking()
| 5,435 | 51.269231 | 154 | py |
muTable | muTable-main/src/tap.py | from dataclasses import dataclass
from enum import Enum
class Hand(Enum):
LEFT = 1
RIGHT = 2
@dataclass
class Tap:
"""Data Class for Tap Object"""
hand: Hand
intensity: float
checkHandLocation: bool
| 227 | 13.25 | 35 | py |
muTable | muTable-main/src/__init__.py | 0 | 0 | 0 | py | |
muTable | muTable-main/src/instruments/circle.py | import math
class Circle:
def __init__(self, center=(0, 0), radius=10):
self.center = center
self.radius = radius
def is_point_inside(self, point):
assert len(point) == 2
distance_from_center = math.sqrt(
math.pow(point[0] - self.center[0], 2) + math.pow(point[1] - self.center[1], 2))
return distance_from_center < self.radius
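# Quick sanity check (sketch, not part of the original module):
#   c = Circle(center=(0, 0), radius=5)
#   c.is_point_inside((3, 3))   # True: distance ~4.24 < 5
#   c.is_point_inside((3, 4))   # False: distance is exactly 5 (strict <)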
| 392 | 25.2 | 92 | py |
muTable | muTable-main/src/instruments/rectangle.py | import math
class Rectangle:
def __init__(self, topLeft=(0, 0), bottomRight=(0, 0)):
self.topLeft = topLeft
self.bottomRight = bottomRight
def is_point_inside(self, point):
assert len(point) == 2
return (point[0] > self.topLeft[0]) and (point[0] < self.bottomRight[0]) and (point[1] > self.topLeft[1]) and (point[1] < self.bottomRight[1]) | 382 | 30.916667 | 150 | py |
muTable | muTable-main/src/instruments/__init__.py | 0 | 0 | 0 | py | |
muTable | muTable-main/src/instruments/concentric_circle.py | import math
class ConcentricCircle:
def __init__(self, center=(0, 0), radius=10, thickness=10):
self.center = center
self.radius1 = radius
self.radius2 = radius + thickness
self.thickness = thickness
def is_point_inside(self, point):
assert len(point) == 2
distance_from_center = math.sqrt(
math.pow(point[0] - self.center[0], 2) + math.pow(point[1] - self.center[1], 2))
return (distance_from_center > self.radius1) and (distance_from_center < self.radius2)
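# Quick sanity check (sketch, not part of the original module):
#   ring = ConcentricCircle(center=(0, 0), radius=10, thickness=5)
#   ring.is_point_inside((12, 0))   # True: 10 < 12 < 15
#   ring.is_point_inside((5, 0))    # False: inside the hole (5 < 10)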
| 539 | 30.764706 | 94 | py |
muTable | muTable-main/src/instruments/drums/ui.py | import cv2
import numpy as np
from instruments.rectangle import Rectangle
class UI:
def __init__(self, width=1920, height=1080, space_for_ui=0.15):
self.height = height
self.width = width
self.space_for_ui = space_for_ui
self.pieces = self.get_ui_pieces(width, height, space_for_ui)
def get_ui_pieces(self, width, height, space_for_ui):
learnTopLeft = (int(0.86 * width), int(0.3 * height))
learnBottomRight = (int((1 - 0.025) * width), int(0.42 * height))
textCoordinates = (int(0.868 * width), int(0.38 * height))
return [("LEARN", Rectangle(learnTopLeft, learnBottomRight), textCoordinates)]
def get_ui_image(self):
image_size = (self.height, self.width, 3)
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 2
color = (1, 1, 1)
thickness = 5
yellow_color = (255, 204, 153)
ui = np.zeros(shape=image_size, dtype=np.uint8)
for piece in self.pieces:
ui = cv2.rectangle(ui, piece[1].topLeft, piece[1].bottomRight, yellow_color, -1)
ui = cv2.putText(ui, piece[0], piece[2], font, fontScale, color, thickness, cv2.LINE_AA)
            # Rotate the label region 180 degrees so the text reads right-side
            # up when projected onto the table from above.
            x0, y0 = piece[1].topLeft
            x1, y1 = piece[1].bottomRight
            ui[y0:y1, x0:x1, :] = cv2.rotate(ui[y0:y1, x0:x1, :], cv2.ROTATE_180)
return ui
| 1,436 | 41.264706 | 233 | py |
muTable | muTable-main/src/instruments/drums/drums.py | import cv2
import numpy as np
from dataclasses import dataclass
from ..circle import Circle
from sound_event import SoundEvent
import soundfile as sf
from .ui import UI
import pathlib
import time
from _thread import *
def update_pair_pics(projectorData, firstPic, secondPic):
projectorData.update_pic(firstPic, "RGB")
time.sleep(0.5)
projectorData.update_pic(secondPic, "RGB")
@dataclass
class Piece:
"""Data Class for Sound Event Object"""
name: str
shape: Circle
sound: tuple
class Drums:
def __init__(self, width=1920, height=1080, space_for_ui=0.15):
self.height = height
self.width = width
self.space_for_ui = space_for_ui
self.pieces = self.get_drum_pieces(width, height, space_for_ui)
ui = UI(width, height, space_for_ui)
self.ui_image = ui.get_ui_image()
self.highlighted_images_with_ui = self.get_highlighted_images()
self.full_image_with_ui = self.get_full_image_with_ui()
def get_drum_pieces(self, width, height, space_for_ui):
piece_widths = np.array([0.3, 0.23, 0.43]) * (1 - space_for_ui)
angle = np.pi * 0.25
piece2_d = 0.03 + piece_widths[2] / 2 + piece_widths[2] / 2
piece1_x = 0.025 + piece_widths[0] / 2
piece3_x = 1 - space_for_ui - 0.025 - piece_widths[2] / 2
piece2_x = piece3_x - np.cos(angle) * piece2_d
piece1_y = 0.45
piece3_y = 0.45
piece2_y = piece3_y + np.sin(angle) * piece2_d
piece_x_coords = np.array([piece1_x, piece2_x, piece3_x]) * width
piece_y_coords = np.array([piece1_y, piece2_y, piece3_y]) * height
piece_radius = np.array(piece_widths) * width / 2
sound_path = f"{pathlib.Path(__file__).parent.resolve()}/sound_data"
print(sound_path)
pieces = [Piece(f"Piece{i + 1}",
Circle((int(piece_x_coords[i]), int(piece_y_coords[i])), int(piece_radius[i])),
sf.read(f"{sound_path}/{i + 1}.wav", dtype='float32'))
for i in range(3)]
return pieces
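    # Layout sketch (illustrative): pieces 1 and 3 sit on the same horizontal
    # line (y = 0.45 * height); piece 2 is placed a distance piece2_d from
    # piece 3 along a 45-degree diagonal, i.e.
    #   piece2_x = piece3_x - cos(pi/4) * piece2_d
    #   piece2_y = piece3_y + sin(pi/4) * piece2_d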
def get_image(self):
image_size = (self.height, self.width, 3)
yellow_color = (255, 204, 153)
dark_yellow_color = (255, 153, 51)
drums = np.zeros(shape=image_size, dtype=np.uint8)
for piece in self.pieces:
drums = cv2.circle(drums, piece.shape.center, piece.shape.radius, yellow_color, -1)
drums = cv2.circle(drums, piece.shape.center, int(piece.shape.radius * 2 / 5), (0, 0, 0), -1)
drums = cv2.circle(drums, piece.shape.center, piece.shape.radius, dark_yellow_color,
int(0.025 * self.width))
return drums
def get_full_image_with_ui(self):
return self.ui_image + self.get_image()
def get_highlighted_images(self, ui_image=None):
base_drum_image = self.get_image()
image_size = (self.height, self.width, 3)
highlighted_images = {}
for piece in self.pieces:
drums_highlighted = np.zeros(shape=image_size, dtype=np.uint8)
drums_highlighted = cv2.circle(drums_highlighted, piece.shape.center, piece.shape.radius, (255, 255, 255), -1)
highlighted_piece = cv2.addWeighted(base_drum_image, 0.5, drums_highlighted, 0.5, 1.0)
highlighted_images[piece.name] = highlighted_piece
if ui_image is not None:
highlighted_images[piece.name] += ui_image
return highlighted_images
def play_sound_from_point(self, sound_event, projectionData=None):
import sounddevice as sd
for piece in self.pieces:
if piece.shape.is_point_inside((sound_event.locationX, sound_event.locationY)):
                sound_to_play = piece.sound[0]
                if abs(sound_event.intensity) < 1.2:
                    # Soften the sample for low-intensity taps.
                    sound_to_play = sound_to_play * 0.2
                # play_till = {"Piece1": 40000, "Piece2": 8000, "Piece3": 40000}
                # sd.play(piece.sound[0][:play_till[piece.name]], piece.sound[1])
                sd.play(sound_to_play, piece.sound[1])
if projectionData is not None:
update_pair_pics(projectionData, self.highlighted_images_with_ui[piece.name], self.full_image_with_ui)
def start_playing_drums(width, height, sound_signal_receiver_conn, projectionData):
drums = Drums(width, height)
while 1:
print("Playing drums")
sound_event = sound_signal_receiver_conn.recv()
print("Produce Sound = ", sound_event)
drums.play_sound_from_point(sound_event, projectionData)
def start_playing_dummy_drums(width, height, sound_signal_receiver_conn):
drums = Drums(width, height)
pieces = drums.pieces
while 1:
for i in range(2):
drums.play_sound_from_point(SoundEvent(0.1, pieces[2].shape.center[0], pieces[2].shape.center[1]))
time.sleep(0.3)
drums.play_sound_from_point(SoundEvent(0.1, pieces[0].shape.center[0], pieces[0].shape.center[1]))
time.sleep(0.3)
for i in range(2):
drums.play_sound_from_point(SoundEvent(0.1, pieces[1].shape.center[0], pieces[1].shape.center[1]))
time.sleep(0.3)
drums.play_sound_from_point(SoundEvent(0.1, pieces[0].shape.center[0], pieces[0].shape.center[1]))
time.sleep(0.3)
| 5,373 | 40.022901 | 122 | py |
muTable | muTable-main/src/instruments/drums/__init__.py | 0 | 0 | 0 | py | |
muTable | muTable-main/experiments/BLEPython.py | import logging
import asyncio
import platform
import ast
import time
from bleak import BleakClient
from bleak import BleakScanner
from bleak import discover
import struct
import numpy as np
import scipy
from numpy import mean
BLE_UUID_TEST_SERVICE = "9A48ECBA-2E92-082F-C079-9E75AAE428B1"
BLE_UUID_AMPLITUDE = "E3ADBF53-950E-DC1D-9B44-076BE52760D6"
BLE_UUID_FLOAT_VALUE1 = "C8F88594-2217-0CA6-8F06-A4270B675D69"
BLE_UUID_FLOAT_VALUE2 = "C8F88594-2217-0CA6-8F06-A4270B675D70"
BLE_UUID_FLOAT_VALUE3 = "C8F88594-2217-0CA6-8F06-A4270B675D68"
tol = 1e-6
def notification_handler(sender, data):
"""Simple notification handler which prints the data received."""
output_numbers = list(data)
print(output_numbers)
async def run(address):
    previous_value = 0.0
    print('ProtoStax Arduino Nano BLE LED Peripheral Central Service')
    print('Looking for Arduino Nano 33 BLE Sense Peripheral Device...')
    found = False
    async with BleakClient(address) as client:
        print(f'Connected to {address}')
        found = True
        while True:
            # Poll the float characteristic; its value changes on each tap.
            # val1 = await client.read_gatt_char(BLE_UUID_FLOAT_VALUE1)
            # val2 = await client.read_gatt_char(BLE_UUID_FLOAT_VALUE2)
            val3 = await client.read_gatt_char(BLE_UUID_FLOAT_VALUE3)
            z = struct.unpack('<f', val3)
            current_value = round(z[0], 5)
            try:
                if abs(current_value - previous_value) > tol:
                    print("TAP !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                previous_value = current_value
            except Exception as e:
                print(str(e).encode())
            print("Z val", z)
    if not found:
        print('Could not find Arduino Nano 33 BLE Sense Peripheral')
address = "db:eb:8d:2b:72:b9"
loop = asyncio.get_event_loop()
if __name__ == "__main__":
address = "db:eb:8d:2b:72:b9"
previous_value = 0.000000000
print('address:', address)
loop.run_until_complete(run(address))
| 2,427 | 26.590909 | 97 | py |
muTable | muTable-main/experiments/simpleAudioTest.py | import numpy as np
import simpleaudio as sa
frequency = 1000  # Our played note will be 1000 Hz
fs = 44100  # 44100 samples per second
seconds = 10  # Note duration of 10 seconds
# Generate array with seconds*sample_rate steps, ranging between 0 and seconds
t = np.linspace(0, seconds, seconds * fs, False)
# Generate a sine wave at the chosen frequency
note = np.sin(frequency * t * 2 * np.pi)
# Ensure that highest value is in 16-bit range
audio = note * (2**15 - 1) / np.max(np.abs(note))
# Convert to 16-bit data
audio = audio.astype(np.int16)
# Start playback
play_obj = sa.play_buffer(audio, 1, 2, fs)
# Wait for playback to finish before exiting
play_obj.wait_done()
| 661 | 26.583333 | 78 | py |
XFL | XFL-master/python/xfl.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import sys
import client
import scheduler_run
import trainer_run
from common.utils.logger import logger
def add_args(parser):
group = parser.add_mutually_exclusive_group()
group.add_argument("-s", "--scheduler", action="store_true",
help="run scheduler server")
group.add_argument("-a", "--assist_trainer", action="store_true",
help='run assist trainer server')
group.add_argument("-t", "--trainer", type=str, metavar="1",
default="trainer", const="trainer", nargs="?",
help='run trainer server')
group.add_argument("-c", "--client", type=str, metavar="start",
choices=["start", "stop", "status", "algo", "stage"],
help="run command line client")
parser.add_argument("--bar", action="store_true",
help="display a progress bar on scheduler")
parser.add_argument("--config_path", type=str,
default="/opt/config", metavar="/opt/config", nargs="?",
help="config file path")
return parser
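# Illustrative CLI usage (sketch; flags as defined by add_args above, node
# name "node-1" is hypothetical):
#   python xfl.py -s --bar                  # run the scheduler with a progress bar
#   python xfl.py -t node-1                 # run a trainer named node-1
#   python xfl.py -c start --config_path /opt/config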
def main():
parser = add_args(argparse.ArgumentParser(description="XFL - BaseBit Federated Learning"))
args = parser.parse_args()
if args.scheduler:
scheduler_run.main(args.config_path, args.bar)
elif args.client:
# client.main(args.client)
client.main(args.client, args.config_path)
elif args.assist_trainer:
# trainer_run.main("assist_trainer", "assist_trainer")
trainer_run.main("assist_trainer", "assist_trainer", config_path=args.config_path)
elif args.trainer:
# trainer_run.main("trainer", args.trainer)
trainer_run.main("trainer", args.trainer, config_path=args.config_path)
def check_version():
version = "3.9"
current_version = '.'.join([str(sys.version_info.major), str(sys.version_info.minor)])
if version != current_version:
logger.error("Python Version is not: " + version)
sys.exit(-1)
if __name__ == "__main__":
# check_version()
main()
| 2,770 | 35.460526 | 94 | py |
XFL | XFL-master/python/scheduler_run.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import time
import traceback
from concurrent import futures
import grpc
from google.protobuf import json_format
from common.communication.gRPC.python import (control_pb2, scheduler_pb2_grpc, scheduler_pb2,
status_pb2)
from common.communication.gRPC.python.commu import Commu
from common.storage.redis.redis_conn import RedisConn
from common.utils.config_parser import replace_variable
from common.utils.grpc_channel_options import insecure_options
from common.utils.logger import logger, remove_log_handler
from service.fed_config import FedConfig
from service.fed_control import get_trainer_status, trainer_control
from service.fed_job import FedJob
from service.fed_node import FedNode
from service.scheduler import SchedulerService
def start_server(config_path, is_bar):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=insecure_options)
scheduler_pb2_grpc.add_SchedulerServicer_to_server(SchedulerService(is_bar), server)
FedNode.add_server(server)
server.start()
logger.info("Scheduler Service Start...")
logger.info(f"[::]:{FedNode.listening_port}")
while True:
time.sleep(1)
try:
if FedJob.status == status_pb2.TRAINING:
start_time = datetime.datetime.now()
RedisConn.set("XFL_JOB_START_TIME_"+str(FedJob.job_id), str(int(time.time())))
FedConfig.load_config(config_path)
trainer_config = copy.deepcopy(FedConfig.trainer_config)
for stage in trainer_config:
for node_id in trainer_config[stage]:
trainer_config[stage][node_id] = \
replace_variable(trainer_config[stage][node_id], stage_id=stage, job_id=FedJob.job_id, node_id=node_id)
FedConfig.converted_trainer_config = trainer_config
FedJob.init_progress(len(FedConfig.trainer_config))
for stage in range(FedJob.total_stage_num):
logger.info(f"Stage {stage} Start...")
FedJob.current_stage = stage
                    # Publish the current stage metadata and progress bar to Redis.
stage_response = scheduler_pb2.GetStageResponse()
try:
stage_config = FedConfig.trainer_config[FedJob.current_stage]
if len(stage_config) < 1:
stage_response.code = 1
stage_name = ""
else:
# response.code = 0
stage_config = list(stage_config.values())[0]
stage_name = stage_config.get("model_info", {}).get("name", "")
except IndexError:
stage_response.code = 2
stage_name = ""
stage_response.currentStageId = FedJob.current_stage
stage_response.totalStageNum = FedJob.total_stage_num
stage_response.currentStageName = stage_name
bar_response = scheduler_pb2.ProgressBar()
for stage, progress in enumerate(FedJob.progress):
bar_response.stageId = stage
bar_response.stageProgress = progress
stage_response.progressBar.append(bar_response)
RedisConn.set("XFL_JOB_STAGE_" + str(FedJob.job_id), json_format.MessageToJson(stage_response))
                    # --- end of stage info publishing ---
trainer_control(control_pb2.START)
trainer_status = {}
while True:
time.sleep(1)
resp = get_trainer_status()
for i in resp.keys():
if resp[i].code == status_pb2.FAILED:
FedJob.status = status_pb2.FAILED
logger.warning(f"Stage {stage} Failed.")
break
elif resp[i].code == status_pb2.SUCCESSFUL:
trainer_status[i] = resp[i].code
if FedJob.status == status_pb2.FAILED:
break
elif len(trainer_status) == len(FedNode.trainers):
logger.info(f"Stage {stage} Successful.")
break
if FedJob.status == status_pb2.FAILED:
break
if FedJob.status == status_pb2.TRAINING:
logger.info("All Stage Successful.")
logger.info(f"JOB_ID: {FedJob.job_id} Successful.")
RedisConn.set("XFL_JOB_STATUS_"+str(FedJob.job_id), status_pb2.SUCCESSFUL)
FedJob.status = status_pb2.SUCCESSFUL
else:
logger.warning(f"JOB_ID: {FedJob.job_id} Failed.")
trainer_control(control_pb2.STOP)
RedisConn.set("XFL_JOB_STATUS_"+str(FedJob.job_id), status_pb2.FAILED)
end_time = datetime.datetime.now()
RedisConn.set("XFL_JOB_END_TIME_"+str(FedJob.job_id), str(int(time.time())))
cost_time = (end_time - start_time).seconds
logger.info(f"Cost time: {cost_time} seconds.")
remove_log_handler(FedConfig.job_log_handler)
except Exception:
logger.error(traceback.format_exc())
remove_log_handler(FedConfig.job_log_handler)
FedJob.status = status_pb2.FAILED
def main(config_path, is_bar):
FedNode.init_fednode(conf_dir=config_path)
RedisConn.init_redis()
FedJob.init_fedjob()
FedConfig.load_algorithm_list()
Commu(FedNode.config)
start_server(config_path, is_bar)
| 6,575 | 44.351724 | 131 | py |
XFL | XFL-master/python/client.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from common.communication.gRPC.python import (control_pb2, scheduler_pb2,
scheduler_pb2_grpc, status_pb2)
from common.storage.redis.redis_conn import RedisConn
from service.fed_node import FedNode
def start():
channel = FedNode.create_channel("scheduler")
stub = scheduler_pb2_grpc.SchedulerStub(channel)
request = control_pb2.ControlRequest()
request.control = control_pb2.START
response = stub.control(request)
print("JobID:", response.jobId)
print("Code:", response.code)
print("Message:", response.message)
print("NodeLogPath:", response.nodeLogPath)
print("StageNodeLogPath:", response.stageNodeLogPath)
def stop():
channel = FedNode.create_channel("scheduler")
stub = scheduler_pb2_grpc.SchedulerStub(channel)
request = control_pb2.ControlRequest()
request.control = control_pb2.STOP
response = stub.control(request)
print("Code:", response.code)
print("JobID:", response.jobId)
print("Message:", response.message)
def status():
channel = FedNode.create_channel("scheduler")
stub = scheduler_pb2_grpc.SchedulerStub(channel)
request = status_pb2.StatusRequest()
response = stub.status(request)
print("JobID:", response.jobId)
if request.jobId == 0:
print("---------- Scheduler ----------")
print("Code:", response.schedulerStatus.code)
print("Status:", response.schedulerStatus.status)
for node_id in response.trainerStatus.keys():
print(f"---------- Trainer {node_id} ----------")
print("Code:", response.trainerStatus[node_id].code)
print("Status:", response.trainerStatus[node_id].status)
else:
print("Code:", response.jobStatus.code)
print("Job Status:", response.jobStatus.status)
def algo():
channel = FedNode.create_channel("scheduler")
stub = scheduler_pb2_grpc.SchedulerStub(channel)
request = scheduler_pb2.GetAlgorithmListRequest()
response = stub.getAlgorithmList(request)
print("---------- Algorithm List ----------")
for i in response.algorithmList:
print(i)
print(f"---------- Config ----------")
print({i: response.defaultConfigMap[i].config for i in response.algorithmList})
def stage():
channel = FedNode.create_channel("scheduler")
stub = scheduler_pb2_grpc.SchedulerStub(channel)
request = scheduler_pb2.GetStageRequest()
response = stub.getStage(request)
print("---------- Stage ----------")
print("code:", response.code)
print("stage_id:", response.currentStageId)
print("total_stage_num:", response.totalStageNum)
print("stage_name:", response.currentStageName)
def main(cmd, config_path=''):
FedNode.init_fednode(conf_dir=config_path)
RedisConn.init_redis()
if cmd == "start":
start()
elif cmd == "stop":
stop()
elif cmd == "status":
status()
elif cmd == "algo":
algo()
elif cmd == "stage":
stage()
else:
print("Client command is not exists.")
| 3,692 | 33.839623 | 83 | py |
XFL | XFL-master/python/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/trainer_run.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import time
import traceback
from concurrent import futures
from multiprocessing import Process
import grpc
from common.communication.gRPC.python import (control_pb2, scheduler_pb2_grpc,
status_pb2, trainer_pb2_grpc)
from common.communication.gRPC.python.commu import Commu
from common.storage.redis.redis_conn import RedisConn
from common.utils.grpc_channel_options import insecure_options
from common.utils.logger import logger, remove_log_handler
from service.fed_config import FedConfig
from service.fed_job import FedJob
from service.fed_node import FedNode
from service.trainer import TrainerService
multiprocessing.set_start_method('fork')
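# Presumably 'fork' is used so that the processes created below (the trainer service and
# each training run) inherit already-initialised module state such as FedNode and Commu
# from the parent; with 'spawn' they would re-import the modules uninitialised.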
def start_trainer_service(status):
FedJob.status = status
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=insecure_options)
trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
FedNode.add_server(server)
server.start()
logger.info("Trainer Service Start...")
logger.info(f"[::]:{FedNode.listening_port}")
server.wait_for_termination()
def start_server():
status = multiprocessing.Value("i", status_pb2.IDLE)
p = Process(target=start_trainer_service, args=(status,))
p.start()
while True:
time.sleep(1)
try:
if status.value == status_pb2.START_TRAIN:
FedJob.process = Process(target=train, args=(status,))
FedJob.process.start()
status.value = status_pb2.TRAINING
elif status.value == status_pb2.STOP_TRAIN:
if FedJob.process is not None:
FedJob.process.terminate()
logger.info("Model training is stopped.")
FedJob.process = None
status.value = status_pb2.FAILED
remove_log_handler(FedConfig.job_log_handler)
remove_log_handler(FedConfig.job_stage_log_handler)
except Exception:
logger.error(traceback.format_exc())
remove_log_handler(FedConfig.job_log_handler)
remove_log_handler(FedConfig.job_stage_log_handler)
            status.value = status_pb2.FAILED
def train(status):
try:
FedConfig.get_config()
if len(FedConfig.stage_config.keys()) == 1:
logger.info("Train Model passed")
status.value = status_pb2.SUCCESSFUL
return
identity = FedConfig.stage_config["identity"]
inference = FedConfig.stage_config.get("inference", False)
model = FedJob.get_model(identity, FedConfig.stage_config)
if inference:
logger.info(f"{identity} Start Predicting...")
model.predict()
else:
logger.info(f"{identity} Start Training...")
model.fit()
status.value = status_pb2.SUCCESSFUL
logger.info("Train Model Successful.")
except Exception as ex:
logger.error(ex, exc_info=True)
logger.warning("Train Model Failed.")
job_control(control_pb2.STOP)
def job_control(control):
channel = FedNode.create_channel("scheduler")
stub = scheduler_pb2_grpc.SchedulerStub(channel)
request = control_pb2.ControlRequest()
request.control = control
response = stub.control(request)
logger.info(response)
def main(identity, debug_node_id, config_path=''):
FedNode.init_fednode(identity=identity, debug_node_id=debug_node_id, conf_dir=config_path)
RedisConn.init_redis()
Commu(FedNode.config)
start_server()
| 4,189 | 34.811966 | 94 | py |
XFL | XFL-master/python/service/fed_node.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from pathlib import Path
from typing import Callable, Any
import grpc
from grpc_interceptor import ClientCallDetails, ClientInterceptor
from common.utils.config import get_str_config, parse_config
from common.utils.fed_conf_parser import FedConfParser
from common.utils.grpc_channel_options import insecure_options, secure_options
class FedNode(object):
config = {}
node_id = ""
scheduler_host = ""
scheduler_port = ""
redis_host = ""
redis_port = ""
trainers = {}
channels = {}
listening_port = None
@classmethod
def init_fednode(cls, identity: str = "scheduler", debug_node_id: str = "scheduler", conf_dir: str = ''):
path1 = Path(conf_dir, "fed_conf.json")
path2 = Path(conf_dir, "fed_conf_"+debug_node_id+'.json')
if os.path.exists(path1):
path = path1
elif os.path.exists(path2):
path = path2
else:
path = ''
if path == '':
if identity == "scheduler":
cls.node_id = "scheduler"
cls.listening_port = 55001
elif identity == "assist_trainer":
cls.node_id = "assist_trainer"
cls.listening_port = 57001
else:
cls.node_id = os.getenv("__ENIGMA_FEDAPP_LOCAL_TASK_NODE_ID__")
cls.listening_port = 56001
cls.config = parse_config(os.getenv("__ENIGMA_FEDAPP_TASK_NETWORK__"))
cls.config["node_id"] = cls.node_id
for name in cls.config["trainer"]:
if cls.node_id == name:
cls.node_name = cls.config["trainer"][cls.node_id]["name"]
if not hasattr(cls, "node_name"):
for name in cls.config["scheduler"]:
if cls.node_id == name:
cls.node_name = cls.config["scheduler"][cls.node_id]["name"]
cls.scheduler_host = cls.config["scheduler"]["host"]
cls.scheduler_port = cls.config["scheduler"]["port"]
cls.trainers = cls.config["trainer"]
if os.getenv("DEBUG_LISTENING_PORT") is not None:
cls.node_id = debug_node_id
cls.config["node_id"] = debug_node_id
cls.listening_port = get_str_config(os.getenv("DEBUG_LISTENING_PORT"))[debug_node_id]
cls.redis_host = os.getenv("ENIGMA_redis_HOST")
cls.redis_port = '6379'
else:
with open(path, 'r') as f:
conf_dict = json.load(f)
cls.config = FedConfParser.parse_dict_conf(conf_dict, debug_node_id)
cls.node_id = cls.config["node_id"]
cls.node_name = cls.config["node_id"]
for node_id in cls.config["trainer"]:
cls.config["trainer"][node_id]["name"] = node_id
if identity == "scheduler":
cls.listening_port = cls.config["scheduler"]["port"]
else:
for node_id in cls.config["trainer"]:
if node_id == "assist_trainer":
if cls.node_id == cls.config["trainer"]["assist_trainer"]["node_id"]:
cls.listening_port = cls.config["trainer"]["assist_trainer"]["port"]
break
elif cls.node_id == node_id:
cls.listening_port = cls.config["trainer"][node_id]["port"]
break
cls.scheduler_host = cls.config["scheduler"]["host"]
cls.scheduler_port = cls.config["scheduler"]["port"]
cls.trainers = cls.config["trainer"]
cls.redis_host = cls.config["redis_server"]["host"]
cls.redis_port = cls.config["redis_server"]["port"]
@classmethod
def add_server(cls, server):
server.add_insecure_port(f"[::]:{cls.listening_port}")
@classmethod
def create_channel(cls, node_id: str):
if node_id not in cls.channels.keys():
if node_id == "scheduler":
host = cls.scheduler_host
port = cls.scheduler_port
use_tls = cls.config["scheduler"]["use_tls"]
else:
host = cls.trainers[node_id]["host"]
port = cls.trainers[node_id]["port"]
use_tls = cls.trainers[node_id]["use_tls"]
addr_list = port.split("/")
port = addr_list[0]
sub_addr = '/'.join(addr_list[1:])
if use_tls:
root_certificates = cls.load_root_certificates()
credentials = grpc.ssl_channel_credentials(root_certificates=root_certificates)
channel = grpc.secure_channel(f"{host}:{port}", credentials, options=secure_options)
else:
channel = grpc.insecure_channel(f"{host}:{port}", options=insecure_options)
class ClientPathInterceptor(ClientInterceptor):
def intercept(
self,
method: Callable,
request_or_iterator: Any,
call_details: grpc.ClientCallDetails):
path_list = call_details.method.split("/")
path_list.insert(1, sub_addr)
new_method = '/' + os.path.join(*path_list)
new_call_details = ClientCallDetails(
new_method,
call_details.timeout, call_details.metadata,
call_details.credentials, call_details.wait_for_ready,
call_details.compression)
return method(request_or_iterator, new_call_details)
channel = grpc.intercept_channel(channel, ClientPathInterceptor())
cls.channels[node_id] = channel
return cls.channels[node_id]
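    # Note on the port format handled above: a configured "port" such as "8443/gateway/node-a"
    # is split on "/"; the first piece is used as the TCP port and the remainder is prefixed
    # to every gRPC method path by ClientPathInterceptor, apparently to allow routing through
    # a path-based gateway or reverse proxy.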
@classmethod
def init_job_id(cls):
if cls.rs.get("XFL_JOB_ID") is None:
cls.rs.set("XFL_JOB_ID", 0)
@classmethod
def load_root_certificates(cls):
ca_file = os.getcwd() + "/common/certificates/ca.crt"
ca_bundle_file = os.getcwd() + "/common/certificates/ca-bundle.crt"
root_certificates = b""
if os.path.isfile(ca_file):
with open(ca_file, "rb") as f:
root_certificates += f.read()
if os.path.isfile(ca_bundle_file):
with open(ca_bundle_file, "rb") as f:
root_certificates += f.read()
if root_certificates == b"":
return None
else:
return root_certificates
@classmethod
def load_client_cert(cls):
with open(cls.config["cert"]["client.key"], "rb") as f:
private_key = f.read()
with open(cls.config["cert"]["client.crt"], "rb") as f:
certificate_chain = f.read()
with open(cls.config["cert"]["ca.crt"], "rb") as f:
root_certificates = f.read()
return private_key, certificate_chain, root_certificates
| 7,810 | 39.682292 | 109 | py |
XFL | XFL-master/python/service/scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import time
import traceback
import pickle
from google.protobuf import json_format
from common.communication.gRPC.python import (commu_pb2, control_pb2,
scheduler_pb2, status_pb2, checker_pb2)
from common.storage.redis.redis_conn import RedisConn
from common.utils.config_parser import replace_variable
from common.utils.config_checker import check_multi_stage_train_conf, check_cross_stage_input_output
from common.utils.logger import logger, get_node_log_path, get_stage_node_log_path
from service.fed_config import FedConfig
from service.fed_control import get_trainer_status, trainer_control
from service.fed_job import FedJob
from service.fed_node import FedNode
from tqdm import tqdm, trange
class SchedulerService(object):
def __init__(self, is_bar=False):
self.is_bar = is_bar
self.progress_bar = None
def post(self, request, context):
request_key = ''
request_value = bytearray()
for i, r in enumerate(request):
request_value += r.value
if i == 0:
request_key = r.key
# request_info = r.key.split("~")
# name = request_info[1]
# start_end_id = request_info[-1]
# logger.info(f"Start receiving the data of channel {name} from {start_end_id} ...")
RedisConn.put(request_key, bytes(request_value))
response = commu_pb2.PostResponse()
response.code = 0
# logger.info(f"Successfully received the data of channel {name} from {start_end_id}")
return response
def getConfig(self, request, context):
response = scheduler_pb2.GetConfigResponse()
config = copy.deepcopy(FedConfig.trainer_config[FedJob.current_stage][request.nodeId])
config = replace_variable(config, stage_id=FedJob.current_stage, job_id=FedJob.job_id, node_id=request.nodeId)
response.config = json.dumps(config)
response.jobId = FedJob.job_id
response.code = 0
return response
def control(self, request, context):
response = control_pb2.ControlResponse()
response.code = 0
# response.logPath = json.dumps({})
# response.nodeStageLogPath = json.dumps({})
if request.control == control_pb2.STOP:
FedJob.status = status_pb2.FAILED
response.message += f"Stop Scheduler Successful.\n"
trainer_resp = trainer_control(control_pb2.STOP)
response.code = trainer_resp.code
response.message += trainer_resp.message
logger.info("Model training is stopped.")
elif request.control == control_pb2.START:
if FedJob.status not in (status_pb2.IDLE, status_pb2.FAILED, status_pb2.SUCCESSFUL):
response.code = 1
response.message = "Scheduler not ready."
response.jobId = int(FedJob.job_id)
return response
else:
resp = get_trainer_status()
for i in resp.keys():
if resp[i].code not in (status_pb2.IDLE, status_pb2.FAILED, status_pb2.SUCCESSFUL):
response.code = 1
response.message = f"Trainer {i} not ready.."
response.jobId = int(FedJob.job_id)
return response
FedJob.job_id = int(RedisConn.incr("XFL_JOB_ID"))
RedisConn.set("XFL_JOB_STATUS_" + str(FedJob.job_id), status_pb2.TRAINING)
FedJob.status = status_pb2.TRAINING
start = time.time()
while time.time() - start < 5:
if FedConfig.converted_trainer_config != {}:
response.message = 'Ack'
response.dumpedTrainConfig = json.dumps(FedConfig.converted_trainer_config)
raw_node_log_path = get_node_log_path(job_id=FedJob.job_id, node_ids=list(FedNode.trainers.keys()) + ['scheduler'])
raw_stage_node_log_path = get_stage_node_log_path(job_id=FedJob.job_id, train_conf=FedConfig.converted_trainer_config)
for node_id in raw_node_log_path:
node_log_path = scheduler_pb2.control__pb2.NodeLogPath()
node_log_path.nodeId = node_id
node_log_path.logPath = raw_node_log_path[node_id]
response.nodeLogPath.append(node_log_path)
for stage_id in raw_stage_node_log_path:
for node_id in raw_stage_node_log_path[stage_id]:
stage_node_log_path = scheduler_pb2.control__pb2.StageNodeLogPath()
stage_node_log_path.stageId = int(stage_id)
stage_node_log_path.nodeId = node_id
stage_node_log_path.logPath = raw_stage_node_log_path[stage_id][node_id]
response.stageNodeLogPath.append(stage_node_log_path)
FedConfig.converted_trainer_config = {}
break
time.sleep(0.1)
response.jobId = int(FedJob.job_id)
return response
def status(self, request, context):
response = status_pb2.StatusResponse()
request_job_id = int(request.jobId)
if request_job_id == 0:
# return node status
node_status = status_pb2.Status()
response.jobId = FedJob.job_id
node_status.code = FedJob.status
node_status.status = status_pb2.StatusEnum.Name(FedJob.status)
response.schedulerStatus.CopyFrom(node_status)
resp = get_trainer_status()
for t in resp.keys():
response.trainerStatus[t].CopyFrom(resp[t])
elif request_job_id <= FedJob.job_id:
# return job status
start_time = RedisConn.get("XFL_JOB_START_TIME_"+str(request_job_id)) or 0
job_status = status_pb2.Status()
response.jobId = request_job_id
if request_job_id == FedJob.job_id and FedJob.status == status_pb2.TRAINING:
job_status.code = status_pb2.TRAINING
job_status.status = status_pb2.StatusEnum.Name(status_pb2.TRAINING)
response.jobStatus.CopyFrom(job_status)
response.startTime = int(start_time)
return response
redis_job_status = RedisConn.get("XFL_JOB_STATUS_"+str(request_job_id))
if int(redis_job_status) == status_pb2.SUCCESSFUL:
job_status.code = int(redis_job_status)
job_status.status = status_pb2.StatusEnum.Name(int(redis_job_status))
else:
job_status.code = status_pb2.FAILED
job_status.status = status_pb2.StatusEnum.Name(status_pb2.FAILED)
response.jobStatus.CopyFrom(job_status)
end_time = RedisConn.get("XFL_JOB_END_TIME_"+str(request_job_id)) or 0
response.startTime = int(start_time)
response.endTime = int(end_time)
return response
def checkTaskConfig(self, request, context):
response = checker_pb2.CheckTaskConfigResponse()
try:
first_message = True
response.message = ''
configs = json.loads(request.dumpedTrainConfig)
result_multi_stage = check_multi_stage_train_conf(configs)
for stage_id, stage_result in enumerate(result_multi_stage["result"]):
stage_result = checker_pb2.StageResult()
stage_result.stageId = stage_id
stage_result.dumpedCheckedConfig = json.dumps(result_multi_stage["result"])
for itemized_info in result_multi_stage["itemized_result"][stage_id]:
item_info = checker_pb2.ItemInfo()
num_path = 0
for info in itemized_info[:-1]:
path_info = checker_pb2.PathInfo()
if info['type'] == 'dict':
path_info.dictPath.key = info['key']
if first_message:
response.message += configs[stage_id].get('model_info', {}).get('name')
response.message += '-' + str(info['key'])
else:
path_info.listPath.index = info['index']
if first_message:
response.message += '-' + str(info['index'])
num_path += 1
item_info.pathInfo.append(path_info)
if first_message:
if num_path < 2:
response.message = ''
else:
response.message = '-'.join(response.message.split('-')[:1] + response.message.split('-')[-1:])
if len(itemized_info) > 0:
# response.message += ':' + itemized_info[-1]
response.message += ': format error'
first_message = False
response.code = 1
else:
response.message = ''
if len(itemized_info) > 0:
item_info.notes = itemized_info[-1]
stage_result.unmatchedItems.append(item_info)
# path_info = []
# for info in itemized_info[:-1]:
# if info['type'] == 'dict':
# path_info.append(str(info['key']))
# else:
# path_info.append(str(info['index']))
# path_info = '-'.join(path_info)
# path_info += ":" + itemized_info[-1]
# if itemized_info[-1]:
# if path_info not in stage_result.unmatchedItems:
# stage_result.unmatchedItems.append(path_info)
stage_result.passedRules = result_multi_stage["summary"][stage_id][0]
stage_result.checkedRules = result_multi_stage["summary"][stage_id][1]
stage_result.code = 0
response.multiStageResult.stageResultList.append(stage_result)
response.multiStageResult.code = 0
except Exception:
logger.error(traceback.format_exc())
response.multiStageResult.code = 1
try:
result_cross_stage = check_cross_stage_input_output(json.loads(request.dumpedTrainConfig), ignore_list=request.existedInputPath)
for item in result_cross_stage['duplicated']:
item_info = checker_pb2.CrossStageItemInfo()
item_info.dumpedValue = json.dumps(item['value'])
for position in item['position']:
position_info = checker_pb2.CrossStagePositionInfo()
position_info.stageId = position['stage']
for key in position['key_chain']:
path_info = checker_pb2.PathInfo()
path_info.dictPath.key = key
position_info.pathInfo.append(path_info)
item_info.positionList.append(position_info)
response.crossStageResult.duplicatedInputOutput.append(item_info)
for item in result_cross_stage['blank']:
item_info = checker_pb2.CrossStageItemInfo()
item_info.dumpedValue = json.dumps(item['value'])
for position in item['position']:
position_info = checker_pb2.CrossStagePositionInfo()
position_info.stageId = position['stage']
for key in position['key_chain']:
path_info = checker_pb2.PathInfo()
path_info.dictPath.key = key
position_info.pathInfo.append(path_info)
item_info.positionList.append(position_info)
response.crossStageResult.blankInputOutput.append(item_info)
for item in result_cross_stage['nonexistent']:
item_info = checker_pb2.CrossStageItemInfo()
item_info.dumpedValue = json.dumps(item['value'])
for position in item['position']:
position_info = checker_pb2.CrossStagePositionInfo()
position_info.stageId = position['stage']
for key in position['key_chain']:
path_info = checker_pb2.PathInfo()
path_info.dictPath.key = key
position_info.pathInfo.append(path_info)
item_info.positionList.append(position_info)
response.crossStageResult.nonexistentInput.append(item_info)
response.crossStageResult.code = 0
except Exception:
logger.error(traceback.format_exc())
response.crossStageResult.code = 1
return response
def getAlgorithmList(self, request, context):
response = scheduler_pb2.GetAlgorithmListResponse()
response.algorithmList.extend(FedConfig.algorithm_list)
for i in FedConfig.default_config_map.keys():
dc = scheduler_pb2.DefaultConfig()
for j in FedConfig.default_config_map[i].keys():
dc.config[j] = json.dumps(FedConfig.default_config_map[i][j])
response.defaultConfigMap[i].CopyFrom(dc)
return response
def recProgress(self, request, context):
if self.is_bar:
if (self.progress_bar is None and FedJob.progress[FedJob.current_stage] == 0) or \
(self.progress_bar and self.progress_bar.desc != f"Stage {FedJob.current_stage}"):
self.progress_bar = trange(
FedJob.max_progress, desc=f"Stage {FedJob.current_stage}",
leave=True, unit="%"
)
# wait for the progress bar to be printed
time.sleep(0.1)
response = scheduler_pb2.RecProgressResponse()
response.code = 1
if request.progress > FedJob.progress[FedJob.current_stage]:
if self.is_bar:
self.progress_bar.update(request.progress-FedJob.progress[FedJob.current_stage])
if request.progress == FedJob.max_progress:
self.progress_bar.close()
self.progress_bar = None
FedJob.progress[FedJob.current_stage] = request.progress
response.code = 0
stage_response = scheduler_pb2.GetStageResponse()
try:
stage_config = FedConfig.trainer_config[FedJob.current_stage]
if len(stage_config) < 1:
stage_response.code = 1
stage_name = ""
else:
response.code = 0
stage_config = list(stage_config.values())[0]
stage_name = stage_config.get("model_info", {}).get("name", "")
except IndexError:
stage_response.code = 2
stage_name = ""
stage_response.currentStageId = FedJob.current_stage
stage_response.totalStageNum = FedJob.total_stage_num
stage_response.currentStageName = stage_name
bar_response = scheduler_pb2.ProgressBar()
for stage, progress in enumerate(FedJob.progress):
bar_response.stageId = stage
bar_response.stageProgress = progress
stage_response.progressBar.append(bar_response)
RedisConn.set("XFL_JOB_STAGE_" + str(FedJob.job_id), json_format.MessageToJson(stage_response))
return response
def getStage(self, request, context):
job_id = request.jobId
key = "XFL_JOB_STAGE_" + str(job_id)
response = RedisConn.get(key)
if not response:
response = scheduler_pb2.GetStageResponse()
response.code = 3
else:
response = json_format.Parse(response, scheduler_pb2.GetStageResponse())
response.isRunning = True if FedJob.job_id == job_id and FedJob.status == status_pb2.TRAINING else False
return response
| 17,385 | 46.632877 | 140 | py |
XFL | XFL-master/python/service/fed_config.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from common.communication.gRPC.python import scheduler_pb2, scheduler_pb2_grpc
from common.utils.config import load_json_config
from common.utils.logger import (add_job_log_handler,
add_job_stage_log_handler, logger)
from common.xoperator import get_operator
from service.fed_job import FedJob
from service.fed_node import FedNode
class FedConfig(object):
trainer_config = {}
converted_trainer_config = {} # temp
stage_config = {}
algorithm_list = []
default_config_map = {}
job_log_handler = None
job_stage_log_handler = None
@classmethod
@property
def job_id(cls):
return FedJob.job_id
@classmethod
@property
def node_id(cls):
return FedNode.node_id
@classmethod
@property
def node_name(cls):
return FedNode.node_name
@classmethod
@property
def redis_host(cls):
return FedNode.redis_host
@classmethod
@property
def redis_port(cls):
return FedNode.redis_port
@classmethod
def get_label_trainer(cls):
res = cls.stage_config.get("fed_info", {}).get("label_trainer", [])
return res
@classmethod
def get_assist_trainer(cls):
res = cls.stage_config.get("fed_info", {}).get("assist_trainer", [])
if len(res) > 0:
return res[0]
else:
return None
@classmethod
def get_trainer(cls):
res = cls.stage_config.get("fed_info", {}).get("trainer", [])
return res
@classmethod
def load_config(cls, config_path):
cls.job_log_handler = add_job_log_handler(FedJob.job_id, FedNode.node_id)
logger.info("Loading Config...")
cls.trainer_config = cls.load_trainer_config(config_path)
logger.info("Load Config Completed.")
# @classmethod
# def load_trainer_config(cls, config_path):
# trainer_config = {}
# for node_id in FedNode.trainers.keys():
# info = load_json_config(f"{config_path}/trainer_config_{node_id}.json")
# for idx in range(len(info)):
# if idx not in trainer_config.keys():
# trainer_config[idx] = {}
# trainer_config[idx][node_id] = info[idx]
# for stage_id in trainer_config:
# fed_info = {
# "label_trainer": [],
# "trainer": [],
# "assist_trainer": []
# }
# for node_id in trainer_config[stage_id]:
# # identity = trainer_config[stage_id][node_id]["identity"]
# identity = trainer_config[stage_id][node_id].get("identity")
# if identity:
# fed_info[identity].append(node_id)
# trainer_config[stage_id][node_id]["fed_info"] = fed_info
# return trainer_config
@classmethod
def load_trainer_config(cls, config_path):
trainer_config = {}
unconfiged_node_ids = []
op_names = {}
for node_id in FedNode.trainers.keys():
f_path = f"{config_path}/trainer_config_{node_id}.json"
if not os.path.exists(f_path):
unconfiged_node_ids.append(node_id)
continue
info = load_json_config(f_path)
            assert isinstance(info, list), "trainer config should be a list (wrapped in '[]')"
for stage_id in range(len(info)):
if stage_id not in trainer_config.keys():
trainer_config[stage_id] = {}
op_names[stage_id] = []
trainer_config[stage_id][node_id] = info[stage_id]
op_name = info[stage_id].get("model_info", {}).get("name")
if op_name:
op_names[stage_id].append(op_name)
if len(unconfiged_node_ids) > 1:
logger.warning(f"{len(unconfiged_node_ids)} nodes-{unconfiged_node_ids} are not configed.")
if len(unconfiged_node_ids) == 1:
assist_trainer_id = unconfiged_node_ids[0]
for stage_id in op_names:
if len(set(op_names[stage_id])) != 1:
logger.warning(f"Operator names {op_names[stage_id]} not the same in stage {stage_id}.")
continue
op_name = op_names[stage_id][0]
try:
operator = get_operator(op_name, "assist_trainer")
except Exception:
operator = None
if operator is not None:
assist_trainer_config = {
"identity": "assist_trainer",
"model_info": {
"name": op_name
},
}
else:
assist_trainer_config = {}
trainer_config[stage_id][assist_trainer_id] = assist_trainer_config
for stage_id in trainer_config:
fed_info = {
"label_trainer": [],
"trainer": [],
"assist_trainer": []
}
for node_id in trainer_config[stage_id]:
identity = trainer_config[stage_id][node_id].get("identity")
if identity:
fed_info[identity].append(node_id)
trainer_config[stage_id][node_id]["fed_info"] = fed_info
return trainer_config
@classmethod
def get_config(cls):
request = scheduler_pb2.GetConfigRequest()
request.nodeId = FedNode.node_id
channel = FedNode.create_channel("scheduler")
stub = scheduler_pb2_grpc.SchedulerStub(channel)
response = stub.getConfig(request)
cls.stage_config = json.loads(response.config)
FedJob.job_id = response.jobId
cls.job_log_handler = add_job_log_handler(FedJob.job_id, FedNode.node_id)
cls.job_stage_log_handler = add_job_stage_log_handler(
FedJob.job_id, FedNode.node_id, FedJob.current_stage, FedConfig.stage_config.get("model_info", {}).get("name", ""))
if "global_epoch" in cls.stage_config.get("train_info", {}).get("train_params", {}):
FedJob.global_epoch = cls.stage_config["train_info"].get("train_params", {}).get("global_epoch")
logger.info("stage_config: " + str(cls.stage_config))
return response
@classmethod
def load_algorithm_list(cls):
config_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../algorithm/config/'))
algo_info = {}
for algorithm_name in os.listdir(config_dir):
algorithm_conf_dir = os.path.join(config_dir, algorithm_name)
if not os.path.isdir(algorithm_conf_dir):
continue
algo_info[algorithm_name] = []
for party_conf_file in os.listdir(algorithm_conf_dir):
file_name = party_conf_file.split(".")[0]
if file_name != "__init__":
algo_info[algorithm_name].append(file_name)
cls.algorithm_list = list(algo_info.keys())
for k in cls.algorithm_list:
dc = {}
for v in algo_info[k]:
conf = load_json_config(os.path.abspath(
os.path.join(os.path.dirname(__file__), f'../algorithm/config/{k}/{v}.json')))
dc[v] = conf
cls.default_config_map[k] = dc
| 8,244 | 35.973094 | 127 | py |
XFL | XFL-master/python/service/fed_control.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.communication.gRPC.python import (
control_pb2, status_pb2, trainer_pb2_grpc, scheduler_pb2, scheduler_pb2_grpc
)
from common.utils.logger import logger
from service.fed_node import FedNode
class ProgressCalculator:
def __init__(self, *args):
self.param_num = len(args)
self.iter_list = []
self.max_list = args
self.tick_list = [100]
for max_item in self.max_list:
last_tick = self.tick_list[-1]
assert max_item > 0
self.tick_list.append(last_tick / max_item)
def cal_progress(self):
'''
        Each element of iter_list is 1-based, i.e. iteration counters start from 1.
'''
progress = self.tick_list[-1]
for iter_item, tick_item in zip(self.iter_list, self.tick_list[1:]):
progress += (iter_item - 1) * tick_item
_send_progress(int(progress))
def cal_custom_progress(self, *args):
if len(args) != self.param_num:
raise ValueError("The number of args is not equal to the number of max values.")
self.iter_list = args
self.cal_progress()
def cal_horizontal_progress(self, context: dict):
self.iter_list = [context["g_epoch"]]
if len(self.iter_list) != self.param_num:
raise ValueError("The number of args is not equal to the number of max values.")
self.cal_progress()
@staticmethod
def finish_progress(context: dict=None):
_send_progress(100)
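# A small worked example of the progress arithmetic above (not executed; the numbers can
# be checked by hand against __init__ and cal_progress):
#   calc = ProgressCalculator(5, 10)   # e.g. 5 global epochs, 10 batches per epoch
#   calc.cal_custom_progress(2, 4)     # 1-based: epoch 2, batch 4
#   # tick_list == [100, 20.0, 2.0]; progress = 2.0 + (2-1)*20.0 + (4-1)*2.0 = 28
#   # so _send_progress(int(28)) reports 28% to the scheduler.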
def _send_progress(progress):
progress = progress if progress <= 100 else 100
channel = FedNode.create_channel("scheduler")
stub = scheduler_pb2_grpc.SchedulerStub(channel)
request = scheduler_pb2.RecProgressRequest()
# request.stageId = FedJob.current_stage
request.progress = progress
stub.recProgress(request)
return
def trainer_control(control):
response = control_pb2.ControlResponse()
for node_id in FedNode.trainers.keys():
channel = FedNode.create_channel(node_id)
stub = trainer_pb2_grpc.TrainerStub(channel)
request = control_pb2.ControlRequest()
request.control = control
try:
resp = stub.control(request)
if resp.code == 0:
response.message += f"{control_pb2.Operation.Name(control)} Trainer: {node_id} Successful.\n"
else:
response.code = 1
response.message += f"{control_pb2.Operation.Name(control)} Trainer: {node_id} Failed.\n"
except Exception as ex:
logger.error(ex, exc_info=True)
logger.error(f"{control_pb2.Operation.Name(control)} Trainer: {node_id} Failed.")
response.code = 1
response.message += f"{control_pb2.Operation.Name(control)} Trainer: {node_id} Failed.\n"
return response
def get_trainer_status():
response = status_pb2.StatusResponse()
for node_id in FedNode.trainers.keys():
channel = FedNode.create_channel(node_id)
stub = trainer_pb2_grpc.TrainerStub(channel)
request = status_pb2.StatusRequest()
node_status = status_pb2.Status()
try:
resp = stub.status(request)
node_status.code = resp.trainerStatus[node_id].code
node_status.status = resp.trainerStatus[node_id].status
except Exception as ex:
logger.error(ex, exc_info=True)
logger.error(f"Get {node_id} Status Error.")
node_status.code = -1
response.trainerStatus[node_id].CopyFrom(node_status)
return response.trainerStatus
| 4,187 | 36.72973 | 109 | py |
XFL | XFL-master/python/service/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/service/fed_job.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.communication.gRPC.python import status_pb2
from common.storage.redis.redis_conn import RedisConn
from common.xoperator import get_operator
class FedJob(object):
job_id = 0
current_stage = 0
total_stage_num = 0
global_epoch = None
algo_info = None
status = status_pb2.IDLE
progress = []
max_progress = 100
@classmethod
def init_fedjob(cls):
cls.job_id = int(RedisConn.get("XFL_JOB_ID"))
@classmethod
def init_progress(cls, total_stage_num):
cls.total_stage_num = total_stage_num
cls.progress = [0] * total_stage_num
@classmethod
def get_model(cls, role: str, stage_config: dict) -> object:
"""Get model handler
Args:
role (str): The role this node played in the federation. Supported roles are "assist_trainer", "label_trainer" and "trainer".
            stage_config (dict): operator configuration of the current stage for this node.
Returns:
Model handler.
"""
model_name = stage_config["model_info"]["name"]
model = get_operator(name=model_name, role=role)
return model(stage_config)
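# Illustrative call (the operator name is only an assumed example; real names come from
# each stage's "model_info" in the trainer config):
#   model = FedJob.get_model("label_trainer",
#                            {"model_info": {"name": "vertical_xgboost"}, "train_info": {}})
#   model.fit()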
| 1,715 | 30.2 | 137 | py |
XFL | XFL-master/python/service/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.communication.gRPC.python import commu_pb2, control_pb2, status_pb2
from common.storage.redis.redis_conn import RedisConn
# from common.utils.logger import logger
from service.fed_job import FedJob
from service.fed_node import FedNode
class TrainerService(object):
def __init__(self):
pass
def post(self, request, context):
request_key = ''
request_value = bytearray()
for i, r in enumerate(request):
request_value += r.value
if i == 0:
request_key = r.key
# request_info = r.key.split("~")
# name = request_info[1]
# start_end_id = request_info[-1]
# logger.info(f"Start receiving the data of channel {name} from {start_end_id} ...")
RedisConn.put(request_key, bytes(request_value))
# logger.info(f"Successfully received the data of channel {name} from {start_end_id}")
response = commu_pb2.PostResponse()
response.code = 0
return response
def control(self, request, context):
response = control_pb2.ControlResponse()
if request.control == control_pb2.STOP:
FedJob.status.value = status_pb2.STOP_TRAIN
elif request.control == control_pb2.START:
FedJob.status.value = status_pb2.START_TRAIN
response.code = 0
response.message = f"{status_pb2.StatusEnum.Name(request.control)} Completed."
return response
def status(self, request, context):
response = status_pb2.StatusResponse()
node_status = status_pb2.Status()
node_status.code = FedJob.status.value
node_status.status = status_pb2.StatusEnum.Name(FedJob.status.value)
response.trainerStatus[FedNode.node_id].CopyFrom(node_status)
return response
| 2,460 | 38.063492 | 100 | py |
XFL | XFL-master/python/common/xoperator.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib import import_module
from common.xregister import xregister
def get_operator(name: str, role: str) -> object:
""" Get operator by name and role.
Args:
name (str): operator name.
role (str): assist_trainer, trainer or label_trainer.
Returns:
        The operator class found by name and role.
"""
if role not in ["assist_trainer", "trainer", "label_trainer"]:
raise ValueError(f"Identity {role} is not valid, need to be assist_trainer, trainer or label_trainer.")
fed_type = name.split("_")[0]
operator_dir = "_".join(name.split("_")[1:])
if fed_type not in ["horizontal", "vertical", "transfer", "local"]:
raise ValueError(f"Prefix of operator name {name} is not valid, need to be horizontal, vertical or local.")
class_name = [fed_type] + operator_dir.split("_") + role.split("_")
class_name = ' '.join(class_name).title().replace(' ', '')
module_path = '.'.join(["algorithm", "framework", fed_type, operator_dir, role])
try:
module = import_module(module_path)
operator = getattr(module, class_name)
except ModuleNotFoundError:
if class_name in xregister.registered_object:
operator = xregister(class_name)
else:
raise ValueError(f"Operator name: {name}, role: {role} is not defined.")
return operator
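# Example of the resolution performed above (the operator name is assumed for illustration):
#   get_operator("vertical_xgboost", "label_trainer")
#   # -> imports algorithm.framework.vertical.xgboost.label_trainer and returns the class
#   #    VerticalXgboostLabelTrainer (or a class of that name registered in xregister).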
| 1,985 | 35.777778 | 115 | py |
XFL | XFL-master/python/common/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/xregister.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Union
from common.utils.logger import logger
class XRegister():
""" Class for register object, for example loss, metric, dataset, etc..
"""
@property
def registered_object(self):
return self.__dict__
@classmethod
def get_class_name(cls):
return cls.__name__
def __call__(self, name: str) -> Callable:
if not hasattr(self, name):
raise KeyError(f"Calling {name} not registed in {self.get_class_name()}.")
return getattr(self, name)
def register(self, target: Union[Callable, str]):
def add_register_item(key: str, value: Callable):
if not callable(value):
raise TypeError(f"Register object {value} is not callable.")
if hasattr(self, key):
logger.warning(f"Repeated register key {key} to {self.get_class_name()}.")
setattr(self, key, value)
logger.info(f"Register {key} to {self.get_class_name()} successfully.")
            return value
if callable(target):
return add_register_item(target.__name__, target)
return lambda x: add_register_item(target, x)
def unregister(self, name: str):
if hasattr(self, name):
delattr(self, name)
logger.info(f"Unregister {name} from {self.get_class_name()} successfully.")
else:
logger.warning(f"Try to unregister an non-exist key {name} from {self.get_class_name()}.")
xregister = XRegister()
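# Usage sketch (hypothetical names, shown for illustration only):
#   @xregister.register
#   def my_metric(y_true, y_pred):
#       return 0.0
#
#   @xregister.register("my_loss")
#   class MyLoss:
#       pass
#
#   metric_fn = xregister("my_metric")   # look up a registered callable by key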
| 2,152 | 34.883333 | 102 | py |
XFL | XFL-master/python/common/evaluation/metrics.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from typing import List, Optional
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import confusion_matrix, roc_curve
from torch.nn import BCELoss
from common.utils.algo_utils import (BiClsAccuracy, BiClsAuc, BiClsF1, BiClsKS,
BiClsPrecision, BiClsRecall)
from algorithm.core.metrics import get_metric
from common.utils.logger import logger
from multiprocessing import Pool, cpu_count
def cumulative_gain_curve(y_true, y_score, pos_label=None):
"""Adapted from skplot package. Add some simplifications and modifications
`skplot` github: https://github.com/reiinakano/scikit-plot
This function generates the points necessary to plot the Cumulative Gain
Note: This implementation is restricted to the binary classification task.
Args:
y_true (array-like, shape (n_samples)): True labels of the data.
y_score (array-like, shape (n_samples)): Target scores, can either be
probability estimates of the positive class, confidence values, or
non-thresholded measure of decisions (as returned by
decision_function on some classifiers).
pos_label (int or str, default=None): Label considered as positive and
others are considered negative
Returns:
percentages (numpy.ndarray): An array containing the X-axis values for
plotting the Cumulative Gains chart.
gains (numpy.ndarray): An array containing the Y-axis values for one
curve of the Cumulative Gains chart.
Raises:
ValueError: If `y_true` is not composed of 2 classes. The Cumulative
Gain Chart is only relevant in binary classification.
"""
y_true, y_score = np.asarray(y_true), np.asarray(y_score)
classes = np.unique(y_true)
pos_label = 1
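    # NOTE: this simplified port fixes the positive label to 1; the pos_label argument and
    # the classes computed above are not used further, and the ValueError described in the
    # docstring is not raised here.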
# make y_true a boolean vector
y_true = (y_true == pos_label)
sorted_indices = np.argsort(y_score)[::-1]
y_true = y_true[sorted_indices]
gains = np.cumsum(y_true)
percentages = np.arange(start=1, stop=len(y_true) + 1)
gains = gains / float(np.sum(y_true))
percentages = percentages / float(len(y_true))
gains = np.insert(gains, 0, [0])
percentages = np.insert(percentages, 0, [0])
return percentages, gains
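# Hand-checkable example of cumulative_gain_curve:
#   y_true  = [1, 0, 1, 0], y_score = [0.9, 0.8, 0.4, 0.3]
#   ordering by score (descending) gives labels [1, 0, 1, 0], so
#   percentages = [0, 0.25, 0.5, 0.75, 1.0]
#   gains       = [0, 0.5,  0.5, 1.0,  1.0]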
class CommonMetrics:
@staticmethod
def to_str(metrics_dict: dict, presicion: int = 6):
return {k: format(v, f".{presicion}f") for k, v in metrics_dict.items()}
@staticmethod
def _calc_metrics(
metrics: dict,
labels: list,
val_predicts: list,
lossfunc_name: str = None,
loss: float = None,
dataset_type: str = "val",
) -> dict:
metrics_output = {}
metrics_str = {}
if lossfunc_name is not None:
metrics_output[lossfunc_name] = loss
for method in metrics.keys():
metrics_output[method] = metrics[method](labels, val_predicts)
metrics_str = CommonMetrics.to_str(metrics_output)
logger.info(f"Metrics on {dataset_type} dataset: {metrics_str}")
return metrics_output
@staticmethod
def save_metric_csv(
metrics_output: dict,
output_config: dict,
global_epoch: int,
local_epoch: int = None,
dataset_type: str = "val",
) -> None:
metrics_str = CommonMetrics.to_str(metrics_output)
metric_dir = output_config.get("path", "")
if not os.path.exists(metric_dir):
os.makedirs(metric_dir)
file_name = output_config.get("metric_" + dataset_type)["name"]
output_file = os.path.join(metric_dir, file_name)
if local_epoch:
epoch = f"{local_epoch}/{global_epoch}"
else:
epoch = f"{global_epoch}"
if os.path.exists(output_file):
with open(output_file, 'a') as f:
features = []
for k, v in metrics_str.items():
features.append(v)
f.write("%s,%s\n" % (epoch, ','.join(features)))
else:
with open(output_file, 'w') as f:
if local_epoch:
f.write("%s,%s\n" % ("local_epoch/global_epoch", ','.join(
[_ for _ in metrics_str.keys()])))
else:
f.write("%s,%s\n" % ("global_epoch", ','.join(
[_ for _ in metrics_str.keys()])))
features = []
for k, v in metrics_str.items():
features.append(v)
f.write("%s,%s\n" % (epoch, ','.join(features)))
class BiClsMetric:
def __init__(self, epoch, output_file=None, metric_config={}, lossfunc_config={}):
self.metric_functions_map = {
"BCEWithLogitsLoss": BCELoss,
"acc": BiClsAccuracy,
"precision": BiClsPrecision,
"recall": BiClsRecall,
"f1_score": BiClsF1,
"auc": BiClsAuc,
"ks": BiClsKS
}
self.metric_functions = {}
self.metrics = {}
self.epoch = epoch
self.output_file = output_file
if len(lossfunc_config):
loss_function = list(lossfunc_config.keys())[0]
else:
loss_function = None
if loss_function:
if loss_function not in self.metric_functions_map:
raise NotImplementedError(
"Loss function {} is not supported in this model.".format(loss_function))
func = self.metric_functions_map[loss_function]
method_args = inspect.getfullargspec(func).args
defined_args = {}
for (key, value) in lossfunc_config.items():
if key in method_args:
defined_args[key] = value
self.metric_functions[loss_function] = func(**defined_args)
for metric_function in metric_config:
if metric_function == "auc_ks":
logger.warning('metric "auc_ks" in config will be deprecated in future version, '
'please use "auc" and "ks" separately.')
defined_args = {}
for _ in ["auc", "ks"]:
func = self.metric_functions_map[_]
self.metric_functions[_] = func(**defined_args)
continue
if metric_function == "decision_table":
continue
elif metric_function not in self.metric_functions_map:
raise NotImplementedError(
"Metric function {} is not supported in this model.".format(metric_function))
func = self.metric_functions_map[metric_function]
defined_args = {}
self.metric_functions[metric_function] = func(**defined_args)
def calc_metrics(self, y_true: np.array, y_pred: np.array):
fpr, tpr, _ = roc_curve(y_true, y_pred)
cm = confusion_matrix(y_true, y_pred > 0.5)
for metric_function in self.metric_functions:
if metric_function in ("acc", "precision", "recall", "f1_score"):
self.metrics[metric_function] = self.metric_functions[metric_function](
cm).item()
elif metric_function in ("auc", "ks"):
self.metrics[metric_function] = self.metric_functions[metric_function](
tpr, fpr).item()
elif metric_function == "BCEWithLogitsLoss":
self.metrics[metric_function] = self.metric_functions[metric_function](torch.tensor(y_pred),
torch.tensor(y_true)).item()
def __repr__(self):
output = ["epoch: %d" % self.epoch]
for k, v in self.metrics.items():
output.append("%s: %.6g" % (k, v))
return ', '.join(output)
def save(self):
if os.path.exists(self.output_file):
with open(self.output_file, 'a') as f:
features = []
for k in self.metric_functions_map:
if k in self.metrics:
features.append("%.6g" % self.metrics[k])
else:
features.append("")
f.write("%d,%s\n" % (self.epoch, ','.join(features)))
else:
with open(self.output_file, 'w') as f:
f.write("%s,%s\n" % ("epoch", ','.join(
[_ for _ in self.metric_functions_map])))
features = []
for k in self.metric_functions_map:
if k in self.metrics:
features.append("%.6g" % self.metrics[k])
else:
features.append("")
f.write("%d,%s\n" % (self.epoch, ','.join(features)))
class RegressionMetric:
def __init__(self, epoch, output_file=None, metric_config={}):
self.metric_functions = {}
self.metrics = {}
self.epoch = epoch
self.output_file = output_file
for metric_function in metric_config:
self.metric_functions[metric_function] = get_metric(
metric_function)
def calc_metrics(self, y_true: np.array, y_pred: np.array):
for metric_function in self.metric_functions:
self.metrics[metric_function] = self.metric_functions[metric_function](
y_true, y_pred)
def __repr__(self):
output = ["epoch: %d" % self.epoch]
for k, v in self.metrics.items():
output.append("%s: %.6g" % (k, v))
return ', '.join(output)
def save(self, met):
features = ["%.6g" % met[k] for k in met]
if os.path.exists(self.output_file):
with open(self.output_file, 'a') as f:
f.write("%d,%s\n" % (self.epoch, ','.join(features)))
else:
with open(self.output_file, 'w') as f:
f.write("%s,%s\n" % ("epoch", ','.join([_ for _ in met])))
f.write("%d,%s\n" % (self.epoch, ','.join(features)))
class ThresholdCutter:
def __init__(self, output_file=None):
self.bst_threshold = 0.5
self.bst_score = 0
# self.default_threshold = [0.1, 0.2, 0.3, 0.35, 0.4, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5,
# 0.51, 0.52, 0.53, 0.54, 0.55, 0.6, 0.65, 0.7, 0.8, 0.9]
self.default_percentile = np.arange(100, -1, -1)
self.output_file = output_file
self.metrics = {
"threshold": [],
"tn": [],
"fp": [],
"fn": [],
"tp": [],
"tpr": [],
"fpr": [],
"ks": []
}
def sim_cut_by_value(self, y_true, y_pred):
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
fpr, tpr, thresholds = fpr[:-1], tpr[:-1], thresholds[:-1]
ks_curve = tpr - fpr
ks_curve = np.where(ks_curve > 0, ks_curve, 0)
# shrink output size
probs = np.unique(y_pred)
if len(probs) < len(self.default_percentile):
self.metrics = {
'tpr': tpr,
'fpr': fpr,
'ks': ks_curve,
'threshold': thresholds
}
return
cuts = np.arange(0.01, 1, 0.01)
size = thresholds.size
index_list = [int(size * cut) for cut in cuts]
if index_list[-1] >= size:
index_list = index_list[:-1]
thresholds = [thresholds[idx] for idx in index_list]
ks_curve = [ks_curve[idx] for idx in index_list]
tpr = [tpr[idx] for idx in index_list]
fpr = [fpr[idx] for idx in index_list]
self.metrics = {
'tpr': tpr,
'fpr': fpr,
'ks': ks_curve,
'threshold': thresholds
}
return
def cut_by_value(self, y_true: np.array, y_pred: np.array, values: List = None):
probs = np.unique(y_pred)
logger.info("num of probs: %d." % len(probs))
if values is None:
if len(probs) < len(self.default_percentile):
# logger.warning("ks points %d less than the default num: %d." % (len(probs),
# len(self.default_percentile)))
values = np.array(sorted(probs, reverse=True))
self.default_percentile = np.array(
[sum(y_pred < _) / (len(y_pred) - 1) * 100 for _ in values])
else:
values = np.percentile(y_pred, self.default_percentile)
# - Threshold, TP, FN, FP, TN, TPR, FPR, KS
for threshold in values:
tn, fp, fn, tp = confusion_matrix(
y_true, y_pred >= threshold, labels=[1, 0]).ravel()
if tp + fn > 0:
tpr = tp / (tp + fn)
else:
tpr = np.nan
if tn + fp > 0:
fpr = fp / (tn + fp)
else:
fpr = np.nan
ks = max(np.max(tpr - fpr), 0)
for metric in self.metrics:
self.metrics[metric].append(locals()[metric])
if ks > self.bst_score:
self.bst_score = ks
self.bst_threshold = threshold
def cut_by_index(self, y_true: np.array, y_pred: np.array):
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
ks_curve = tpr - fpr
ks_curve = np.where(ks_curve > 0, ks_curve, 0)
idx = np.argmax(ks_curve)
ks = ks_curve[idx]
if ks > self.bst_score:
self.bst_score = ks
self.bst_threshold = thresholds[idx]
def save(self):
df = pd.DataFrame(self.metrics)
df["top_percentile"] = 100 - self.default_percentile
df.to_csv(self.output_file, header=True,
index=False, float_format='%.6g')
class DecisionTable:
def __init__(self, conf):
self.method = conf.get("method", "equal_frequency")
self.bins = conf.get("bins", 5)
self.type = conf.get("type")
self._check_params()
self.stats = pd.DataFrame()
def _check_params(self):
if self.method not in ("equal_frequency", "equal_width"):
raise NotImplementedError(
"decision table: method '{}' is not implemented.".format(self.method))
if self.bins <= 1:
raise ValueError(
"decision table: bins ({}) must be greater than 1.".format(self.bins))
def fit(self, y_true: np.array, y_pred: np.array):
df = pd.DataFrame({"label": y_true, "pred": y_pred})
n = len(set(y_pred))
if n <= self.bins:
logger.info("metric::decision table::number of unique values in the prediction (%d) is less than "
"the bins (%d), set the bins=%d." % (n, self.bins, n))
self.bins = n
if self.method == "equal_frequency":
groups = pd.qcut(y_pred, self.bins, duplicates='drop', precision=3)
elif self.method == "equal_width":
groups = pd.cut(y_pred, self.bins, right=True,
duplicates='drop', precision=3)
if self.type == "score_card":
groups = [pd.Interval(int(_.left), int(
_.right), _.closed) for _ in groups]
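        # The DataFrame column names below are Chinese; roughly: 区间 = bin
        # interval, 样本数 = sample count, 负样本数/正样本数 = negative/positive
        # sample count, 区间内…占比 = within-bin rate, …占比 = share of the
        # respective total, and 累计… = cumulative counterparts of the above.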
df["区间"] = groups
self.stats["样本数"] = df.groupby("区间").size()
self.stats["负样本数"] = df.groupby(
"区间")["label"].agg(lambda x: sum(x == 0))
self.stats["正样本数"] = df.groupby(
"区间")["label"].agg(lambda x: sum(x == 1))
self.stats["区间内负样本占比"] = self.stats["负样本数"] / self.stats["样本数"]
self.stats["区间内正样本占比"] = self.stats["正样本数"] / self.stats["样本数"]
self.stats["样本占比"] = self.stats["样本数"] / self.stats["样本数"].sum()
self.stats["负样本占比"] = self.stats["负样本数"] / self.stats["负样本数"].sum()
self.stats["正样本占比"] = self.stats["正样本数"] / self.stats["正样本数"].sum()
self.stats["累计总样本数"] = self.stats["样本数"].cumsum()
self.stats["累计负样本数"] = self.stats["负样本数"].cumsum()
self.stats["累计正样本数"] = self.stats["正样本数"].cumsum()
self.stats["累计负样本/负样本总数"] = self.stats["累计负样本数"] / \
self.stats["负样本数"].sum()
self.stats["累计正样本/正样本总数"] = self.stats["累计正样本数"] / \
self.stats["正样本数"].sum()
self.stats["累计负样本/累计总样本"] = self.stats["累计负样本数"] / self.stats["累计总样本数"]
self.stats["累计正样本/累计总样本"] = self.stats["累计正样本数"] / self.stats["累计总样本数"]
self.stats["累计样本数/总样本数"] = self.stats["累计总样本数"] / \
self.stats["样本数"].sum()
self.stats["累计正样本占比/累计总样本占比"] = self.stats["正样本占比"].cumsum() / \
self.stats["样本占比"].cumsum()
for _ in ["区间内负样本占比", "区间内正样本占比", "样本占比", "负样本占比", "正样本占比",
"累计负样本/负样本总数", "累计正样本/正样本总数", "累计负样本/累计总样本", "累计正样本/累计总样本",
"累计样本数/总样本数", "累计正样本占比/累计总样本占比"]:
self.stats[_] = np.where(
self.stats[_].isnull(),
np.nan,
self.stats[_].apply(lambda x: "%.2f%%" % (x * 100))
)
self.stats = self.stats.reset_index()
self.stats["区间"] = self.stats["区间"].apply(str)
def save(self, file_name):
self.stats.to_csv(file_name, header=True,
index=False, float_format='%.2g')
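# Minimal usage sketch for DecisionTable; the config keys are the ones read in
# __init__ and the data below is illustrative:
#
#     table = DecisionTable({"method": "equal_frequency", "bins": 5})
#     y_true = np.random.randint(0, 2, 1000)
#     y_pred = np.random.rand(1000)
#     table.fit(y_true, y_pred)
#     table.save("decision_table.csv")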
class LiftGainCalculator():
def __init__(self, output_file=None, step=0.001):
self.output_file = output_file
self.step = step
self.num_proc = 4
@staticmethod
def _pred_thres(pred: np.array, thres: float):
return (pred >= thres).astype(np.int32)
def cal_lift_gain(self, label: np.array, pred: np.array):
step = self.step
# thresholds = np.sort(np.unique(pred))
cuts = np.arange(step, 1, step)
# new_thresholds = [thresholds[idx] for idx in index_list]
percentages, gains = cumulative_gain_curve(label, pred)
logger.info(
f"Length of percentages before pruning: {percentages.size}")
lifts = gains / percentages
size = percentages.size
index_list = [int(size * cut) for cut in cuts]
if index_list[-1] >= size:
index_list = index_list[:-1]
percentages = [percentages[idx] for idx in index_list]
gains = [gains[idx] for idx in index_list]
lifts = [lifts[idx] for idx in index_list]
logger.info(f"Length of percentages after pruning: {len(percentages)}")
self.metrics = pd.DataFrame(
{
'percentage_data': percentages,
'cum_gain': gains,
'lift': lifts
}
)
def save(self, file_name):
self.metrics.to_csv(
file_name, header=True,
index=False, float_format="%.2g"
)
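# Minimal usage sketch for LiftGainCalculator; it relies on the
# `cumulative_gain_curve` helper imported earlier in this module. At the top
# p fraction of scores, lift = (fraction of positives captured) / p:
#
#     calc = LiftGainCalculator(step=0.01)
#     calc.cal_lift_gain(label, pred)   # label in {0, 1}, pred are scores
#     calc.save("lift_gain.csv")        # columns: percentage_data, cum_gain, lift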
class ClusteringMetric:
def __init__(self):
pass
@staticmethod
def calc_dbi(dist_table, cluster_dist):
if len(dist_table) == 1:
return np.nan
max_dij_list = []
d = 0
n = 0
for i in range(0, len(dist_table)):
dij_list = []
for j in range(0, len(dist_table)):
if j != i:
dij_list.append(
(dist_table[i] + dist_table[j]) / (cluster_dist[d] ** 0.5))
d += 1
dij_list = [_ for _ in dij_list if ~torch.isnan(_)]
if len(dij_list) <= 0:
max_dij_list.append(np.nan)
else:
max_dij = max(dij_list)
max_dij_list.append(max_dij)
n += 1
if n > 0:
return np.nansum(max_dij_list) / n
else:
return np.nan
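# Worked example for ClusteringMetric.calc_dbi: dist_table[i] is the mean
# intra-cluster distance of cluster i (a torch scalar) and cluster_dist[d] is
# the squared distance between the centers of the d-th ordered pair (i, j),
# i != j, enumerated row by row:
#
#     dist_table = [torch.tensor(0.5), torch.tensor(0.7)]
#     cluster_dist = [torch.tensor(4.0), torch.tensor(4.0)]  # pairs (0,1), (1,0)
#     ClusteringMetric.calc_dbi(dist_table, cluster_dist)
#     # -> mean_i max_{j != i} (d_i + d_j) / sqrt(dist(c_i, c_j)) = 0.6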
| 20,407 | 37.946565 | 115 | py |
XFL | XFL-master/python/common/crypto/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/crypto/key_agreement/contants.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RFC 7919
"""
primes_hex = [
'FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1'
'D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9'
'7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561'
'2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935'
'984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735'
'30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB'
'B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19'
'0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61'
'9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73'
'3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA'
'886B4238 61285C97 FFFFFFFF FFFFFFFF',
'FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1'
'D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9'
'7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561'
'2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935'
'984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735'
'30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB'
'B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19'
'0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61'
'9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73'
'3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA'
'886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238'
'61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C'
'AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3'
'64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D'
'ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF'
'3C1B20EE 3FD59D7C 25E41D2B 66C62E37 FFFFFFFF FFFFFFFF',
'FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1'
'D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9'
'7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561'
'2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935'
'984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735'
'30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB'
'B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19'
'0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61'
'9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73'
'3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA'
'886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238'
'61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C'
'AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3'
'64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D'
'ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF'
'3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB'
'7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004'
'87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832'
'A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A'
'1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF'
'8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E655F6A'
'FFFFFFFF FFFFFFFF',
'FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1'
'D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9'
'7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561'
'2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935'
'984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735'
'30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB'
'B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19'
'0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61'
'9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73'
'3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA'
'886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238'
'61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C'
'AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3'
'64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D'
'ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF'
'3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB'
'7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004'
'87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832'
'A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A'
'1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF'
'8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E0DD902'
'0BFD64B6 45036C7A 4E677D2C 38532A3A 23BA4442 CAF53EA6'
'3BB45432 9B7624C8 917BDD64 B1C0FD4C B38E8C33 4C701C3A'
'CDAD0657 FCCFEC71 9B1F5C3E 4E46041F 388147FB 4CFDB477'
'A52471F7 A9A96910 B855322E DB6340D8 A00EF092 350511E3'
'0ABEC1FF F9E3A26E 7FB29F8C 183023C3 587E38DA 0077D9B4'
'763E4E4B 94B2BBC1 94C6651E 77CAF992 EEAAC023 2A281BF6'
'B3A739C1 22611682 0AE8DB58 47A67CBE F9C9091B 462D538C'
'D72B0374 6AE77F5E 62292C31 1562A846 505DC82D B854338A'
'E49F5235 C95B9117 8CCF2DD5 CACEF403 EC9D1810 C6272B04'
'5B3B71F9 DC6B80D6 3FDD4A8E 9ADB1E69 62A69526 D43161C1'
'A41D570D 7938DAD4 A40E329C D0E40E65 FFFFFFFF FFFFFFFF',
'FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A AFDC5620 273D3CF1'
'D8B9C583 CE2D3695 A9E13641 146433FB CC939DCE 249B3EF9'
'7D2FE363 630C75D8 F681B202 AEC4617A D3DF1ED5 D5FD6561'
'2433F51F 5F066ED0 85636555 3DED1AF3 B557135E 7F57C935'
'984F0C70 E0E68B77 E2A689DA F3EFE872 1DF158A1 36ADE735'
'30ACCA4F 483A797A BC0AB182 B324FB61 D108A94B B2C8E3FB'
'B96ADAB7 60D7F468 1D4F42A3 DE394DF4 AE56EDE7 6372BB19'
'0B07A7C8 EE0A6D70 9E02FCE1 CDF7E2EC C03404CD 28342F61'
'9172FE9C E98583FF 8E4F1232 EEF28183 C3FE3B1B 4C6FAD73'
'3BB5FCBC 2EC22005 C58EF183 7D1683B2 C6F34A26 C1B2EFFA'
'886B4238 611FCFDC DE355B3B 6519035B BC34F4DE F99C0238'
'61B46FC9 D6E6C907 7AD91D26 91F7F7EE 598CB0FA C186D91C'
'AEFE1309 85139270 B4130C93 BC437944 F4FD4452 E2D74DD3'
'64F2E21E 71F54BFF 5CAE82AB 9C9DF69E E86D2BC5 22363A0D'
'ABC52197 9B0DEADA 1DBF9A42 D5C4484E 0ABCD06B FA53DDEF'
'3C1B20EE 3FD59D7C 25E41D2B 669E1EF1 6E6F52C3 164DF4FB'
'7930E9E4 E58857B6 AC7D5F42 D69F6D18 7763CF1D 55034004'
'87F55BA5 7E31CC7A 7135C886 EFB4318A ED6A1E01 2D9E6832'
'A907600A 918130C4 6DC778F9 71AD0038 092999A3 33CB8B7A'
'1A1DB93D 7140003C 2A4ECEA9 F98D0ACC 0A8291CD CEC97DCF'
'8EC9B55A 7F88A46B 4DB5A851 F44182E1 C68A007E 5E0DD902'
'0BFD64B6 45036C7A 4E677D2C 38532A3A 23BA4442 CAF53EA6'
'3BB45432 9B7624C8 917BDD64 B1C0FD4C B38E8C33 4C701C3A'
'CDAD0657 FCCFEC71 9B1F5C3E 4E46041F 388147FB 4CFDB477'
'A52471F7 A9A96910 B855322E DB6340D8 A00EF092 350511E3'
'0ABEC1FF F9E3A26E 7FB29F8C 183023C3 587E38DA 0077D9B4'
'763E4E4B 94B2BBC1 94C6651E 77CAF992 EEAAC023 2A281BF6'
'B3A739C1 22611682 0AE8DB58 47A67CBE F9C9091B 462D538C'
'D72B0374 6AE77F5E 62292C31 1562A846 505DC82D B854338A'
'E49F5235 C95B9117 8CCF2DD5 CACEF403 EC9D1810 C6272B04'
'5B3B71F9 DC6B80D6 3FDD4A8E 9ADB1E69 62A69526 D43161C1'
'A41D570D 7938DAD4 A40E329C CFF46AAA 36AD004C F600C838'
'1E425A31 D951AE64 FDB23FCE C9509D43 687FEB69 EDD1CC5E'
'0B8CC3BD F64B10EF 86B63142 A3AB8829 555B2F74 7C932665'
'CB2C0F1C C01BD702 29388839 D2AF05E4 54504AC7 8B758282'
'2846C0BA 35C35F5C 59160CC0 46FD8251 541FC68C 9C86B022'
'BB709987 6A460E74 51A8A931 09703FEE 1C217E6C 3826E52C'
'51AA691E 0E423CFC 99E9E316 50C1217B 624816CD AD9A95F9'
'D5B80194 88D9C0A0 A1FE3075 A577E231 83F81D4A 3F2FA457'
'1EFC8CE0 BA8A4FE8 B6855DFE 72B0A66E DED2FBAB FBE58A30'
'FAFABE1C 5D71A87E 2F741EF8 C1FE86FE A6BBFDE5 30677F0D'
'97D11D49 F7A8443D 0822E506 A9F4614E 011E2A94 838FF88C'
'D68C8BB7 C5C6424C FFFFFFFF FFFFFFFF'
] | 9,020 | 59.543624 | 74 | py |
XFL | XFL-master/python/common/crypto/key_agreement/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/crypto/key_agreement/diffie_hellman.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from secrets import SystemRandom
from typing import List
from gmpy2 import powmod
from common.communication.gRPC.python.channel import DualChannel
from .contants import primes_hex
class DiffieHellman(object):
"""
Implement Diffie-Hellman key exchange protocol.
The security parameters are referenced from RFC 7919, which are used in TLS 1.3.
Shortest exponents are referenced from appendix A of RFC 7919.
"""
supported_prime_bitlength = [2048, 3072, 4096, 6144, 8192]
# symmetric_equivalent_strength
supported_security_strength = [103, 125, 150, 175, 192]
supported_shortest_exponents = [225, 275, 325, 375, 400]
g = 2
primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
def __init__(self,
fed_ids: List[str],
key_bitlength: int = 3072,
optimized: bool = True,
channel_name: str = "diffie_hellman"):
try:
self.index = self.supported_prime_bitlength.index(key_bitlength)
except ValueError:
message = "Input key_bitlength {} not supported! Need to be one of the {}"
raise ValueError(message.format(key_bitlength, self.supported_prime_bitlength))
self.chan = DualChannel(name=channel_name, ids=fed_ids)
self.p = self.primes[self.index]
self.key_bitlength = key_bitlength
self.security_strength = self.supported_security_strength[self.index]
        self.shortest_exponent = self.supported_shortest_exponents[self.index]
        self.optimized = optimized
        self.lower_bound = 1 << (self.shortest_exponent - 1)
        if optimized:
            self.upper_bound = 1 << self.shortest_exponent
        else:
            self.upper_bound = self.p - 2
self.rand_num_generator = SystemRandom()
def __str__(self) -> str:
s = "Diffie-Hellman key exchange: remote_id={}, key_bitlength={}," \
"security_level={}, optimized={}"
return s.format(self.chan.remote_id, self.key_bitlength,
self.security_strength, self.optimized)
def exchange(self, out_bytes: bool = True):
a = self.rand_num_generator.randint(self.lower_bound, self.upper_bound)
g_power_a = powmod(self.g, a, self.p)
index, g_power_b = self.chan.swap([self.index, g_power_a])
if index != self.index:
message = "Input key_bitlength are not the same! {} for local, {} from remote."
raise ValueError(message.format(self.supported_prime_bitlength[self.index],
self.supported_prime_bitlength[index]))
secret_number = int(powmod(g_power_b, a, self.p))
if out_bytes:
secret_number = secret_number.to_bytes((secret_number.bit_length() + 7) // 8, 'big')
return secret_number
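# Minimal usage sketch; both parties run the same code with their own
# federation ids, and the DualChannel performs the message swap. `local_id`
# and `remote_id` below are illustrative:
#
#     dh = DiffieHellman(fed_ids=[local_id, remote_id], key_bitlength=3072)
#     shared_secret = dh.exchange(out_bytes=True)
#     # `shared_secret` is identical on both sides; it can be truncated and
#     # used as entropy for a CSPRNG such as the HMAC_DRBG in common/crypto.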
| 3,484 | 39.523256 | 96 | py |
XFL | XFL-master/python/common/crypto/paillier/context.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pickle
import secrets
import warnings
from typing import Optional, Union
import gmpy2
from .utils import MPZ, getprimeover, invert, mul, powmod
class PaillierContext(object):
def init(self,
p: Optional[Union[int, MPZ]] = None,
q: Optional[Union[int, MPZ]] = None,
n: Optional[Union[int, MPZ]] = None,
djn_h_pow_n: Optional[Union[int, MPZ]] = None):
if n is None and (p is None or q is None):
raise ValueError("Insufficient parameters.")
if p is not None and q is not None:
self.__p = p
self.__q = q
self.__n = mul(p, q)
if n is not None and self.__n != n:
warnings.warn(f"Input n {n} not equal to p * q {self.__n}, use p * q instead.")
self.q_inverse_p = invert(q, p)
self.p_square = mul(p, p)
self.q_square = mul(q, q)
self.q2_inverse_p2 = invert(self.q_square, self.p_square)
self.hp = self._h_function(p, self.p_square)
self.hq = self._h_function(q, self.q_square)
self.phi_p2 = mul(p, p-1)
self.phi_q2 = mul(q, q-1)
self.ep = self.__n % self.phi_p2
self.eq = self.__n % self.phi_q2
self.__is_private = True
else:
self.__n = n
self.__is_private = False
if djn_h_pow_n:
self.h_pow_n = djn_h_pow_n
self.djn_exp_bound = pow(2, self.__n.bit_length() // 2)
self.djn_on = True
if self.__is_private:
self.h_pow_n_modp2 = self.h_pow_n % self.p_square
self.h_pow_n_modq2 = self.h_pow_n % self.q_square
else:
self.djn_on = False
self.n_square = pow(self.__n, 2)
self.max_value_for_positive = self.__n // 3
self.min_value_for_negative = self.__n - self.max_value_for_positive
return self
@classmethod
def generate(cls, key_bit_size: int = 2048, djn_on: bool = False):
p, q = cls._generate_paillier_private_key(key_bit_size, djn_on)
if djn_on:
n = mul(p, q)
x = secrets.SystemRandom().getrandbits(n.bit_length())
x = gmpy2.bit_set(x, n.bit_length() - 1)
h = -pow(x, 2)
h_pow_n = powmod(h, n, pow(n, 2))
return PaillierContext().init(p, q, djn_h_pow_n=h_pow_n)
else:
return PaillierContext().init(p, q)
@property
def p(self):
if self.__is_private:
return self.__p
return None
@property
def q(self):
if self.__is_private:
return self.__q
return None
@property
def n(self):
return self.__n
def is_private(self):
return self.__is_private
def _copy_public_from(self, other):
self.__n = other.n
self.__is_private = False
self.n_square = other.n_square
self.max_value_for_positive = other.max_value_for_positive
self.min_value_for_negative = other.min_value_for_negative
self.djn_on = other.djn_on
if self.djn_on:
self.h_pow_n = other.h_pow_n
self.djn_exp_bound = other.djn_exp_bound
def to_public(self):
if not self.__is_private:
return self
pub_context = PaillierContext()
pub_context._copy_public_from(self)
return pub_context
@staticmethod
    def _generate_paillier_private_key(n_length: int = 2048, djn_on: bool = False, seed: Optional[int] = None):
"""
Paillier-DJN:
Damgård I, Jurik M, Nielsen J B.
A generalization of Paillier’s public-key system with applications to electronic voting[J].
International Journal of Information Security, 2010, 9(6): 371-385.
"""
p, q = None, None
if djn_on:
            def f(x, y):
                return (x == y) or (math.gcd(x - 1, y - 1) != 2)
else:
def f(x, y):
return x == y
if seed is None:
while f(p, q):
p = getprimeover(n_length // 2)
q = getprimeover(n_length // 2)
else:
i = 1
while f(p, q):
p = getprimeover(n_length // 2, seed + i * 3)
q = getprimeover(n_length // 2, seed + i * 5)
i += 1
return p, q
def serialize(self, save_private_key: bool = True):
if save_private_key and self.__is_private:
out = pickle.dumps((self.__p, self.__q))
else:
out = pickle.dumps((self.__n,))
return out
@classmethod
def deserialize_from(cls, data: bytes):
unpickled_data = pickle.loads(data)
if len(unpickled_data) == 1:
return PaillierContext().init(n=unpickled_data[0])
elif len(unpickled_data) == 2:
return PaillierContext().init(p=unpickled_data[0], q=unpickled_data[1])
        else:
            raise ValueError("The unpickled data should be a tuple containing 1 or 2 big integers.")
def __eq__(self, other):
if id(self) == id(other):
return True
if self.p != other.p or self.q != other.q or self.n != other.n:
return False
return True
def __hash__(self):
if self.__is_private:
return hash((self.__p, self.__q))
else:
return hash(self.__n)
def __str__(self):
if self.__is_private:
return f"PaillierContext: p = {int(self.__p)}, q = {int(self.__q)}, n = {int(self.__n)}"
else:
return f"PaillierContext: n = {int(self.__n)}"
def _l_function(self, x, p):
return (x - 1) // p
def _h_function(self, x, xsquare):
return invert(self._l_function(powmod(self.__n + 1, x - 1, xsquare), x), x)
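# Minimal usage sketch: generate a private context, derive the public part for
# the other party, and round-trip it through serialization:
#
#     priv = PaillierContext.generate(key_bit_size=2048)
#     pub = priv.to_public()                       # safe to share
#     data = pub.serialize()                       # pickles (n,) only
#     restored = PaillierContext.deserialize_from(data)
#     assert restored == pub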
| 6,659 | 33.153846 | 135 | py |
XFL | XFL-master/python/common/crypto/paillier/utils.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import secrets
from typing import Optional
import gmpy2
MPZ = type(gmpy2.mpz())
def get_core_num(expected_core_num):
max_cores = multiprocessing.cpu_count()
if expected_core_num == -1:
num_cores = max_cores
else:
num_cores = min(max(1, expected_core_num), max_cores)
return num_cores
def mul(a, b):
return gmpy2.mul(a, b)
def crt(mp, mq, p, q, q_inverse, n):
"""The Chinese Remainder Theorem, return the solution modulo n=pq.
"""
u = gmpy2.mul(mp-mq, q_inverse) % p
x = (mq + gmpy2.mul(u, q)) % n
return int(x)
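# Worked example: with p = 5, q = 7, n = 35 and q_inverse = invert(7, 5) = 3,
# crt(2, 3, 5, 7, 3, 35) == 17, and indeed 17 % 5 == 2 and 17 % 7 == 3.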
# def fmod(a, b):
# return gmpy2.f_mod(a, b)
def mulmod(a, b, c):
"""
return int: (a * b) % c
"""
return gmpy2.mul(a, b) % c
def powmod(a: int, b: int, c: int) -> int:
"""
return int: (a ** b) % c
"""
if a == 1:
return 1
if max(a, b, c) < (1 << 64):
return pow(a, b, c)
else:
return gmpy2.powmod(a, b, c)
def invert(a, b):
"""return int: x, where a * x == 1 mod b
"""
x = gmpy2.invert(a, b)
if x == 0:
raise ZeroDivisionError('invert(a, b) no inverse exists')
return x
def getprimeover(n, seed: Optional[int] = None):
    """return a random prime number of at least n bits
    """
    if seed is None:
        r = gmpy2.mpz(secrets.SystemRandom().getrandbits(n))
    else:
        # NOTE: secrets.SystemRandom draws from the OS entropy pool and
        # ignores the seed argument, so this branch is not reproducible.
        r = gmpy2.mpz(secrets.SystemRandom(seed).getrandbits(n))
    r = gmpy2.bit_set(r, n - 1)
    out = gmpy2.next_prime(r)
    return out
def isqrt(n):
"""return the integer square root of N """
return gmpy2.isqrt(n)
| 2,228 | 21.979381 | 74 | py |
XFL | XFL-master/python/common/crypto/paillier/encoder.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
from typing import Optional, Union
import numpy as np
from .context import PaillierContext
from .utils import MPZ, mul
class PaillierEncoder(object):
_MANT_DIG = sys.float_info.mant_dig
@classmethod
def cal_exponent(cls,
data: Union[int, float, np.ndarray, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64],
precision: Optional[int] = None) -> Union[int, np.ndarray]:
"""Precison are expected to be an non-negative integer.
"""
if precision is None:
if isinstance(data, np.ndarray):
exponent = np.frexp(data)[1] - cls._MANT_DIG
elif isinstance(data, (np.int32, np.int64, int, np.int16)):
exponent = 0
elif isinstance(data, (np.float32, np.float64, float, np.float16,)):
exponent = math.frexp(data)[1] - cls._MANT_DIG
            else:
                raise TypeError(f"Data type {type(data)} not supported.")
else:
exponent = -math.ceil(math.log2(10) * precision)
return exponent
@classmethod
def encode_single(cls,
context: PaillierContext,
data: Union[int, float, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64],
exponent: int) -> MPZ:
out = round(data * (1 << -exponent)) % context.n
return out
@classmethod
def decode_single(cls, context: PaillierContext, data: MPZ, exponent: int) -> float:
if data >= context.min_value_for_negative:
data -= context.n
elif data > context.max_value_for_positive:
raise OverflowError("Overflow detected during decoding encrypted number.")
out = mul(data, pow(2, exponent))
return out
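# Worked example of the fixed-point encoding; `ctx` is assumed to come from
# PaillierContext.generate():
#
#     exp = PaillierEncoder.cal_exponent(0.5)   # frexp(0.5) = (0.5, 0), so
#                                               # exp = 0 - _MANT_DIG = -53
#     enc = PaillierEncoder.encode_single(ctx, 0.5, exp)   # round(0.5 * 2**53) % n
#     PaillierEncoder.decode_single(ctx, enc, exp)         # -> 0.5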
| 2,467 | 36.969231 | 123 | py |
XFL | XFL-master/python/common/crypto/paillier/paillier.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import secrets
import warnings
from functools import partial
from typing import Optional, Union
import numpy as np
import zstd
from pathos.pools import ProcessPool
from .context import PaillierContext
from .encoder import PaillierEncoder
from .utils import MPZ, crt, get_core_num, invert, mul, mulmod, powmod
"""
We suggest the plaintext to be a numpy.ndarray of dtype np.float32 to get a more accurate result.
key_length ~ symmetric equivalent security strength:
1024 ~ 80
2048 ~ 112
3072 ~ 128
"""
class RawCiphertext:
def __init__(self, value, exp):
self.value = value
self.exp = exp
class PaillierCiphertext(object):
def __init__(self,
context: PaillierContext,
raw_ciphertext: MPZ,
exponent: int) -> None:
self.__context = context
self.__c = raw_ciphertext
self.__exp = exponent
@property
def context(self):
return self.__context
@property
def raw_ciphertext(self):
return self.__c
@property
def exponent(self):
return self.__exp
def serialize(self, compression: bool = True) -> bytes:
out = pickle.dumps(RawCiphertext(self.__c, self.__exp))
if compression:
out = zstd.compress(out)
return out
@classmethod
def deserialize_from(cls, context: PaillierContext, data: bytes, compression: bool = True):
if compression:
data = zstd.decompress(data)
unpickled_data = pickle.loads(data)
return PaillierCiphertext(context, unpickled_data.value, unpickled_data.exp)
def _decrease_exponent_to(self, new_exponent: int):
scalar = 1 << (self.__exp - new_exponent)
raw_ciphertext = self._raw_mul(self.raw_ciphertext,
scalar,
self.context.min_value_for_negative,
self.context.n,
self.context.n_square)
return raw_ciphertext
def __add__(self, other):
"""[]
Attention!: No obfuscation is applyed when adding a scalar, one need to explicitly
execute 'obfuscate' method if needed.
"""
if isinstance(other, PaillierCiphertext):
return self._add_encrypted(other)
elif isinstance(other, (int, float)):
ciphertext = Paillier.encrypt(self.context,
other,
precision=None,
max_exponent=None,
obfuscation=False)
return self._add_encrypted(ciphertext)
else:
raise TypeError(f"Adding data of type {type(other)} not supported.")
def _add_encrypted(self, other):
if self.context.to_public() != other.context.to_public():
raise ValueError("Adding two ciphertext with different keys.")
if self.exponent > other.exponent:
new_exponent = other.exponent
raw_a = self._decrease_exponent_to(new_exponent)
raw_b = other.raw_ciphertext
else:
new_exponent = self.exponent
raw_a = self.raw_ciphertext
if self.exponent < other.exponent:
raw_b = other._decrease_exponent_to(new_exponent)
else:
raw_b = other.raw_ciphertext
raw_sum = self._raw_add(raw_a, raw_b, self.context.n_square)
return PaillierCiphertext(self.context, raw_sum, new_exponent)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return self.__add__(other * (-1))
def __rsub__(self, other):
return ((-1) * self).__add__(other)
def __mul__(self, scalar: Union[int, float]):
if isinstance(scalar, PaillierCiphertext):
raise TypeError("Cannot multiply one ciphertext with another ciphertext, try multiply a scalar.")
exponent = PaillierEncoder.cal_exponent(scalar, precision=None)
encoded_scalar = PaillierEncoder.encode_single(self.context, scalar, exponent)
raw_ciphertext = self._raw_mul(self.raw_ciphertext,
encoded_scalar,
self.context.min_value_for_negative,
self.context.n,
self.context.n_square)
return PaillierCiphertext(self.context, raw_ciphertext, exponent + self.exponent)
def __rmul__(self, scalar: Union[int, float]):
return self.__mul__(scalar)
def __truediv__(self, scalar: Union[int, float]):
return self.__mul__(1 / scalar)
def _raw_add(self, c1: MPZ, c2: MPZ, n_square: MPZ) -> MPZ:
return mulmod(c1, c2, n_square)
def _raw_mul(self, c1: MPZ, p2: MPZ, min_value_for_negative: MPZ, n: MPZ, n_square: MPZ) -> MPZ:
        '''Requires 0 <= p2 < n; optimized for encoded scalars with small absolute value (e.g. -1).'''
if self.__context.is_private():
p_square = self.__context.p_square
q_square = self.__context.q_square
phi_p2 = self.__context.phi_p2
phi_q2 = self.__context.phi_q2
q2_inverse_p2 = self.__context.q2_inverse_p2
def crt_(mp_square, mq_square):
return crt(mp_square, mq_square, p_square, q_square, q2_inverse_p2, n_square)
def crt_powmod(c, p):
mp_square = powmod(c % p_square, p % phi_p2, p_square)
mq_square = powmod(c % q_square, p % phi_q2, q_square)
return crt_(mp_square, mq_square)
if p2 >= min_value_for_negative:
if p2 + 102400 > n: # abs(p2) is small
out = powmod(c1, n - p2, n_square)
return invert(out, n_square)
invert_c1 = invert(c1, n_square)
return crt_powmod(invert_c1, n - p2)
else:
return crt_powmod(c1, p2)
else:
if p2 >= min_value_for_negative:
invert_c1 = invert(c1, n_square)
return powmod(invert_c1, n - p2, n_square)
else:
return powmod(c1, p2, n_square)
def obfuscate(self):
n = self.__context.n
n_square = self.__context.n_square
if self.__context.djn_on:
if self.__context.is_private():
a = secrets.SystemRandom().randrange(1, self.__context.djn_exp_bound)
phi_p2 = self.__context.phi_p2
phi_q2 = self.__context.phi_q2
p_square = self.__context.p_square
q_square = self.__context.q_square
q2_inverse_p2 = self.__context.q2_inverse_p2
def crt_(mp_square, mq_square):
return crt(mp_square, mq_square, p_square, q_square, q2_inverse_p2, n_square)
mp_square = powmod(self.__context.h_pow_n_modp2, a % phi_p2, p_square)
mq_square = powmod(self.__context.h_pow_n_modq2, a % phi_q2, q_square)
obfuscator = crt_(mp_square, mq_square)
else:
a = secrets.SystemRandom().randrange(1, self.__context.djn_exp_bound)
obfuscator = powmod(self.__context.h_pow_n, a, n_square)
else:
if self.__context.is_private():
r = secrets.SystemRandom().randrange(1, n)
ep = self.__context.ep
eq = self.__context.eq
p_square = self.__context.p_square
q_square = self.__context.q_square
q2_inverse_p2 = self.__context.q2_inverse_p2
def crt_(mp_square, mq_square):
return crt(mp_square, mq_square, p_square, q_square, q2_inverse_p2, n_square)
mp_square = powmod(r % p_square, ep, p_square)
mq_square = powmod(r % q_square, eq, q_square)
obfuscator = crt_(mp_square, mq_square)
else:
r = secrets.SystemRandom().randrange(1, n)
obfuscator = powmod(r, n, n_square)
self.__c = mulmod(self.__c, obfuscator, n_square)
return self
class Paillier(object):
@staticmethod
def context(key_bit_size: int = 2048, djn_on: bool = False):
return PaillierContext.generate(key_bit_size, djn_on)
@staticmethod
def context_from(data: bytes):
return PaillierContext.deserialize_from(data)
@staticmethod
def serialize(data: Union[np.ndarray, PaillierCiphertext], compression: bool = True) -> bytes:
"""data is PaillierCiphertext or a numpy.ndarray consists of PaillierCiphertext"""
if isinstance(data, PaillierCiphertext):
return data.serialize(compression)
def f(x):
return RawCiphertext(x.raw_ciphertext, x.exponent)
f1 = np.vectorize(f)
out = f1(data)
out = pickle.dumps(out)
if compression:
out = zstd.compress(out)
return out
@staticmethod
def ciphertext_from(context: PaillierContext, data: bytes, compression: bool = True):
if compression:
data = zstd.decompress(data)
unpickled_data = pickle.loads(data)
def f(x):
return PaillierCiphertext(context, x.value, x.exp)
f1 = np.vectorize(f, otypes=[PaillierCiphertext])
out = f1(unpickled_data)
return out
@staticmethod
def _encrypt_single(data: Union[int, float],
context: PaillierContext,
precision: Optional[int] = None,
max_exponent: Optional[int] = None,
obfuscation: bool = True) -> MPZ:
exponent = PaillierEncoder.cal_exponent(data, precision)
if max_exponent is not None:
exponent = min(exponent, max_exponent)
encoded_data = PaillierEncoder.encode_single(context, data, exponent)
raw_ciphertext = (mul(context.n, encoded_data) + 1) % context.n_square
out = PaillierCiphertext(context, raw_ciphertext, exponent)
if obfuscation:
return out.obfuscate()
return out
@classmethod
def encrypt(cls,
context: PaillierContext,
data: Union[int, float, np.ndarray],
precision: Optional[int] = None,
max_exponent: Optional[int] = None,
obfuscation: bool = True,
num_cores: int = -1) -> Union[PaillierCiphertext, np.ndarray]:
"""[summary]
Args:
context (PaillierContext): [description]
data (Union[int, float, np.ndarray]): [description]
precision (Optional[int], optional): [description]. Defaults to None.
max_exponent (Optional[int], optional): [description]. Defaults to None.
obfuscation (bool, optional): [description]. Defaults to True.
num_cores (int, optional): how many cores are used for encryption. \
Defaults to 1. Set to inf to use all the cores.
Raises:
TypeError: [description]
Returns:
Union[PaillierCiphertext, np.ndarray]: [description]
"""
if isinstance(data, np.ndarray):
if num_cores == 1:
def f1(x):
c = cls._encrypt_single(x, context, precision, max_exponent, obfuscation)
return c
f2 = np.vectorize(f1, otypes=[PaillierCiphertext])
ciphertext = f2(data)
else:
num_cores = get_core_num(num_cores)
partial_encrypt = partial(cls._encrypt_single,
context=context,
precision=precision,
max_exponent=max_exponent,
obfuscation=obfuscation)
s = data.shape
data_flatten = data.reshape(-1)
with ProcessPool(num_cores) as pool:
ciphertext = np.array(pool.map(partial_encrypt, data_flatten)).reshape(s)
# pool.terminate()
elif isinstance(data, (int, float)):
# ciphertext = cls._encrypt_single(context, data, precision, max_exponent, obfuscation)
ciphertext = cls._encrypt_single(data, context, precision, max_exponent, obfuscation)
else:
raise TypeError(f"Unsupported data type {type(data)}, accepted types are 'np.ndarray', 'int', 'float'.")
return ciphertext
@staticmethod
def _decrypt_single(data: PaillierCiphertext,
context: PaillierContext) -> float:
if not isinstance(data, PaillierCiphertext):
return data
p, q = context.p, context.q
q_inverse_p = context.q_inverse_p
p_square = context.p_square
q_square = context.q_square
hp = context.hp
hq = context.hq
n = context.n
def l_func(x, m):
return (x - 1) // m
def crt_(mp, mq):
return crt(mp, mq, p, q, q_inverse_p, n)
mp = l_func(powmod(data.raw_ciphertext, p - 1, p_square), p)
mp = mulmod(mp, hp, p)
mq = l_func(powmod(data.raw_ciphertext, q - 1, q_square), q)
mq = mulmod(mq, hq, q)
encoded_number = crt_(mp, mq)
out = PaillierEncoder.decode_single(context, encoded_number, data.exponent)
return out
@classmethod
def decrypt(cls,
context: PaillierContext,
data: Union[PaillierCiphertext, np.ndarray],
dtype: str = 'float',
num_cores: int = -1,
out_origin: bool = False):
if not context.is_private():
raise TypeError("Try to decrypt a paillier ciphertext by a public key.")
if isinstance(data, np.ndarray):
if num_cores == 1:
def f1(x):
p = cls._decrypt_single(x, context)
return p
f2 = np.vectorize(f1)
out = f2(data)
else:
num_cores = get_core_num(num_cores)
partial_decrypt = partial(cls._decrypt_single, context=context)
s = data.shape
data_flatten = data.reshape(-1)
with ProcessPool(num_cores) as pool:
out = np.array(pool.map(partial_decrypt, data_flatten)).reshape(s)
if not out_origin:
if dtype == 'float':
out = out.astype(np.float32)
elif dtype == 'int':
out = out.astype(np.int32)
else:
warnings.warn(f"dtype {dtype} not supported.")
out = out.astype(np.float32)
elif isinstance(data, PaillierCiphertext):
out = cls._decrypt_single(data, context)
if not out_origin:
if 'float' in dtype:
out = float(out)
elif 'int' in dtype:
out = int(out)
else:
warnings.warn(f"dtype {dtype} not supported.")
out = float(out)
else:
raise TypeError(f"Unsupported data type {type(data)}, accepted types are 'np.ndarray', 'PaillierCiphertext'.")
return out
@classmethod
def obfuscate(cls, ciphertext: Union[PaillierCiphertext, np.ndarray]):
if isinstance(ciphertext, np.ndarray):
def f(c):
r = c.obfuscate()
return r
f2 = np.vectorize(f, otypes=[PaillierCiphertext])
ciphertext = f2(ciphertext)
elif isinstance(ciphertext, PaillierCiphertext):
ciphertext = ciphertext.obfuscate()
else:
raise TypeError(f"Unsupported raw ciphertext type {type(ciphertext)}")
return ciphertext
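# Minimal end-to-end sketch (single process, numpy plaintexts):
#
#     ctx = Paillier.context(key_bit_size=2048, djn_on=False)
#     x = np.array([1.5, -2.0, 3.25], dtype=np.float32)
#     enc = Paillier.encrypt(ctx, x, num_cores=1)
#     enc2 = enc * 2 + 1                          # elementwise on the ndarray
#     Paillier.decrypt(ctx, enc2, num_cores=1)    # -> approx. [4.0, -3.0, 7.5]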
| 17,105 | 38.597222 | 122 | py |
XFL | XFL-master/python/common/crypto/paillier/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/crypto/one_time_pad/one_time_add.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from copy import deepcopy
from typing import List, Union
import numpy as np
import torch
from .component import OneTimePadCiphertext, OneTimePadContext, OneTimeKey
class OneTimeAdd(object):
"""Provide encrypt and decrypt method for one-time-add algorithm
"""
@staticmethod
def context(modulus_exp: int = 128,
data_type: str = "torch.Tensor"):
return OneTimePadContext(modulus_exp, data_type)
@staticmethod
def ciphertext(data: Union[list, np.ndarray, bytes],
context_: OneTimePadContext):
return OneTimePadCiphertext(data, context_)
@staticmethod
def _xcrypt(context_: OneTimePadContext,
data: Union[np.ndarray, torch.Tensor],
one_time_key: List[np.ndarray],
is_addition: Union[List[bool], bool] = True,
                is_decrypt: bool = False) -> np.ndarray:
        """Shared implementation of encryption and decryption.

        is_addition: same length as one_time_key; whether to add or to subtract each key. Defaults to True.
        If is_decrypt is False, returns a numpy array of integers;
        if is_decrypt is True, returns a numpy array of float numbers.
        """
if isinstance(is_addition, (bool, int)):
is_addition = [is_addition] * len(one_time_key)
elif len(is_addition) != len(one_time_key):
raise ValueError(
f"Length of is_additon ({len(is_addition)}) and one_time_key ({len(one_time_key)}) not match.")
if data.shape == ():
zero_shape = True
data = np.array([data])
else:
zero_shape = False
dtype = np.uint64 if context_.modulus_exp == 64 else object
if not is_decrypt:
if dtype == np.uint64:
out = np.mod(np.trunc(data * context_.scalar).astype("int"),
context_.modulus).astype(dtype)
else:
out = np.mod(np.trunc(data * context_.scalar),
context_.modulus).astype(dtype)
else:
out = deepcopy(data)
for i in range(len(one_time_key)):
if is_addition[i] - is_decrypt:
if context_.modulus_exp == 64:
out = out + one_time_key[i]
else:
out = np.mod(
out + one_time_key[i], context_.modulus).astype(object)
else:
if context_.modulus_exp == 64:
out = out - one_time_key[i]
else:
out = np.mod(
out - one_time_key[i], context_.modulus).astype(object)
if is_decrypt:
out = out.astype(object)
idx = np.where(out > context_.modulus // 2)
out[idx] -= context_.modulus
out /= context_.scalar
if zero_shape:
out = np.array(out[0])
return out
@classmethod
def encrypt(cls,
context_: OneTimePadContext,
data: Union[np.ndarray, torch.Tensor],
one_time_key: OneTimeKey,
is_addition: Union[List[bool], bool] = True,
serialized: bool = False) -> Union[OneTimePadCiphertext, bytes]:
"""Encrypt the data to a ciphertext
Args:
context_ (OneTimePadContext): see OneTimePadContext.
data (Union[np.ndarray, torch.Tensor]): plaintext to encrypt.
one_time_key (OneTimeKey): a key for addition/subtraction, or a list of keys,
the ciphertext is plaintext +/- key[0] +/- key[1] +/- key[2] +/- ...
is_addition (Union[List[bool], bool], optional): same length as one_time_key, means to add or to subtract the key.
Defaults to True.
serialized (bool, optional): it is convenient to set it to true if the ciphertext needs to
be sent by the network right after the encryption. Defaults to False.
Raises:
ValueError: if shape of data is different from shape of one_time_key or one_time_key[0].
Warnings:
if context_.data_type is different from the type of data, which means the type of plaintext
after decryption will be different from the type of plaintext before encryption.
Returns:
Union[OneTimePadCiphertext, bytes]: if serialized is False, return OneTimePadCiphertext,
else return pickled ciphertext(numpy.ndarray of integers).
"""
if isinstance(one_time_key.value, np.ndarray):
one_time_key = [one_time_key.value]
else:
one_time_key = one_time_key.value
if data.shape != one_time_key[0].shape:
raise ValueError(
f"Input data's shape {data.shape} and one_time_key's shape {one_time_key[0].shape} not match.")
if not isinstance(data, context_.data_type) and not isinstance(data, np.float64):
warnings.warn(
f"Input data type {type(data)} and context_.data_type {context_.data_type} are different.")
if isinstance(data, torch.Tensor):
data = data.numpy()
out = cls._xcrypt(context_, data, one_time_key, is_addition, False)
if not serialized:
out = OneTimePadCiphertext(out, context_)
else:
out = OneTimePadContext.serialize(out)
return out
@classmethod
def decrypt(cls,
context_: OneTimePadContext,
ciphertext: Union[OneTimePadCiphertext, bytes],
one_time_key: OneTimeKey,
is_addition: Union[List[bool], bool] = True) -> Union[np.ndarray, torch.Tensor]:
"""Decrypt the ciphertext to a plaintext
Args:
context_ (OneTimePadContext): see OneTimePadContext.
ciphertext (Union[OneTimePadCiphertext, bytes]): result of cls.encrypt(...) method.
one_time_key (OneTimeKey): the same as it is in cls.encrypt(...).
is_addition (Union[List[bool], bool]): the same as it is in cls.encrypt(...).
Raises:
ValueError: if the shape of ciphertext.data is different from the shape of one_time_key.
Returns:
Union[np.ndarray, torch.Tensor]: numpy.ndarray or torch.Tensor of float32, depend on context_.data_type
"""
if isinstance(one_time_key.value, np.ndarray):
one_time_key = [one_time_key.value]
else:
one_time_key = one_time_key.value
if isinstance(ciphertext, bytes):
ciphertext = OneTimePadContext.deserialize(ciphertext)
if ciphertext.data.shape != one_time_key[0].shape:
raise ValueError(
f"Input ciphertext's shape {ciphertext.data.shape} and one_time_key's shape {one_time_key[0].shape} not match.")
out = cls._xcrypt(context_, ciphertext.data,
one_time_key, is_addition, True)
if context_.data_type == np.ndarray:
out = out.astype(np.float32)
else:
out = torch.from_numpy(out.astype(np.float32))
return out
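# Minimal usage sketch: a party that adds a pad and later subtracts the same
# pad recovers the plaintext up to fixed-point precision. The pad values below
# are illustrative:
#
#     ctx = OneTimeAdd.context(modulus_exp=128, data_type="numpy")
#     pad = OneTimeKey(np.array([[12345, 67890]], dtype=object), modulus_exp=128)
#     x = np.array([[1.5, -2.25]])
#     c = OneTimeAdd.encrypt(ctx, x, pad, is_addition=True)
#     OneTimeAdd.decrypt(ctx, c, pad, is_addition=True)   # -> approx. x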
| 7,887 | 40.298429 | 128 | py |
XFL | XFL-master/python/common/crypto/one_time_pad/component.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from copy import deepcopy
from typing import Any, Union
import numpy as np
class OneTimePadContext(object):
def __init__(self,
modulus_exp: int = 64,
data_type: str = "torch.Tensor"):
"""Context includes modulus, plaintext data type, exponent for encoding and etc..
Args:
modulus_exp (int, optional): exponent(base 2) of modulus. Ciphertext will consists of integers
module 2^modulus_exp. Defaults to 64.
data_type (str, optional): plaintext type, supports "torch.Tensor" and "numpy.ndarray",
or "torch" and "numpy" for short. Defaults to "torch.Tensor".
Raises:
ValueError: if modulus_exp not in [64, 128]
ValueError: if data_type not in ["numpy.ndarray", "numpy", "torch.Tensor", "torch"]
"""
if modulus_exp not in [64, 128]:
raise ValueError(f"Supported modulus_exps are 64 and 128, got {modulus_exp}.")
if data_type not in ["numpy.ndarray", "numpy", "torch.Tensor", "torch"]:
raise ValueError(f"Supported data_types are 'numpy.ndarray', 'numpy', 'torch.Tensor', 'torch', got {data_type}.")
if modulus_exp == 128:
self.__exp = 64
else:
self.__exp = 32
self.__modulus_exp = modulus_exp
self.__scalar = 1 << self.__exp
self.__modulus = 1 << modulus_exp
if "numpy" in data_type:
self.__data_type = np.ndarray
elif "torch" in data_type:
import torch
self.__data_type = torch.Tensor
self.__security_strength = modulus_exp
self.__encode_method = "fixed-point arithmetic"
@property
def exp(self):
return self.__exp
@property
def modulus_exp(self):
return self.__modulus_exp
@property
def scalar(self):
return self.__scalar
@property
def modulus(self):
return self.__modulus
@property
def data_type(self):
return self.__data_type
@property
def security_strength(self):
return self.__security_strength
@property
def encode_method(self):
return self.__encode_method
def __str__(self) -> str:
out = "scalar: 1 << {}, modulus: 1 << {}, data_type: {}, security_strength: {}, encode_method: {}"
out = out.format(self.exp, self.modulus_exp, self.data_type, self.security_strength, self.encode_method)
return out
def __eq__(self, other: object) -> bool:
return self.__modulus_exp == other.modulus_exp
@staticmethod
def serialize(data) -> bytes:
return pickle.dumps(data)
@staticmethod
    def deserialize(data) -> Any:
return pickle.loads(data)
class OneTimeKey(object):
def __init__(self, key: Union[list[np.ndarray], np.ndarray], modulus_exp: int = 64):
dtype = np.uint64 if modulus_exp == 64 else object
modulus = (1 << modulus_exp)
if isinstance(key, list):
self.value = [np.array(np.mod(v, modulus)).astype(dtype) for v in key]
else:
self.value = np.array(np.mod(key, modulus)).astype(dtype)
def __len__(self):
return len(self.value)
class OneTimePadCiphertext(object):
def __init__(self,
data: Union[list, np.ndarray, bytes],
context_: OneTimePadContext):
"""[summary]
Args:
data (Union[list, np.ndarray, bytes]): list or np.ndarray consists of integers, or picked object of them.
context_ (OneTimePadContext): see OneTimePadContext.
Raises:
TypeError: if the type of data is not bytes, list or np.ndarray.
TypeError: if the type of context_ is not OneTimePadContext or bytes.
"""
if isinstance(context_, OneTimePadContext):
self.__context = context_
elif isinstance(context_, bytes):
self.__context = pickle.loads(context_)
else:
raise TypeError(f"Got context type {type(context_)}, supported types are 'OneTimePadContext', 'bytes'")
dtype = np.uint64 if self.__context.modulus_exp == 64 else object
if isinstance(data, bytes):
self.__data = np.array(OneTimePadContext.deserialize(data), dtype=dtype)
elif isinstance(data, list):
self.__data = np.array(data, dtype=dtype)
        elif isinstance(data, (np.ndarray, np.float64, np.uint64)):
self.__data = data.astype(dtype)
else:
raise TypeError(f"Got data type {type(data)}, supported types are 'list', 'np.ndarray', 'bytes'")
def __str__(self):
out = ', '.join([f"data: {self.__data}", "context: " + str(self.__context)])
return out
def __add__(self, other: object):
if self.__context != other.__context:
raise ValueError(f"Adding ciphertext with different context, {self.__context} vs {other.__context}")
if self.__context.modulus_exp == 64:
out = self.__data + other.__data
else:
out = np.array(np.mod(self.__data + other.__data, self.__context.modulus), dtype=object)
out = OneTimePadCiphertext(out, self.__context)
return out
def __sub__(self, other: object):
if self.__context != other.__context:
raise ValueError(f"Subtracting ciphertext with different context, {self.__context} vs {other.__context}")
if self.__context.modulus_exp == 64:
out = self.__data - other.__data
else:
out = np.array(np.mod(self.__data - other.__data, self.__context.modulus), dtype=object)
out = OneTimePadCiphertext(out, self.__context)
return out
@property
def data(self):
return self.__data
@property
def context_(self):
return self.__context
def serialize(self) -> bytes:
"""Pickle __data for transmission
"""
return OneTimePadContext.serialize(self.__data)
def decode(self):
"""Decode to plaintext when all the keys in the ciphertext are cancelled
"""
if self.__data.shape == ():
zero_shape = True
data = np.array([self.__data], dtype=object)
else:
zero_shape = False
data = self.__data.astype(object)
idx = np.where(data > self.__context.modulus // 2)
out = deepcopy(data)
if len(idx[0]) != 0:
out[idx] -= self.__context.modulus
out /= self.__context.scalar
if self.__context.data_type == np.ndarray:
out = out.astype(np.float32)
else:
import torch
out = torch.from_numpy(out.astype(np.float32))
if zero_shape:
return out[0]
else:
return out
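# Sketch of pad cancellation (using OneTimeAdd from one_time_add.py): if party
# A encrypts with +k and party B with -k (is_addition=False), the sum of the
# two ciphertexts carries no key and can be decoded directly:
#
#     c_a = OneTimeAdd.encrypt(ctx, x_a, key, is_addition=True)
#     c_b = OneTimeAdd.encrypt(ctx, x_b, key, is_addition=False)
#     (c_a + c_b).decode()   # -> approx. x_a + x_b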
| 7,684 | 33.931818 | 125 | py |
XFL | XFL-master/python/common/crypto/one_time_pad/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/crypto/ckks/utils.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# important
supported_poly_modulus_degree = [1024, 2048, 4096, 8192, 16384, 32768]
max_coeff_modulus_bitlength = {
1024: 27,
2048: 54,
4096: 109,
8192: 218,
16384: 438,
32768: 881
}
security_table = [
['security_level', 'poly_moduls_degree', 'max_coeff_modulus_bitlength', 'suggested_coeff_mod_bit_sizes'],
[128, 1024, 27, None],
[128, 2048, 54, None],
[128, 4096, 109, [40, 20, 40]],
[128, 8192, 218, [60, 40, 40, 60]],
[128, 16384, 438, None],
[128, 32768, 881, None],
[192, 1024, 19, None],
[192, 2048, 37, None],
[192, 4096, 75, None],
[192, 8192, 152, None],
[192, 16384, 305, None],
[192, 32768, 611, None],
[256, 1024, 14, None],
[256, 2048, 29, None],
[256, 4096, 58, None],
[256, 8192, 118, None],
[256, 16384, 237, None],
[256, 32768, 476, None],
] | 1,467 | 28.959184 | 109 | py |
XFL | XFL-master/python/common/crypto/ckks/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/crypto/csprng/drbg.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from .drbg_base import DRBGBase
from .hmac_drbg import HMAC_DRBG
#######################################################
# NIST SP 800-90A
# NIST SP 800-57 PART 1 Table 2, Table 3, Table 4
#######################################################
def get_drbg_inst(name: str,
entropy: Union[bytes, bytearray],
method: str = '',
nonce: Union[bytes, bytearray] = b'',
additional_data: Union[bytes, bytearray] = b'') -> DRBGBase:
""""
Get DRBG instance to generate random numbers by a given entropy.
Reference material is NIST SP 800-90A[1].
Args:
name (str): support "hmac_drbg", ("hash_drbg" and "ctr_drbg" in future).
entropy (Union[bytes, bytearray]): see [1]. For example, entropy can be a truncated secret key generated by
diffie-hellman key exchange.
method (str, optional): depend on name. Defaults to ''. see Specificatioins.
nonce (Union[bytes, bytearray], optional): see [1]. Defaults to b''.
additional_data (Union[bytes, bytearray], optional): see [1]. Defaults to b''.
Returns:
DRBGBase: a DRBG instance
Specifications:
for name = 'hmac_drbg', method can be one of the ['sha1', 'sha224', 'sha256', 'sha384', 'sha512', ('sm3' in future)],
if not specified, default method is 'sha512'.
Length(number of bytes) of the entropy should satisfy:
method min_entropy_length max_entropy_length
sha1 10 1 << 32
sha224 14 1 << 32
sha256 16 1 << 32
sha384 24 1 << 32
sha512 32 1 << 32
sm3 16 1 << 32
Also, length of additional_data should less equal than 1 << 32.
"""
opt = {
"hmac_drbg": {
'cls': HMAC_DRBG,
'method': 'sha512' if len(method) == 0 else method
}
}
try:
v = opt[name]
return v['cls'](v['method'], entropy, nonce, additional_data)
except KeyError:
raise KeyError(f"{name} not supported in drbg algorithm, use one of {list(opt.keys())} instead.")
except Exception as e:
raise e
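# Minimal usage sketch; the entropy must be at least 32 bytes for the default
# sha512 method:
#
#     import secrets
#     drbg = get_drbg_inst("hmac_drbg", entropy=secrets.token_bytes(32))
#     r1, r2 = drbg.gen([16, 16])   # two 16-byte random strings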
| 2,975 | 37.153846 | 125 | py |
XFL | XFL-master/python/common/crypto/csprng/hmac_drbg.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import warnings
from typing import Generator, List, Union
from .drbg_base import DRBGBase
class HMAC_DRBG(DRBGBase):
"""
Implement HMAC_DRBG algorithm, defined by NIST SP 800-90A[1], Section 10.1.2.
"""
'''NIST SP 800-90A Table 2'''
__max_number_of_bytes_per_request = 1 << 16
__maximum_number_of_requests_between_reseeds = 1 << 48
# __max_bitlength = 1 << 35 # entropy, nonce, additonal_data should not exceed __max_length. Ignored.
def __init__(self, method: str, entropy: Union[bytes, bytearray], nonce: Union[bytes, bytearray] = b'', additional_data: Union[bytes, bytearray] = b''):
"""
Args:
            method (str): hash function, supported methods are SHA1, SHA224, SHA256, SHA384, SHA512
entropy (Union[bytes, bytearray]): see [1].
nonce (Union[bytes, bytearray], optional): see [1]. Defaults to b''.
additional_data (Union[bytes, bytearray], optional): personalization string, see [1]. Defaults to b''.
"""
super().__init__(method, entropy, nonce=nonce, additional_data=additional_data)
method = method.lower()
if method in ['sha1', 'sha224', 'sha256', 'sha384', 'sha512']:
self.__hashfunc = getattr(hashlib, method)
        elif method == 'sm3':
            raise ValueError("Hash method sm3 is not yet supported in HMAC_DRBG, supported methods are SHA1, SHA224, SHA256, SHA384, SHA512")
        else:
            raise ValueError(f"Hash method {method} not supported in HMAC_DRBG, supported methods are SHA1, SHA224, SHA256, SHA384, SHA512")
self.outlen_byte = self.__hashfunc().digest_size
self.outlen_bit = 8 * self.outlen_byte
"""NIST SP 800-57 PART1 Table 3"""
self.security_strength = self.outlen_bit // 2
if len(entropy) < self.security_strength // 8:
raise ValueError(f"Entropy's length is too short, should greater than {self.security_strength // 8} bytes.")
self.__K = b'\x00' * self.outlen_byte
self.__V = b'\x01' * self.outlen_byte
self.__update(entropy + nonce + additional_data)
self.__reseed_counter = 0
self.acquire_reseed = False
self.__buffer = bytearray()
def __hmac(self, data: Union[bytes, bytearray] = b'') -> bytes:
return hmac.new(self.__K, self.__V + data, self.__hashfunc).digest()
def __update(self, additional_data: Union[bytes, bytearray] = b''):
self.__K = self.__hmac(b'\x00' + additional_data)
self.__V = self.__hmac()
if len(additional_data) != 0:
self.__K = self.__hmac(b'\x01' + additional_data)
self.__V = self.__hmac()
def __str__(self) -> str:
s = "HMAC_DRBG: hash function {}, security strength {}"
return s.format(self.method, self.security_strength)
def reseed(self, entropy: Union[bytes, bytearray], additional_data: Union[bytes, bytearray] = b''):
self.__update(entropy + additional_data)
self.__reseed_counter = 0
self.acquire_reseed = False
def __gen(self, num_byte: int, additional_data: bytes = b''):
"""num_byte should <= __max_number_of_bytes_per_request
"""
buffer = bytearray()
if len(additional_data) != 0:
self.__update(additional_data)
while len(buffer) < num_byte:
self.__V = self.__hmac()
buffer += self.__V
self.__update(additional_data)
self.__buffer += buffer
def generator(self, num_bytes: Union[List[int], int], additional_data: bytes = b'') -> Generator:
"""generator version, need to use next(...) to get result, or use a 'for' loop"""
"""
NOTE: The returned bytes of
calling generator several times and
calling generator one time by packing number of bytes to a list
are mainly DIFFERENT.
for example,
the bytes generate by generator([n1, n2, ...]) and {generator(n1), generator(n2), ...} are different.
The returned bytes of
calling generator one time with a list of number of bytes and
calling generator one time with the sum of the same list
will be the SAME.
for example,
the bytes generate by generator([n1, n2, ...]) and generator(sum([n1, n2, ...])) are the same.
"""
self.__buffer.clear()
if isinstance(num_bytes, int):
num_bytes = [num_bytes]
total_bytes = sum(num_bytes)
quotient, remainder = divmod(total_bytes, self.__max_number_of_bytes_per_request)
index = 0
n_bytes = [self.__max_number_of_bytes_per_request] * quotient + [remainder] * (remainder > 0)
for n in n_bytes:
self.__gen(n, additional_data)
self.__reseed_counter += 1
while index < len(num_bytes) and len(self.__buffer) >= num_bytes[index]:
out = self.__buffer[:num_bytes[index]]
del self.__buffer[:num_bytes[index]]
index += 1
if self.__reseed_counter >= self.__maximum_number_of_requests_between_reseeds:
# almost never happens
self.acquire_reseed = True
warnings.warn("Max number of requests reached, HMAC_DRBG needs reseeding.")
yield out
self.__buffer.clear()
    def gen(self, num_bytes: Union[List[int], int], additional_data: bytes = b'') -> Union[bytearray, List[bytearray]]:
"""normal version, return result immediately"""
self.__buffer.clear()
is_integer = False
if isinstance(num_bytes, int):
is_integer = True
num_bytes = [num_bytes]
total_bytes = sum(num_bytes)
quotient, remainder = divmod(total_bytes, self.__max_number_of_bytes_per_request)
index = 0
out = []
n_bytes = [self.__max_number_of_bytes_per_request] * quotient + [remainder] * (remainder > 0)
for n in n_bytes:
self.__gen(n, additional_data)
self.__reseed_counter += 1
while index < len(num_bytes) and len(self.__buffer) >= num_bytes[index]:
out.append(self.__buffer[:num_bytes[index]])
del self.__buffer[:num_bytes[index]]
index += 1
if self.__reseed_counter >= self.__maximum_number_of_requests_between_reseeds:
# almost never happens
self.acquire_reseed = True
warnings.warn("Max number of requests reached, HMAC_DRBG needs reseeding.")
self.__buffer.clear()
if is_integer:
out = out[0]
return out
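# A minimal usage sketch of HMAC_DRBG (illustration only, not used by the
# class above), assuming SHA-256 and a fixed placeholder entropy value; real
# callers must supply at least security_strength // 8 bytes of entropy from a
# secure randomness source. It demonstrates the property documented in
# generator()/gen(): requesting [n1, n2, ...] in one call yields the same
# byte stream as requesting sum([n1, n2, ...]) once.
def _hmac_drbg_usage_sketch():
    drbg_a = HMAC_DRBG('sha256', entropy=b'\x11' * 32, nonce=b'nonce')
    drbg_b = HMAC_DRBG('sha256', entropy=b'\x11' * 32, nonce=b'nonce')
    chunks = drbg_a.gen([16, 16])  # two 16-byte outputs from a single request
    whole = drbg_b.gen(32)         # one 32-byte output
    assert bytes(b''.join(chunks)) == bytes(whole)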
| 7,678 | 40.961749 | 156 | py |
XFL | XFL-master/python/common/crypto/csprng/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/crypto/csprng/drbg_base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any, Generator, List, Union
class DRBGBase(abc.ABC):
def __init__(self,
method: str,
entropy: Union[bytes, bytearray],
nonce: Union[bytes, bytearray] = b'',
additional_data: Union[bytes, bytearray] = b''):
for i in [entropy, nonce, additional_data]:
self._check_input_type(i)
self.method = method
self.entropy = entropy
self.nonce = nonce
self.additional_data = additional_data
def _check_input_type(self, input: Any):
if not isinstance(input, (bytes, bytearray)):
raise TypeError(f"Expect type bytes or bytearray for DRBG input, got {type(input)}")
    @abc.abstractmethod
def reseed(self, entropy: Union[bytes, bytearray], additional_data: Union[bytes, bytearray]):
pass
    @abc.abstractmethod
def gen(self, num_bytes: Union[List[int], int], additional_data: Union[bytes, bytearray]) -> bytearray:
"""normal mode"""
pass
    @abc.abstractmethod
def generator(self, num_bytes: Union[List[int], int], additional_data: Union[bytes, bytearray]) -> Generator:
"""generator mode"""
pass
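# A minimal sketch of the DRBGBase contract, assuming a toy repeat-the-seed
# generator; it is NOT a secure DRBG and exists only to make the abstract
# interface concrete. Real implementations (e.g. HMAC_DRBG) live in sibling
# modules of this package.
class _SketchDRBG(DRBGBase):
    def reseed(self, entropy: Union[bytes, bytearray], additional_data: Union[bytes, bytearray] = b''):
        self.entropy = entropy
    def gen(self, num_bytes: Union[List[int], int], additional_data: Union[bytes, bytearray] = b'') -> bytearray:
        n = num_bytes if isinstance(num_bytes, int) else sum(num_bytes)
        reps = n // max(len(self.entropy), 1) + 1
        return bytearray((self.entropy * reps)[:n])  # toy output, not random
    def generator(self, num_bytes: Union[List[int], int], additional_data: Union[bytes, bytearray] = b'') -> Generator:
        nums = [num_bytes] if isinstance(num_bytes, int) else num_bytes
        for n in nums:
            yield self.gen(n, additional_data)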
| 1,919 | 34.555556 | 113 | py |
XFL | XFL-master/python/common/dataset/h_kmeans.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.datasets import make_blobs
import pandas as pd
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--splits", type=int, default=2,
help="number of parties"
)
parser.add_argument(
"--ndims", type=int, default=2,
help="number of data dims"
)
parser.add_argument(
"--nsamples", type=int, default=150,
help="number of samples"
)
args = parser.parse_args()
dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
data_folder = os.path.join(
dirpath, "horizontal_kmeans", f"{args.splits}party")
print(f"data folder: {data_folder}")
if not os.path.exists(data_folder):
os.makedirs(data_folder)
total_samples = args.nsamples * args.splits + 50
X, y = make_blobs(
n_samples=total_samples,
n_features=args.ndims,
centers=3,
random_state=42,
cluster_std=1.0
)
print("Generating dataset")
for party in range(args.splits):
start_ind = party * args.nsamples
end_ind = (party + 1) * args.nsamples
data_X = X[start_ind:end_ind, :]
data_y = y[start_ind:end_ind]
data_X_df = pd.DataFrame(
data_X, columns=["X_" + str(ind) for ind in range(args.ndims)])
data_y_df = pd.DataFrame(data_y, columns=["label"])
data_df = pd.concat([data_y_df, data_X_df], axis=1)
data_path = os.path.join(data_folder, f"blob_{party+1}.csv")
data_df.to_csv(data_path, index=False)
print(f"Writing to {data_path}")
data_X = X[-50:]
data_y = y[-50:]
data_X_df = pd.DataFrame(
data_X, columns=["X_" + str(ind) for ind in range(args.ndims)])
data_y_df = pd.DataFrame(data_y, columns=["label"])
data_df = pd.concat([data_y_df, data_X_df], axis=1)
data_path = os.path.join(data_folder, f"blob_assist_trainer.csv")
data_df.to_csv(data_path, index=False)
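    # A sketch of a typical invocation and of what this script writes, assuming
    # PROJECT_HOME is set; the flag values are examples only:
    #   python h_kmeans.py --splits 2 --ndims 2 --nsamples 150
    # writes, under $PROJECT_HOME/dataset/horizontal_kmeans/2party/:
    #   blob_1.csv and blob_2.csv (one file per party, "label" column first)
    #   blob_assist_trainer.csv (50 extra samples held by the assist trainer)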
| 2,592 | 32.675325 | 75 | py |
XFL | XFL-master/python/common/dataset/azpro_data.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_url, pd_train_test_split
class Azpro(torch.utils.data.Dataset):
url = "https://r-data.pmagunia.com/system/files/datasets/dataset-15359.csv"
md5 = None
dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
datapath = os.path.join(dirpath, "azpro")
datadir = "azpro_data"
def __init__(
self,
        redownload: bool = False,
        norm: bool = False,
    ) -> None:
super().__init__()
if not os.path.exists(self.dirpath):
os.mkdir(self.dirpath)
self._download(redownload)
# raw_df = pd.read_csv("/opt/dataset/data.csv", index_col=0)
raw_df = pd.read_csv(self.datapath, index_col=None)
raw_df = raw_df.drop(columns=["hospital"])
self.feature = raw_df.iloc[:, 1:]
self.feature_cols = self.feature.columns
self.label = pd.DataFrame(raw_df.iloc[:, 0])
self.label.columns = ["y"]
self.id = np.arange(len(self.label))
self.data = self.label.join(self.feature)
self.data = self.data.reset_index()
self.data.columns = ["id"] + list(self.data.columns[1:])
if reallocate_dict["norm"]:
feature = self.data.iloc[:, 1:]
scaler = MinMaxScaler()
data_norm = pd.DataFrame(scaler.fit_transform(feature), columns=feature.columns)
self.data = self.data.iloc[:, :1].join(data_norm)
def __getitem__(self, index: int) -> Any:
return self.feature[index], self.label[index]
def _download(self, redownload):
if redownload:
shutil.rmtree(os.path.join(self.dirpath, self.datadir))
download_url(
self.url, self.datapath, self.md5)
def reallocate(self, reallocate_dict):
mode = reallocate_dict['mode']
self.datadir = f"{self.datadir}_{mode}"
splits = reallocate_dict['splits']
reallocate_folder = f'{splits}party'
test_ratio = reallocate_dict['test_ratio']
random_state = reallocate_dict["random_seed"]
parties = reallocate_dict["parties"]
np.random.seed(random_state)
final_dir_path = os.path.join(
self.dirpath, self.datadir, reallocate_folder)
if not os.path.exists(final_dir_path):
os.makedirs(final_dir_path)
if mode == "vertical":
cols = self.feature_cols
split_cols = np.array_split(cols, splits)
for i, span in enumerate(split_cols):
if "labeled" in parties[i]:
train_data, test_data = pd_train_test_split(
self.data[["id", "y"] + list(span)], test_ratio=test_ratio, random_state=random_state)
else:
train_data, test_data = pd_train_test_split(
self.data[["id"] + list(span)], test_ratio=test_ratio, random_state=random_state)
train_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
train_data.to_csv(train_csv_path, index=False)
test_data.to_csv(test_csv_path, index=False)
elif mode == "horizontal":
train_data, test_data = pd_train_test_split(
self.data, test_ratio=test_ratio, random_state=random_state)
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_test.csv')
test_data.to_csv(test_csv_path, index=False)
rand_idx = np.random.permutation(range(len(train_data)))
indices = np.array_split(rand_idx, splits)
for i, party in enumerate(parties):
csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{party}.csv')
data = train_data.loc[indices[i]]
data.to_csv(csv_path, index=False)
os.remove(self.datapath)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, default="vertical",
help="vertical or horizontal task")
parser.add_argument("--splits", type=int, default=2,
help="number of parties")
parser.add_argument("--test_ratio", type=float,
default=0.3, help="ratio of test data")
parser.add_argument("--random_seed", type=int,
default=0, help="random seed")
parser.add_argument("--party", nargs="+", help="involved parties")
parser.add_argument("--norm", type=bool,
default=False, help="normalization")
args = parser.parse_args()
reallocate_dict = {
"mode": args.mode,
"splits": args.splits,
"test_ratio": args.test_ratio,
"random_seed": args.random_seed,
"parties": args.party,
"norm": args.norm
}
    azpro = Azpro(norm=args.norm)
    azpro.reallocate(reallocate_dict)
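    # A sketch of typical invocations, assuming PROJECT_HOME is set; the party
    # names are examples, but in vertical mode only parties whose name contains
    # "labeled" receive the y column:
    #   python azpro_data.py --mode vertical --splits 2 --party node-1_labeled node-2
    #   python azpro_data.py --mode horizontal --splits 2 --party node-1 node-2
    # Note that argparse's type=bool treats any non-empty string as True, so
    # "--norm False" still enables normalization; omit --norm to disable it.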
| 5,809 | 38.52381 | 110 | py |
XFL | XFL-master/python/common/dataset/hiv.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_url, pd_train_test_split
import zipfile
class HIV(torch.utils.data.Dataset):
url = "https://data.dgl.ai/dataset/hiv.zip"
md5 = None
dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
datapath = os.path.join(dirpath, "hiv")
datadir = "hiv"
def __init__(
self,
redownload: bool = False,
) -> None:
super().__init__()
if not os.path.exists(self.dirpath):
os.mkdir(self.dirpath)
self._download(redownload)
raw_df = pd.read_csv(self.dirpath + "/HIV.csv")
self.feature = raw_df.iloc[:, 0].values.reshape(-1, 1)
self.label = raw_df.iloc[:, 2].values
self.feature_cols = [f'x{i}' for i in range(self.feature.shape[1])]
self.id = np.arange(len(self.label))
self.reconstruct_df = np.hstack(
[self.id.reshape(-1, 1), self.label.reshape(-1, 1), self.feature])
self.data = pd.DataFrame(data=self.reconstruct_df,
columns=["id", "y"] + list(self.feature_cols)
)
def __getitem__(self, index: int) -> Any:
return self.feature[index], self.label[index]
def __len__(self) -> int:
return len(self.data.values)
def _download(self, redownload):
if redownload:
shutil.rmtree(os.path.join(self.dirpath, self.datadir))
download_url(
self.url, self.datapath, self.md5)
self._extract_archive(self.datapath, self.dirpath)
def _extract_archive(self, datapath, dir_path):
with zipfile.ZipFile(datapath, "r") as archive:
archive.extractall(path=dir_path)
def reallocate(self, reallocate_dict):
mode = reallocate_dict['mode']
self.datadir = f"{self.datadir}_{mode}"
splits = reallocate_dict['splits']
reallocate_folder = f'{splits}party'
test_ratio = reallocate_dict['test_ratio']
random_state = reallocate_dict["random_seed"]
parties = reallocate_dict["parties"]
np.random.seed(random_state)
final_dir_path = os.path.join(
self.dirpath, self.datadir, reallocate_folder)
if not os.path.exists(final_dir_path):
os.makedirs(final_dir_path)
if mode == "vertical":
cols = self.feature_cols
split_cols = np.array_split(cols, splits)
for i, span in enumerate(split_cols):
if "labeled" in parties[i]:
train_data, test_data = pd_train_test_split(
self.data[["id", "y"] + list(span)], test_ratio=test_ratio, random_state=random_state)
else:
train_data, test_data = pd_train_test_split(
self.data[["id"] + list(span)], test_ratio=test_ratio, random_state=random_state)
train_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
train_data.to_csv(train_csv_path, index=False)
test_data.to_csv(test_csv_path, index=False)
elif mode == "horizontal":
train_data, test_data = pd_train_test_split(
self.data, test_ratio=test_ratio, random_state=random_state)
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_test.csv')
test_data.to_csv(test_csv_path, index=False)
rand_idx = np.random.permutation(range(len(train_data)))
indices = np.array_split(rand_idx, splits)
for i, party in enumerate(parties):
csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{party}.csv')
data = train_data.loc[indices[i]]
data.to_csv(csv_path, index=False)
os.remove(self.datapath)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, default="vertical",
help="vertical or horizontal task")
parser.add_argument("--splits", type=int, default=2,
help="number of parties")
parser.add_argument("--test_ratio", type=float,
default=0.3, help="ratio of test data")
parser.add_argument("--random_seed", type=int,
default=0, help="random seed")
parser.add_argument("--party", nargs="+", help="involved parties")
args = parser.parse_args()
reallocate_dict = {
"mode": args.mode,
"splits": args.splits,
"test_ratio": args.test_ratio,
"random_seed": args.random_seed,
"parties": args.party,
}
hiv = HIV()
hiv.reallocate(reallocate_dict)
| 5,661 | 37.256757 | 110 | py |
XFL | XFL-master/python/common/dataset/sst2.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_and_extract_data, pd_train_test_split
class SST2(torch.utils.data.Dataset):
url = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"
md5 = "9f81648d4199384278b86e315dac217c"
dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
datapath = os.path.join(dirpath, "SST-2.zip")
data_folder = "SST-2"
raw_data_folder = os.path.join(dirpath, data_folder, "original")
def __init__(
self,
redownload: bool = False,
mode: str = "train"
) -> None:
super().__init__()
if not os.path.exists(self.dirpath):
os.mkdir(self.dirpath)
self._download_and_extract(redownload)
self.mode = mode
self.train = pd.read_csv(os.path.join(self.dirpath, self.data_folder, "train.tsv"),sep='\t')
self.val = pd.read_csv(os.path.join(self.dirpath, self.data_folder, "dev.tsv"), sep='\t')
self.test = pd.read_csv(os.path.join(self.dirpath, self.data_folder, "test.tsv"), sep='\t')
self.data = getattr(self, mode)
def __getitem__(self, index: int) -> Any:
return self.data["sentence"].values[index], self.data["label"].values[index]
def __len__(self) -> int:
return len(self.data["sentence"].values)
def get_data(self):
return self.data["sentence"].values, self.data["label"].values
def _download_and_extract(self, redownload):
if redownload:
shutil.rmtree(os.path.join(self.dirpath, self.data_folder))
download_and_extract_data(
self.url, self.md5, self.datapath, data_folder=self.data_folder)
def reallocate(self, reallocate_dict):
mode = reallocate_dict['mode']
splits = reallocate_dict['splits']
reallocate_folder = f'{splits}party'
random_state = reallocate_dict["random_seed"]
parties = reallocate_dict["parties"]
np.random.seed(random_state)
final_dir_path = os.path.join(
self.dirpath, self.data_folder, reallocate_folder)
if not os.path.exists(final_dir_path):
os.makedirs(final_dir_path)
if mode == "vertical":
raise NotImplementedError("SST-2 task currently doesn't support vertical federated learning")
elif mode == "horizontal":
val_path = os.path.join(
final_dir_path, f'{self.data_folder}_val.tsv')
self.val.to_csv(val_path, index=False, sep="\t")
rand_idx = np.random.permutation(range(len(self.train)))
indices = np.array_split(rand_idx, splits)
for i, party in enumerate(parties):
tsv_path = os.path.join(
final_dir_path, f'{self.data_folder}_{party}.tsv')
data = self.train.loc[indices[i]]
data.to_csv(tsv_path, index=False, sep='\t')
shutil.rmtree(self.raw_data_folder)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, default="horizontal",
help="vertical or horizontal task")
parser.add_argument("--splits", type=int, default=2,
help="number of parties")
parser.add_argument("--random_seed", type=int,
default=0, help="random seed")
parser.add_argument("--party", nargs="+", help="involved parties")
args = parser.parse_args()
reallocate_dict = {
"mode": args.mode,
"splits": args.splits,
"random_seed": args.random_seed,
"parties": args.party
}
    sst2 = SST2()
sst2.reallocate(reallocate_dict)
| 4,360 | 36.594828 | 105 | py |
XFL | XFL-master/python/common/dataset/breast_cancer_wisconsin.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_url, pd_train_test_split
class WDBC(torch.utils.data.Dataset):
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
md5 = None
dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
datapath = os.path.join(dirpath, "wdbc.data")
datadir = "breast_cancer_wisconsin"
def __init__(
self,
redownload: bool = False,
) -> None:
super().__init__()
if not os.path.exists(self.dirpath):
os.mkdir(self.dirpath)
self._download(redownload)
self.data = pd.read_csv(self.datapath, names=[
"id", "y"] + [f"x{i:0>2d}" for i in range(30)])
self.data["y"] = self.data["y"].map({"M": 1, "B": 0})
self.data[[f"x{i:0>2d}" for i in range(30)]] = self.data[[
f"x{i:0>2d}" for i in range(30)]].apply(lambda x: (x-x.mean())/x.std())
def __getitem__(self, index: int) -> Any:
return self.data[[f"x{i:0>2d}" for i in range(30)]].values[index], self.data["y"].values[index]
def __len__(self) -> int:
return len(self.data.values)
def _download(self, redownload):
if redownload:
shutil.rmtree(os.path.join(self.dirpath, self.datadir))
download_url(
self.url, self.datapath, self.md5)
def reallocate(self, reallocate_dict):
mode = reallocate_dict['mode']
self.datadir = f"{self.datadir}_{mode}"
splits = reallocate_dict['splits']
reallocate_folder = f'{splits}party'
test_ratio = reallocate_dict['test_ratio']
drop_ratio = reallocate_dict['drop_ratio']
random_state = reallocate_dict["random_seed"]
parties = reallocate_dict["parties"]
np.random.seed(random_state)
final_dir_path = os.path.join(
self.dirpath, self.datadir, reallocate_folder)
if not os.path.exists(final_dir_path):
os.makedirs(final_dir_path)
if mode == "vertical":
cols = [f"x{i:0>2d}" for i in range(30)]
split_cols = np.array_split(cols, splits)
for i, span in enumerate(split_cols):
if "labeled" in parties[i]:
train_data, test_data = pd_train_test_split(
self.data[["id", "y"]+list(span)], test_ratio=test_ratio, random_state=random_state)
else:
train_data, test_data = pd_train_test_split(
self.data[["id"]+list(span)], test_ratio=test_ratio, random_state=random_state)
train_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
train_data.to_csv(train_csv_path, index=False)
test_data.to_csv(test_csv_path, index=False)
elif mode == "horizontal":
train_data, test_data = pd_train_test_split(
self.data, test_ratio=test_ratio, random_state=random_state)
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_test.csv')
test_data.to_csv(test_csv_path, index=False)
# rand_idx = np.random.permutation(range(len(train_data)))
# indices = np.array_split(rand_idx, splits)
indices = np.array_split(range(len(train_data)), splits)
for i, party in enumerate(parties):
csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{party}.csv')
data = train_data.loc[indices[i]]
data.to_csv(csv_path, index=False)
elif mode == "transfer":
all_train_data, all_test_data = pd_train_test_split(
self.data, test_ratio=test_ratio, random_state=random_state)
cols = [f"x{i:0>2d}" for i in range(30)]
split_cols = np.array_split(cols, splits)
index_dict = {}
for i, span in enumerate(split_cols):
if parties[i] == "labeled":
train_data, _ = pd_train_test_split(
all_train_data[["id", "y"]+list(span)], test_ratio=drop_ratio,
shuffle=True, random_state=random_state+100
)
test_data = all_test_data[["id", "y"]+list(span)]
else:
train_data, _ = pd_train_test_split(
all_train_data[["id"]+list(span)], test_ratio=drop_ratio,
shuffle=True, random_state=random_state+200
)
test_data = all_test_data[["id"]+list(span)]
index_dict[i] = train_data["id"].to_list()
train_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
train_data.to_csv(train_csv_path, index=False)
test_data.to_csv(test_csv_path, index=False)
overlap_index = index_dict[0]
for i in range(1, len(index_dict)):
overlap_index = self.intersection_list(overlap_index, index_dict[i])
np.save(os.path.join(final_dir_path, "overlap_index.npy"), np.array(overlap_index))
os.remove(self.datapath)
@staticmethod
def intersection_list(list1, list2):
list3 = [value for value in list1 if value in list2]
return list3
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, default="vertical",
help="vertical, horizontal or transfer task")
parser.add_argument("--splits", type=int, default=2,
help="number of parties")
parser.add_argument("--test_ratio", type=float,
default=0.3, help="ratio of test data")
parser.add_argument("--drop_ratio", type=float,
default=0.3, help="ratio of drop data")
parser.add_argument("--random_seed", type=int,
default=0, help="random seed")
parser.add_argument("--party", nargs="+", help="involved parties")
args = parser.parse_args()
reallocate_dict = {
"mode": args.mode,
"splits": args.splits,
"test_ratio": args.test_ratio,
"drop_ratio": args.drop_ratio,
"random_seed": args.random_seed,
"parties": args.party
}
wdbc = WDBC()
wdbc.reallocate(reallocate_dict)
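    # A sketch of the "transfer" mode output, assuming the labeled party is
    # literally named "labeled" (transfer mode matches the name exactly, unlike
    # vertical mode, which only checks that the name contains "labeled"):
    #   python breast_cancer_wisconsin.py --mode transfer --splits 2 --test_ratio 0.3 --drop_ratio 0.3 --party labeled unlabeled
    # Each party keeps a random (1 - drop_ratio) share of the training rows for
    # its own feature columns, and overlap_index.npy stores the ids present in
    # every party's training split, i.e. the aligned (overlapping) samples.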
| 7,461 | 40.921348 | 108 | py |
XFL | XFL-master/python/common/dataset/cifar.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pickle
import shutil
from typing import Any, Callable, Optional, Tuple
import numpy as np
import torch
from common.utils.data_utils import check_integrity, download_and_extract_data
from PIL import Image
import torchvision.transforms as transforms
class CIFAR10(torch.utils.data.Dataset):
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
md5 = "c58f30108f718f92721af3b95e74349a"
dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
datapath = os.path.join(dirpath, "cifar-10-python.tar.gz")
data_folder = "cifar-10-batches-py"
data_folder_renamed = "cifar10"
train_dict = {
"data_batch_1": "c99cafc152244af753f735de768cd75f",
"data_batch_2": "d4bba439e000b95fd0a9bffe97cbabec",
"data_batch_3": "54ebc095f3ab1f0389bbae665268c751",
"data_batch_4": "634d18415352ddfa80567beed471001a",
"data_batch_5": "482c414d41f54cd18b22e5b47cb7c3cb",
}
test_dict = {
"test_batch": "40351d587109b95175f43aff81a1287e",
}
metadata = {
"filename": "batches.meta",
"key": "label_names",
"md5": "5ff9c542aee3614f3951f8cda6e48888",
}
def __init__(
self,
train: bool = True,
redownload: bool = False,
transform: Optional[Callable] = None
) -> None:
super().__init__()
if not os.path.exists(self.dirpath):
os.mkdir(self.dirpath)
self.train = train
self._download_and_extract(redownload)
self._load_metadata()
self.data_dict = self.train_dict if self.train else self.test_dict
self.transform = transform
self.data = []
self.labels = []
for file_name, md5 in self.data_dict.items():
file_path = os.path.join(self.dirpath, self.data_folder, file_name)
if not check_integrity(file_path, md5):
                self.integrity = False
                raise RuntimeError(
                    f'{file_path} has been corrupted or lost. Please redownload the data by setting redownload=True')
with open(file_path, "rb") as f:
entry = pickle.load(f, encoding="latin1")
self.data.append(entry["data"])
if "labels" in entry:
self.labels.extend(entry["labels"])
else:
self.labels.extend(entry["fine_labels"])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1)) # HWC format
self.labels = np.array(self.labels)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
data, label = self.data[index], self.labels[index]
data = Image.fromarray(data)
if self.transform is not None:
data = self.transform(data)
return data, label
def __len__(self) -> int:
return len(self.data)
def _download_and_extract(self, redownload):
if redownload:
shutil.rmtree(os.path.join(self.dirpath, self.data_folder))
download_and_extract_data(
self.url, self.md5, self.datapath, data_folder=self.data_folder)
def _load_metadata(self) -> None:
metapath = os.path.join(
self.dirpath, self.data_folder, self.metadata["filename"])
if not check_integrity(metapath, self.metadata["md5"]):
            raise RuntimeError(
                "Dataset metadata was not found or is corrupted. Please redownload the data by setting redownload=True")
with open(metapath, "rb") as f:
data = pickle.load(f, encoding="latin1")
self.classes = data[self.metadata["key"]]
self.class_to_idx = {c: i for i, c in enumerate(self.classes)}
def reallocate(self, reallocate_dict):
splits = reallocate_dict['splits']
reallocate_folder = f'{splits}party'
if not self.train:
splits = len(reallocate_dict["party"])
if reallocate_dict["sampling"] == "random":
np.random.seed(reallocate_dict["seed"])
if isinstance(reallocate_dict["splits"], int):
rand_idx = np.random.permutation(range(len(self.data)))
# split into equal arrays
                indices = np.array_split(rand_idx, splits)
elif isinstance(reallocate_dict["splits"], list):
assert sum(
reallocate_dict["splits"]) == 1, "the sum of fractions must be 1"
rand_idx = np.random.permutation(range(len(self.data)))
                sections = np.floor(
                    np.array(reallocate_dict["splits"]) * len(self.data)).astype(int)
sections = np.cumsum(sections)[:-1]
# split into arrays according to ratios
indices = np.split(rand_idx, sections)
final_dir_path = os.path.join(
self.dirpath, self.data_folder_renamed, reallocate_folder)
if not os.path.exists(final_dir_path):
os.makedirs(final_dir_path)
for i, party in enumerate(reallocate_dict["party"]):
npy_path = os.path.join(
final_dir_path, f"{self.data_folder_renamed}_{party}.npz")
data = self.data[indices[i]]
labels = self.labels[indices[i]]
np.savez(npy_path, data=data, labels=labels)
elif reallocate_dict["sampling"] == "biased":
np.random.seed(reallocate_dict["seed"])
indices_group = [[] for _ in range(reallocate_dict["splits"])]
            for group_label, fractions in reallocate_dict["group_fractions"].items():
                group_index = np.where(self.labels == group_label)[0]
                group_index = np.random.permutation(group_index)
                sections = np.floor(np.array(fractions) * len(group_index)).astype(int)
                sections = np.cumsum(sections)[:-1]
                indices = np.split(group_index, sections)
                for i, indice in enumerate(indices):
                    indices_group[i].extend(indice)
final_dir_path = os.path.join(
self.dirpath, self.data_folder_renamed, reallocate_folder)
for i, party in enumerate(reallocate_dict["party"]):
npy_path = os.path.join(final_dir_path, f"{party}.npz")
if not os.path.exists(final_dir_path):
os.makedirs(final_dir_path)
data = self.data[indices_group[i]]
labels = self.labels[indices_group[i]]
np.savez(npy_path, data=data, labels=labels)
elif reallocate_dict["sampling"] == "dirichlet":
np.random.seed(reallocate_dict["seed"])
party_size = 0
min_size = len(self.data) / reallocate_dict["splits"] * 0.95
while party_size < min_size:
indices_group = [[] for _ in range(reallocate_dict["splits"])]
for group_label in range(len(set(self.labels))):
group_index = np.where(self.labels == group_label)[0]
group_index = np.random.permutation(group_index)
fractions = np.random.dirichlet(np.repeat(reallocate_dict["beta"], reallocate_dict["splits"]))
sections = (np.cumsum(fractions) * len(group_index)).astype(int)[:-1]
indices = np.split(group_index, sections)
for i, indice in enumerate(indices):
indices_group[i].extend(indice)
party_size = min([len(ind) for ind in indices_group])
final_dir_path = os.path.join(
self.dirpath, self.data_folder_renamed, reallocate_folder)
if not os.path.exists(final_dir_path):
os.makedirs(final_dir_path)
for i, party in enumerate(reallocate_dict["party"]):
npy_path = os.path.join(
final_dir_path, f"{self.data_folder_renamed}_{party}.npz")
data = self.data[indices_group[i]]
labels = self.labels[indices_group[i]]
np.savez(npy_path, data=data, labels=labels)
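# A minimal sketch (not used by the classes in this file) of how to read one
# of the per-party files written by reallocate(); the path is an example only:
def _load_party_npz(npz_path: str):
    """Return (data, labels) arrays from a reallocated party .npz file."""
    party = np.load(npz_path)
    # data has shape (N, 32, 32, 3) in HWC order, labels has shape (N,)
    return party["data"], party["labels"]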
class CIFAR100(CIFAR10):
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
md5 = "eb9058c3a382ffc7106e4002c42a8d85"
dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
datapath = os.path.join(dirpath, "cifar-100-python.tar.gz")
data_folder = "cifar-100-python"
data_folder_renamed = "cifar-100"
train_dict = {
"train": "16019d7e3df5f24257cddd939b257f8d",
}
    test_dict = {
        "test": "f0ef6b0ae62326f3e7ffdfab6717acfc",
    }
metadata = {
"filename": "meta",
"key": "fine_label_names",
"md5": "7973b15100ade9c7d40fb424638fde48",
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--splits", type=int, default=2,
help="number of parties")
parser.add_argument("--sampling", type=str, default="random",
help="mode to split the dataset, random, biased or dirichlet")
parser.add_argument("--beta", type=float, default=1, help="dirichlet parameter, smaller means more non-iid")
parser.add_argument("--party", nargs="+", help="involved parties")
parser.add_argument("--keep_raw_data",
action='store_true', help="keep raw data file")
args = parser.parse_args()
if args.sampling == "random":
train_reallocate_dict = {
"sampling": "random",
"splits": args.splits,
"seed": 0,
"party": args.party
}
test_reallocate_dict = {
"sampling": "random",
"splits": args.splits,
"seed": 0,
"party": ["test"]
}
elif args.sampling == "biased":
train_reallocate_dict = {
"sampling": "biased",
"splits": args.splits,
"seed": 0,
"group_fractions": {1: [0.8, 0.2], 2: [0.8, 0.2], 3: [0.8, 0.2], 4: [0.8, 0.2], 5: [0.8, 0.2], 6: [0.2, 0.8], 7: [0.2, 0.8], 8: [0.2, 0.8], 9: [0.2, 0.8], 0: [0.2, 0.8]},
"party": args.party
}
test_reallocate_dict = {
"sampling": "random",
"splits": args.splits,
"seed": 0,
"party": ["test"]
}
elif args.sampling == "dirichlet":
train_reallocate_dict = {
"sampling": "dirichlet",
"splits": args.splits,
"seed": 0,
"beta": args.beta,
"party": args.party
}
test_reallocate_dict = {
"sampling": "random",
"splits": args.splits,
"seed": 0,
"party": ["test"]
}
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
cifar10_train = CIFAR10(train=True)
cifar10_train.reallocate(train_reallocate_dict)
cifar10_test = CIFAR10(train=False)
cifar10_test.reallocate(test_reallocate_dict)
if not args.keep_raw_data:
shutil.rmtree(os.path.join(
cifar10_train.dirpath, cifar10_train.data_folder)) | 12,098 | 40.434932 | 182 | py |
XFL | XFL-master/python/common/dataset/boston_housing_price.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import Any
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
import torch
from common.utils.data_utils import download_url, pd_train_test_split
class Boston(torch.utils.data.Dataset):
url = "http://lib.stat.cmu.edu/datasets/boston"
md5 = None
dirpath = os.path.join(os.environ['PROJECT_HOME'], 'dataset')
datapath = os.path.join(dirpath, "boston")
datadir = "boston_housing_price"
def __init__(
self,
        redownload: bool = False,
        norm: bool = False,
    ) -> None:
super().__init__()
if not os.path.exists(self.dirpath):
os.mkdir(self.dirpath)
self._download(redownload)
        raw_df = pd.read_csv(self.datapath, sep=r"\s+", skiprows=22, header=None)
self.feature = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
self.label = raw_df.values[1::2, 2]
self.feature_cols = [f'x{i}' for i in range(self.feature.shape[1])]
self.id = np.arange(len(self.label))
self.reconstruct_df = np.hstack([self.id.reshape(-1, 1), self.label.reshape(-1, 1), self.feature])
self.data = pd.DataFrame(data=self.reconstruct_df,
columns=["id", "y"] + list(self.feature_cols)
)
if reallocate_dict["norm"]:
feature = self.data.iloc[:, 2:]
scaler = StandardScaler()
data_norm = pd.DataFrame(scaler.fit_transform(feature), columns=feature.columns)
self.data = self.data.iloc[:, :2].join(data_norm)
def __getitem__(self, index: int) -> Any:
return self.feature[index], self.label[index]
def __len__(self) -> int:
return len(self.data.values)
def _download(self, redownload):
if redownload:
shutil.rmtree(os.path.join(self.dirpath, self.datadir))
download_url(
self.url, self.datapath, self.md5)
def reallocate(self, reallocate_dict):
mode = reallocate_dict['mode']
self.datadir = f"{self.datadir}_{mode}"
splits = reallocate_dict['splits']
reallocate_folder = f'{splits}party'
test_ratio = reallocate_dict['test_ratio']
random_state = reallocate_dict["random_seed"]
parties = reallocate_dict["parties"]
np.random.seed(random_state)
final_dir_path = os.path.join(
self.dirpath, self.datadir, reallocate_folder)
if not os.path.exists(final_dir_path):
os.makedirs(final_dir_path)
if mode == "vertical":
cols = self.feature_cols
split_cols = np.array_split(cols, splits)
for i, span in enumerate(split_cols):
if "labeled" in parties[i]:
train_data, test_data = pd_train_test_split(
self.data[["id", "y"] + list(span)], test_ratio=test_ratio, random_state=random_state)
else:
train_data, test_data = pd_train_test_split(
self.data[["id"] + list(span)], test_ratio=test_ratio, random_state=random_state)
train_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_train.csv')
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{parties[i]}_test.csv')
train_data.to_csv(train_csv_path, index=False)
test_data.to_csv(test_csv_path, index=False)
elif mode == "horizontal":
train_data, test_data = pd_train_test_split(
self.data, test_ratio=test_ratio, random_state=random_state)
test_csv_path = os.path.join(
final_dir_path, f'{self.datadir}_test.csv')
test_data.to_csv(test_csv_path, index=False)
rand_idx = np.random.permutation(range(len(train_data)))
indices = np.array_split(rand_idx, splits)
for i, party in enumerate(parties):
csv_path = os.path.join(
final_dir_path, f'{self.datadir}_{party}.csv')
data = train_data.loc[indices[i]]
data.to_csv(csv_path, index=False)
os.remove(self.datapath)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, default="vertical",
help="vertical or horizontal task")
parser.add_argument("--splits", type=int, default=2,
help="number of parties")
parser.add_argument("--test_ratio", type=float,
default=0.3, help="ratio of test data")
parser.add_argument("--random_seed", type=int,
default=0, help="random seed")
parser.add_argument("--party", nargs="+", help="involved parties")
parser.add_argument("--norm", type=bool,
default=False, help="normalization")
args = parser.parse_args()
reallocate_dict = {
"mode": args.mode,
"splits": args.splits,
"test_ratio": args.test_ratio,
"random_seed": args.random_seed,
"parties": args.party,
"norm": args.norm
}
    boston = Boston(norm=args.norm)
boston.reallocate(reallocate_dict)
| 5,887 | 39.054422 | 110 | py |
XFL | XFL-master/python/common/communication/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/communication/gRPC/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/communication/gRPC/python/checker_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
| 159 | 31 | 75 | py |
XFL | XFL-master/python/common/communication/gRPC/python/status_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: status.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cstatus.proto\x12\x06status\"&\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0e\n\x06status\x18\x02 \x01(\t\"\x1e\n\rStatusRequest\x12\r\n\x05jobId\x18\x01 \x01(\x05\"\x97\x02\n\x0eStatusResponse\x12\r\n\x05jobId\x18\x01 \x01(\x05\x12!\n\tjobStatus\x18\x02 \x01(\x0b\x32\x0e.status.Status\x12\'\n\x0fschedulerStatus\x18\x03 \x01(\x0b\x32\x0e.status.Status\x12@\n\rtrainerStatus\x18\x04 \x03(\x0b\x32).status.StatusResponse.TrainerStatusEntry\x12\x11\n\tstartTime\x18\x05 \x01(\x05\x12\x0f\n\x07\x65ndTime\x18\x06 \x01(\x05\x1a\x44\n\x12TrainerStatusEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1d\n\x05value\x18\x02 \x01(\x0b\x32\x0e.status.Status:\x02\x38\x01*m\n\nStatusEnum\x12\n\n\x06STATUS\x10\x00\x12\x08\n\x04IDLE\x10\x01\x12\x0c\n\x08TRAINING\x10\x02\x12\x0e\n\nSUCCESSFUL\x10\x03\x12\n\n\x06\x46\x41ILED\x10\x04\x12\x0f\n\x0bSTART_TRAIN\x10\x05\x12\x0e\n\nSTOP_TRAIN\x10\x06\x62\x06proto3')
_STATUSENUM = DESCRIPTOR.enum_types_by_name['StatusEnum']
StatusEnum = enum_type_wrapper.EnumTypeWrapper(_STATUSENUM)
STATUS = 0
IDLE = 1
TRAINING = 2
SUCCESSFUL = 3
FAILED = 4
START_TRAIN = 5
STOP_TRAIN = 6
_STATUS = DESCRIPTOR.message_types_by_name['Status']
_STATUSREQUEST = DESCRIPTOR.message_types_by_name['StatusRequest']
_STATUSRESPONSE = DESCRIPTOR.message_types_by_name['StatusResponse']
_STATUSRESPONSE_TRAINERSTATUSENTRY = _STATUSRESPONSE.nested_types_by_name['TrainerStatusEntry']
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'status_pb2'
# @@protoc_insertion_point(class_scope:status.Status)
})
_sym_db.RegisterMessage(Status)
StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), {
'DESCRIPTOR' : _STATUSREQUEST,
'__module__' : 'status_pb2'
# @@protoc_insertion_point(class_scope:status.StatusRequest)
})
_sym_db.RegisterMessage(StatusRequest)
StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), {
'TrainerStatusEntry' : _reflection.GeneratedProtocolMessageType('TrainerStatusEntry', (_message.Message,), {
'DESCRIPTOR' : _STATUSRESPONSE_TRAINERSTATUSENTRY,
'__module__' : 'status_pb2'
# @@protoc_insertion_point(class_scope:status.StatusResponse.TrainerStatusEntry)
})
,
'DESCRIPTOR' : _STATUSRESPONSE,
'__module__' : 'status_pb2'
# @@protoc_insertion_point(class_scope:status.StatusResponse)
})
_sym_db.RegisterMessage(StatusResponse)
_sym_db.RegisterMessage(StatusResponse.TrainerStatusEntry)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_STATUSRESPONSE_TRAINERSTATUSENTRY._options = None
_STATUSRESPONSE_TRAINERSTATUSENTRY._serialized_options = b'8\001'
_STATUSENUM._serialized_start=378
_STATUSENUM._serialized_end=487
_STATUS._serialized_start=24
_STATUS._serialized_end=62
_STATUSREQUEST._serialized_start=64
_STATUSREQUEST._serialized_end=94
_STATUSRESPONSE._serialized_start=97
_STATUSRESPONSE._serialized_end=376
_STATUSRESPONSE_TRAINERSTATUSENTRY._serialized_start=308
_STATUSRESPONSE_TRAINERSTATUSENTRY._serialized_end=376
# @@protoc_insertion_point(module_scope)
| 3,792 | 46.4125 | 973 | py |
XFL | XFL-master/python/common/communication/gRPC/python/control_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
| 159 | 31 | 75 | py |
XFL | XFL-master/python/common/communication/gRPC/python/status_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
| 159 | 31 | 75 | py |
XFL | XFL-master/python/common/communication/gRPC/python/scheduler_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import checker_pb2 as checker__pb2
import commu_pb2 as commu__pb2
import control_pb2 as control__pb2
import scheduler_pb2 as scheduler__pb2
import status_pb2 as status__pb2
class SchedulerStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.getConfig = channel.unary_unary(
'/scheduler.Scheduler/getConfig',
request_serializer=scheduler__pb2.GetConfigRequest.SerializeToString,
response_deserializer=scheduler__pb2.GetConfigResponse.FromString,
)
self.post = channel.stream_unary(
'/scheduler.Scheduler/post',
request_serializer=commu__pb2.PostRequest.SerializeToString,
response_deserializer=commu__pb2.PostResponse.FromString,
)
self.control = channel.unary_unary(
'/scheduler.Scheduler/control',
request_serializer=control__pb2.ControlRequest.SerializeToString,
response_deserializer=control__pb2.ControlResponse.FromString,
)
self.status = channel.unary_unary(
'/scheduler.Scheduler/status',
request_serializer=status__pb2.StatusRequest.SerializeToString,
response_deserializer=status__pb2.StatusResponse.FromString,
)
self.getAlgorithmList = channel.unary_unary(
'/scheduler.Scheduler/getAlgorithmList',
request_serializer=scheduler__pb2.GetAlgorithmListRequest.SerializeToString,
response_deserializer=scheduler__pb2.GetAlgorithmListResponse.FromString,
)
self.recProgress = channel.unary_unary(
'/scheduler.Scheduler/recProgress',
request_serializer=scheduler__pb2.RecProgressRequest.SerializeToString,
response_deserializer=scheduler__pb2.RecProgressResponse.FromString,
)
self.getStage = channel.unary_unary(
'/scheduler.Scheduler/getStage',
request_serializer=scheduler__pb2.GetStageRequest.SerializeToString,
response_deserializer=scheduler__pb2.GetStageResponse.FromString,
)
self.checkTaskConfig = channel.unary_unary(
'/scheduler.Scheduler/checkTaskConfig',
request_serializer=checker__pb2.CheckTaskConfigRequest.SerializeToString,
response_deserializer=checker__pb2.CheckTaskConfigResponse.FromString,
)
class SchedulerServicer(object):
"""Missing associated documentation comment in .proto file."""
def getConfig(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def post(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def control(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def status(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getAlgorithmList(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def recProgress(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getStage(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def checkTaskConfig(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SchedulerServicer_to_server(servicer, server):
rpc_method_handlers = {
'getConfig': grpc.unary_unary_rpc_method_handler(
servicer.getConfig,
request_deserializer=scheduler__pb2.GetConfigRequest.FromString,
response_serializer=scheduler__pb2.GetConfigResponse.SerializeToString,
),
'post': grpc.stream_unary_rpc_method_handler(
servicer.post,
request_deserializer=commu__pb2.PostRequest.FromString,
response_serializer=commu__pb2.PostResponse.SerializeToString,
),
'control': grpc.unary_unary_rpc_method_handler(
servicer.control,
request_deserializer=control__pb2.ControlRequest.FromString,
response_serializer=control__pb2.ControlResponse.SerializeToString,
),
'status': grpc.unary_unary_rpc_method_handler(
servicer.status,
request_deserializer=status__pb2.StatusRequest.FromString,
response_serializer=status__pb2.StatusResponse.SerializeToString,
),
'getAlgorithmList': grpc.unary_unary_rpc_method_handler(
servicer.getAlgorithmList,
request_deserializer=scheduler__pb2.GetAlgorithmListRequest.FromString,
response_serializer=scheduler__pb2.GetAlgorithmListResponse.SerializeToString,
),
'recProgress': grpc.unary_unary_rpc_method_handler(
servicer.recProgress,
request_deserializer=scheduler__pb2.RecProgressRequest.FromString,
response_serializer=scheduler__pb2.RecProgressResponse.SerializeToString,
),
'getStage': grpc.unary_unary_rpc_method_handler(
servicer.getStage,
request_deserializer=scheduler__pb2.GetStageRequest.FromString,
response_serializer=scheduler__pb2.GetStageResponse.SerializeToString,
),
'checkTaskConfig': grpc.unary_unary_rpc_method_handler(
servicer.checkTaskConfig,
request_deserializer=checker__pb2.CheckTaskConfigRequest.FromString,
response_serializer=checker__pb2.CheckTaskConfigResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'scheduler.Scheduler', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Scheduler(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def getConfig(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Scheduler/getConfig',
scheduler__pb2.GetConfigRequest.SerializeToString,
scheduler__pb2.GetConfigResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def post(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/scheduler.Scheduler/post',
commu__pb2.PostRequest.SerializeToString,
commu__pb2.PostResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def control(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Scheduler/control',
control__pb2.ControlRequest.SerializeToString,
control__pb2.ControlResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def status(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Scheduler/status',
status__pb2.StatusRequest.SerializeToString,
status__pb2.StatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getAlgorithmList(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Scheduler/getAlgorithmList',
scheduler__pb2.GetAlgorithmListRequest.SerializeToString,
scheduler__pb2.GetAlgorithmListResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def recProgress(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Scheduler/recProgress',
scheduler__pb2.RecProgressRequest.SerializeToString,
scheduler__pb2.RecProgressResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getStage(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Scheduler/getStage',
scheduler__pb2.GetStageRequest.SerializeToString,
scheduler__pb2.GetStageResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def checkTaskConfig(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/scheduler.Scheduler/checkTaskConfig',
checker__pb2.CheckTaskConfigRequest.SerializeToString,
checker__pb2.CheckTaskConfigResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 13,217 | 42.768212 | 102 | py |