_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q32200 | get_sci_segs_for_ifo | train | def get_sci_segs_for_ifo(ifo, cp, start_time, end_time, out_dir, tags=None):
"""
Obtain science segments for the selected ifo
Parameters
-----------
ifo : string
        The string describing the ifo to obtain science times for.
    cp : pycbc.workflow.configuration.WorkflowConfigParser
        The in-memory representation of the configuration (.ini) files.
start_time : gps time (either int/LIGOTimeGPS)
The time at which to begin searching for segments.
end_time : gps time (either int/LIGOTimeGPS)
The time at which to stop searching for segments.
out_dir : path
The directory in which output will be stored.
    tags : list of strings, optional (default=None)
        Use this to specify tags. This can be used if this module is being
        called more than once to give call specific configuration (by setting
        options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
        This is also used to tag the Files returned by the class, making
        both the Files and the actual filenames unique.
Returns
--------
sci_segs : ligo.segments.segmentlist
The segmentlist generated by this call
sci_xml_file : pycbc.workflow.core.SegFile
The workflow File object corresponding to this science segments file.
out_sci_seg_name : string
The name of the output segment list in the output XML file.
"""
if tags is None:
tags = []
seg_valid_seg = segments.segment([start_time,end_time])
sci_seg_name = cp.get_opt_tags(
"workflow-segments", "segments-%s-science-name" %(ifo.lower()), tags)
sci_seg_url = cp.get_opt_tags(
"workflow-segments", "segments-database-url", tags)
# NOTE: ligolw_segment_query returns slightly strange output. The output
# segment list is put in with name "RESULT". So this is hardcoded here
out_sci_seg_name = "RESULT"
if tags:
sci_xml_file_path = os.path.join(
out_dir, "%s-SCIENCE_SEGMENTS_%s.xml" \
%(ifo.upper(), '_'.join(tags)))
tag_list=tags + ['SCIENCE']
else:
sci_xml_file_path = os.path.join(
out_dir, "%s-SCIENCE_SEGMENTS.xml" %(ifo.upper()) )
tag_list = ['SCIENCE']
if file_needs_generating(sci_xml_file_path, cp, tags=tags):
seg_find_call = [ resolve_url(cp.get("executables","segment_query"),
permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR),
"--query-segments",
"--segment-url", sci_seg_url,
"--gps-start-time", str(start_time),
"--gps-end-time", str(end_time),
"--include-segments", sci_seg_name,
"--output-file", sci_xml_file_path ]
make_external_call(seg_find_call, out_dir=os.path.join(out_dir,'logs'),
out_basename='%s-science-call' %(ifo.lower()) )
    # Yes, it's yucky to generate a file and then read it back in.
sci_xml_file_path = os.path.abspath(sci_xml_file_path)
sci_xml_file = SegFile.from_segment_xml(sci_xml_file_path, tags=tag_list,
valid_segment=seg_valid_seg)
# NOTE: ligolw_segment_query returns slightly strange output. The output
# segment_summary output does not use RESULT. Therefore move the
# segment_summary across.
sci_xml_file.seg_summ_dict[ifo.upper() + ":" + out_sci_seg_name] = \
sci_xml_file.seg_summ_dict[':'.join(sci_seg_name.split(':')[0:2])]
sci_segs = sci_xml_file.return_union_seglist()
return sci_segs, sci_xml_file, out_sci_seg_name | python | {
"resource": ""
} |
q32201 | get_veto_segs | train | def get_veto_segs(workflow, ifo, category, start_time, end_time, out_dir,
veto_gen_job, tags=None, execute_now=False):
"""
Obtain veto segments for the selected ifo and veto category and add the job
to generate this to the workflow.
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
An instance of the Workflow class that manages the workflow.
ifo : string
The string describing the ifo to generate vetoes for.
category : int
The veto category to generate vetoes for.
start_time : gps time (either int/LIGOTimeGPS)
The time at which to begin searching for segments.
end_time : gps time (either int/LIGOTimeGPS)
The time at which to stop searching for segments.
out_dir : path
The directory in which output will be stored.
    veto_gen_job : Job
        The veto generation Job class that will be used to create the Node.
    tags : list of strings, optional (default=None)
        Use this to specify tags. This can be used if this module is being
        called more than once to give call specific configuration (by setting
        options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
        This is also used to tag the Files returned by the class, making
        both the Files and the actual filenames unique.
        FIXME: Filenames may not be unique with current codes!
execute_now : boolean, optional
If true, jobs are executed immediately. If false, they are added to the
workflow to be run later.
Returns
--------
veto_def_file : pycbc.workflow.core.SegFile
The workflow File object corresponding to this DQ veto file.
"""
if tags is None:
tags = []
seg_valid_seg = segments.segment([start_time,end_time])
# FIXME: This job needs an internet connection and X509_USER_PROXY
# For internet connection, it may need a headnode (ie universe local)
# For X509_USER_PROXY, I don't know what pegasus is doing
node = Node(veto_gen_job)
node.add_opt('--veto-categories', str(category))
node.add_opt('--ifo-list', ifo)
node.add_opt('--gps-start-time', str(start_time))
node.add_opt('--gps-end-time', str(end_time))
if tags:
veto_xml_file_name = "%s-VETOTIME_CAT%d_%s-%d-%d.xml" \
%(ifo, category, '_'.join(tags), start_time,
end_time-start_time)
else:
veto_xml_file_name = "%s-VETOTIME_CAT%d-%d-%d.xml" \
%(ifo, category, start_time, end_time-start_time)
veto_xml_file_path = os.path.abspath(os.path.join(out_dir,
veto_xml_file_name))
curr_url = urlparse.urlunparse(['file', 'localhost',
veto_xml_file_path, None, None, None])
if tags:
curr_tags = tags + ['VETO_CAT%d' %(category)]
else:
curr_tags = ['VETO_CAT%d' %(category)]
if file_needs_generating(veto_xml_file_path, workflow.cp, tags=tags):
if execute_now:
            workflow.execute_node(node, verbatim_exe=True)
veto_xml_file = SegFile.from_segment_xml(veto_xml_file_path,
tags=curr_tags,
valid_segment=seg_valid_seg)
else:
veto_xml_file = SegFile(ifo, 'SEGMENTS', seg_valid_seg,
file_url=curr_url, tags=curr_tags)
node._add_output(veto_xml_file)
workflow.add_node(node)
else:
node.executed = True
for fil in node._outputs:
fil.node = None
veto_xml_file = SegFile.from_segment_xml(veto_xml_file_path,
tags=curr_tags,
valid_segment=seg_valid_seg)
return veto_xml_file | python | {
"resource": ""
} |
q32202 | create_segs_from_cats_job | train | def create_segs_from_cats_job(cp, out_dir, ifo_string, tags=None):
"""
This function creates the CondorDAGJob that will be used to run
ligolw_segments_from_cats as part of the workflow
Parameters
-----------
cp : pycbc.workflow.configuration.WorkflowConfigParser
The in-memory representation of the configuration (.ini) files
out_dir : path
Directory in which to put output files
    ifo_string : string
        String containing all active ifos, i.e. "H1L1V1"
    tags : list of strings, optional (default=None)
        Use this to specify one or more tags. This can be used if this module
        is being called more than once to give call specific configuration (by
        setting options in [workflow-datafind-${TAG}] rather than
        [workflow-datafind]). This is also used to tag the Files returned by
        the class, making both the Files and the actual filenames unique.
        FIXME: Filenames may not be unique with current codes!
Returns
--------
job : Job instance
The Job instance that will run segments_from_cats jobs
"""
if tags is None:
tags = []
seg_server_url = cp.get_opt_tags("workflow-segments",
"segments-database-url", tags)
veto_def_file = cp.get_opt_tags("workflow-segments",
"segments-veto-definer-file", tags)
job = Executable(cp, 'segments_from_cats', universe='local',
ifos=ifo_string, out_dir=out_dir, tags=tags)
job.add_opt('--separate-categories')
job.add_opt('--segment-url', seg_server_url)
job.add_opt('--veto-file', veto_def_file)
# FIXME: Would like the proxy in the Workflow instance
# FIXME: Explore using the x509 condor commands
# If the user has a proxy set in the environment, add it to the job
return job | python | {
"resource": ""
} |
q32203 | get_cumulative_segs | train | def get_cumulative_segs(workflow, categories, seg_files_list, out_dir,
tags=None, execute_now=False, segment_name=None):
"""
Function to generate one of the cumulative, multi-detector segment files
as part of the workflow.
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
An instance of the Workflow class that manages the workflow.
    categories : list of int
        The veto categories to include in this cumulative veto.
    seg_files_list : list of SegFiles
        The list of segment files to be used as input for combining.
out_dir : path
The directory to write output to.
tags : list of strings, optional
A list of strings that is used to identify this job
execute_now : boolean, optional
If true, jobs are executed immediately. If false, they are added to the
workflow to be run later.
    segment_name : str
        The name of the combined, cumulative segments in the output file.
    Returns
    --------
    outfile : pycbc.workflow.core.File
        The File object pointing to the output cumulative segment file.
    """
if tags is None:
tags = []
add_inputs = FileList([])
valid_segment = workflow.analysis_time
if segment_name is None:
segment_name = 'VETO_CAT%d_CUMULATIVE' % (categories[-1])
cp = workflow.cp
# calculate the cumulative veto files for a given ifo
for ifo in workflow.ifos:
cum_job = LigoLWCombineSegsExecutable(cp, 'ligolw_combine_segments',
out_dir=out_dir, tags=[segment_name]+tags, ifos=ifo)
inputs = []
files = seg_files_list.find_output_with_ifo(ifo)
for category in categories:
file_list = files.find_output_with_tag('VETO_CAT%d' %(category))
inputs+=file_list
cum_node = cum_job.create_node(valid_segment, inputs, segment_name)
if file_needs_generating(cum_node.output_files[0].cache_entry.path,
workflow.cp, tags=tags):
if execute_now:
workflow.execute_node(cum_node)
else:
workflow.add_node(cum_node)
else:
cum_node.executed = True
for fil in cum_node._outputs:
fil.node = None
fil.PFN(urlparse.urljoin('file:',
urllib.pathname2url(fil.storage_path)),
site='local')
add_inputs += cum_node.output_files
# add cumulative files for each ifo together
name = '%s_VETO_SEGMENTS' %(segment_name)
outfile = File(workflow.ifos, name, workflow.analysis_time,
directory=out_dir, extension='xml',
tags=[segment_name] + tags)
add_job = LigolwAddExecutable(cp, 'llwadd', ifos=ifo, out_dir=out_dir,
tags=tags)
add_node = add_job.create_node(valid_segment, add_inputs, output=outfile)
if file_needs_generating(add_node.output_files[0].cache_entry.path,
workflow.cp, tags=tags):
if execute_now:
workflow.execute_node(add_node)
else:
workflow.add_node(add_node)
else:
add_node.executed = True
for fil in add_node._outputs:
fil.node = None
fil.PFN(urlparse.urljoin('file:',
urllib.pathname2url(fil.storage_path)),
site='local')
return outfile | python | {
"resource": ""
} |
q32204 | add_cumulative_files | train | def add_cumulative_files(workflow, output_file, input_files, out_dir,
execute_now=False, tags=None):
"""
Function to combine a set of segment files into a single one. This function
will not merge the segment lists but keep each separate.
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
An instance of the Workflow class that manages the workflow.
output_file: pycbc.workflow.core.File
The output file object
input_files: pycbc.workflow.core.FileList
        The list of input segment files
out_dir : path
The directory to write output to.
execute_now : boolean, optional
If true, jobs are executed immediately. If false, they are added to the
workflow to be run later.
    tags : list of strings, optional
        A list of strings that is used to identify this job
    Returns
    --------
    pycbc.workflow.core.File
        The output file combining all input segment files.
    """
if tags is None:
tags = []
llwadd_job = LigolwAddExecutable(workflow.cp, 'llwadd',
ifo=output_file.ifo_list, out_dir=out_dir, tags=tags)
add_node = llwadd_job.create_node(output_file.segment, input_files,
output=output_file)
if file_needs_generating(add_node.output_files[0].cache_entry.path,
workflow.cp, tags=tags):
if execute_now:
workflow.execute_node(add_node)
else:
workflow.add_node(add_node)
else:
add_node.executed = True
for fil in add_node._outputs:
fil.node = None
fil.PFN(urlparse.urljoin('file:',
urllib.pathname2url(fil.storage_path)),
site='local')
return add_node.output_files[0] | python | {
"resource": ""
} |
q32205 | find_playground_segments | train | def find_playground_segments(segs):
'''Finds playground time in a list of segments.
Playground segments include the first 600s of every 6370s stride starting
at GPS time 729273613.
Parameters
----------
    segs : ligo.segments.segmentlist
        A segmentlist in which to find playground segments.
    Returns
    -------
    outlist : ligo.segments.segmentlist
        A segmentlist containing all playground time within the input
        segmentlist (i.e. segs).
'''
# initializations
start_s2 = 729273613
playground_stride = 6370
playground_length = 600
outlist = segments.segmentlist()
# loop over segments
for seg in segs:
start = seg[0]
end = seg[1]
# the first playground segment whose end is after the start of seg
        playground_start = start_s2 + playground_stride * (1 + \
            int(start - start_s2 - playground_length) // playground_stride)
while playground_start < end:
# find start of playground segment
if playground_start > start:
ostart = playground_start
else:
ostart = start
playground_end = playground_start + playground_length
# find end of playground segment
if playground_end < end:
oend = playground_end
else:
oend = end
# append segment
x = segments.segment(ostart, oend)
outlist.append(x)
# increment
playground_start = playground_start + playground_stride
return outlist | python | {
"resource": ""
} |
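The stride arithmetic above is easy to sanity-check in isolation. A minimal, self-contained sketch using plain tuples instead of ligo.segments objects (purely illustrative):

```python
# Playground constants from find_playground_segments above.
START_S2 = 729273613
STRIDE = 6370
LENGTH = 600

def playground_in_span(start, end):
    """Return (start, end) playground tuples overlapping [start, end)."""
    out = []
    # first playground segment whose end is after `start`
    pg = START_S2 + STRIDE * (1 + (start - START_S2 - LENGTH) // STRIDE)
    while pg < end:
        out.append((max(pg, start), min(pg + LENGTH, end)))
        pg += STRIDE
    return out

# The first playground segment begins at the S2 epoch itself:
assert playground_in_span(START_S2, START_S2 + 700) == [(START_S2, START_S2 + 600)]
```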
q32206 | save_veto_definer | train | def save_veto_definer(cp, out_dir, tags=None):
""" Retrieve the veto definer file and save it locally
Parameters
-----------
    cp : ConfigParser instance
        The parsed configuration options.
    out_dir : path
        The directory in which to save the veto definer file.
    tags : list of strings
        Used to retrieve subsections of the ini file for
        configuration options.
    Returns
    --------
    veto_def_new_path : path
        The local path of the downloaded veto definer file.
    """
if tags is None:
tags = []
make_analysis_dir(out_dir)
veto_def_url = cp.get_opt_tags("workflow-segments",
"segments-veto-definer-url", tags)
veto_def_base_name = os.path.basename(veto_def_url)
veto_def_new_path = os.path.abspath(os.path.join(out_dir,
veto_def_base_name))
# Don't need to do this if already done
resolve_url(veto_def_url,out_dir)
# and update location
cp.set("workflow-segments", "segments-veto-definer-file", veto_def_new_path)
return veto_def_new_path | python | {
"resource": ""
} |
q32207 | parse_cat_ini_opt | train | def parse_cat_ini_opt(cat_str):
""" Parse a cat str from the ini file into a list of sets """
if cat_str == "":
return []
cat_groups = cat_str.split(',')
cat_sets = []
for group in cat_groups:
group = group.strip()
cat_sets += [set(c for c in group)]
return cat_sets | python | {
"resource": ""
} |
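Each comma-separated group in the string becomes a set of its individual characters. A worked example with hypothetical ini values:

```python
# Hypothetical values illustrating parse_cat_ini_opt's behaviour.
assert parse_cat_ini_opt("") == []
assert parse_cat_ini_opt("12, 12H") == [{'1', '2'}, {'1', '2', 'H'}]
```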
q32208 | file_needs_generating | train | def file_needs_generating(file_path, cp, tags=None):
"""
    This function tests the file location and determines if the file should
    be generated now or if an error should be raised. This is controlled by
    the segments-generate-segment-files configuration option, which is
    described below and in the documentation.
Parameters
-----------
file_path : path
Location of file to check
cp : ConfigParser
The associated ConfigParser from which the
segments-generate-segment-files variable is returned.
It is recommended for most applications to use the default option by
leaving segments-generate-segment-files blank, which will regenerate
all segment files at runtime. Only use this facility if you need it.
Choices are
* 'always' : DEFAULT: All files will be generated even if they already exist.
* 'if_not_present': Files will be generated if they do not already exist. Pre-existing files will be read in and used.
* 'error_on_duplicate': Files will be generated if they do not already exist. Pre-existing files will raise a failure.
        * 'never': Pre-existing files will be read in and used. If no file exists the code will fail.
    tags : list of strings, optional (default=None)
        Tags used to retrieve subsections of the ini file.
Returns
--------
int
1 = Generate the file. 0 = File already exists, use it. Other cases
will raise an error.
"""
if tags is None:
tags = []
if cp.has_option_tags("workflow-segments",
"segments-generate-segment-files", tags):
value = cp.get_opt_tags("workflow-segments",
"segments-generate-segment-files", tags)
generate_segment_files = value
else:
generate_segment_files = 'always'
# Does the file exist
if os.path.isfile(file_path):
if generate_segment_files in ['if_not_present', 'never']:
return 0
elif generate_segment_files == 'always':
err_msg = "File %s already exists. " %(file_path,)
err_msg += "Regenerating and overwriting."
            logging.warning(err_msg)
return 1
elif generate_segment_files == 'error_on_duplicate':
err_msg = "File %s already exists. " %(file_path,)
err_msg += "Refusing to overwrite file and exiting."
raise ValueError(err_msg)
else:
err_msg = 'Global variable generate_segment_files must be one of '
err_msg += '"always", "if_not_present", "error_on_duplicate", '
err_msg += '"never". Got %s.' %(generate_segment_files,)
raise ValueError(err_msg)
else:
if generate_segment_files in ['always', 'if_not_present',
'error_on_duplicate']:
return 1
elif generate_segment_files == 'never':
err_msg = 'File %s does not exist. ' %(file_path,)
raise ValueError(err_msg)
else:
err_msg = 'Global variable generate_segment_files must be one of '
err_msg += '"always", "if_not_present", "error_on_duplicate", '
err_msg += '"never". Got %s.' %(generate_segment_files,)
raise ValueError(err_msg) | python | {
"resource": ""
} |
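The four modes reduce to a small decision table. A pure-python sketch of the same logic, assuming only the mode string and a pre-computed exists flag (no ConfigParser involved):

```python
def needs_generating(exists, mode='always'):
    # mode -> (result when file exists, result when file is missing);
    # 'always' with an existing file also logs a warning in the original.
    table = {
        'always': (1, 1),
        'if_not_present': (0, 1),
        'error_on_duplicate': ('error', 1),
        'never': (0, 'error'),
    }
    on_exists, on_missing = table[mode]
    result = on_exists if exists else on_missing
    if result == 'error':
        raise ValueError('Disallowed by segments-generate-segment-files')
    return result

assert needs_generating(True, 'if_not_present') == 0
assert needs_generating(False, 'error_on_duplicate') == 1
```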
q32209 | get_segments_file | train | def get_segments_file(workflow, name, option_name, out_dir):
"""Get cumulative segments from option name syntax for each ifo.
    Use the syntax of a configparser string to define the resulting
    segment_file,
    e.g. option_name = +up_flag1,+up_flag2,+up_flag3,-down_flag1,-down_flag2
    Each ifo may have a different string and is stored separately in the file.
    Flags which add time must precede flags which subtract time.
Parameters
----------
workflow: pycbc.workflow.Workflow
name: string
Name of the segment list being created
    option_name: str
        Name of option in the associated config parser to get the flag list
    out_dir: path
        The directory in which to store the output segment file.
    Returns
    --------
    seg_file: pycbc.workflow.SegFile
        SegFile instance that points to the segment xml file on disk.
    """
from pycbc.dq import query_str
make_analysis_dir(out_dir)
cp = workflow.cp
start = workflow.analysis_time[0]
end = workflow.analysis_time[1]
# Check for veto definer file
veto_definer = None
if cp.has_option("workflow-segments", "segments-veto-definer-url"):
veto_definer = save_veto_definer(workflow.cp, out_dir, [])
# Check for provided server
server = "segments.ligo.org"
if cp.has_option("workflow-segments", "segments-database-url"):
server = cp.get("workflow-segments",
"segments-database-url")
segs = {}
for ifo in workflow.ifos:
flag_str = cp.get_opt_tags("workflow-segments", option_name, [ifo])
key = ifo + ':' + name
segs[key] = query_str(ifo, flag_str, start, end,
server=server,
veto_definer=veto_definer)
logging.info("%s: got %s flags", ifo, option_name)
return SegFile.from_segment_list_dict(name, segs,
extension='.xml',
valid_segment=workflow.analysis_time,
directory=out_dir) | python | {
"resource": ""
} |
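The flag-string syntax could be parsed as below. The real parsing happens inside pycbc.dq.query_str, which is not shown here, so this helper and its name are illustrative only:

```python
def split_flag_str(flag_str):
    """Split '+A,+B,-C' into additive and subtractive flag lists."""
    up, down = [], []
    for token in flag_str.split(','):
        token = token.strip()
        if token.startswith('+'):
            up.append(token[1:])
        elif token.startswith('-'):
            down.append(token[1:])
    return up, down

assert split_flag_str('+up_flag1,+up_flag2,-down_flag1') == \
    (['up_flag1', 'up_flag2'], ['down_flag1'])
```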
q32210 | get_swstat_bits | train | def get_swstat_bits(frame_filenames, swstat_channel_name, start_time, end_time):
    ''' This function just checks the first time in the SWSTAT channel
    to see if the filter was on; it doesn't check times beyond that.
    This is just for a first test on a small chunk of data.
To read the SWSTAT bits, reference: https://dcc.ligo.org/DocDB/0107/T1300711/001/LIGO-T1300711-v1.pdf
Bit 0-9 = Filter on/off switches for the 10 filters in an SFM.
Bit 10 = Filter module input switch on/off
Bit 11 = Filter module offset switch on/off
Bit 12 = Filter module output switch on/off
Bit 13 = Filter module limit switch on/off
Bit 14 = Filter module history reset momentary switch
'''
# read frames
swstat = frame.read_frame(frame_filenames, swstat_channel_name,
start_time=start_time, end_time=end_time)
# convert number in channel to binary
bits = bin(int(swstat[0]))
# check if filterbank input or output was off
filterbank_off = False
if len(bits) < 14 or int(bits[-13]) == 0 or int(bits[-11]) == 0:
filterbank_off = True
return bits[-10:], filterbank_off | python | {
"resource": ""
} |
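A worked example of the bit decoding above, with a made-up SWSTAT value in which filter modules 1 and 3 are engaged and the input (bit 10) and output (bit 12) switches are on:

```python
value = (1 << 1) | (1 << 3) | (1 << 10) | (1 << 12)   # 5130, made up
bits = bin(int(value))                                # '0b1010000001010'
filterbank_off = len(bits) < 14 or int(bits[-13]) == 0 or int(bits[-11]) == 0
assert bits[-10:] == '0000001010'   # per-module switches, bits 9 down to 0
assert filterbank_off is False
```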
q32211 | filter_data | train | def filter_data(data, filter_name, filter_file, bits, filterbank_off=False,
swstat_channel_name=None):
'''
A naive function to determine if the filter was on at the time
and then filter the data.
'''
# if filterbank is off then return a time series of zeroes
if filterbank_off:
return numpy.zeros(len(data))
# loop over the 10 filters in the filterbank
for i in range(10):
# read the filter
filter = Filter(filter_file[filter_name][i])
# if bit is on then filter the data
bit = int(bits[-(i+1)])
if bit:
logging.info('filtering with filter module %d', i)
# if there are second-order sections then filter with them
if len(filter.sections):
data = filter.apply(data)
# else it is a filter with only gain so apply the gain
else:
coeffs = iir2z(filter_file[filter_name][i])
if len(coeffs) > 1:
                    logging.info('Gain-only filter module returned more than one number')
sys.exit()
gain = coeffs[0]
data = gain * data
return data | python | {
"resource": ""
} |
q32212 | read_gain_from_frames | train | def read_gain_from_frames(frame_filenames, gain_channel_name, start_time, end_time):
'''
    Returns the value of the gain channel at the start of the requested span.
'''
# get timeseries from frame
gain = frame.read_frame(frame_filenames, gain_channel_name,
start_time=start_time, end_time=end_time)
return gain[0] | python | {
"resource": ""
} |
q32213 | load_from_config | train | def load_from_config(cp, model, **kwargs):
"""Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
        ``from_config`` method.
Returns
-------
sampler :
The initialized sampler.
"""
name = cp.get('sampler', 'name')
return samplers[name].from_config(cp, model, **kwargs) | python | {
"resource": ""
} |
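`samplers` here is a name-to-class registry, so loading reduces to a dictionary lookup followed by a classmethod call. A minimal sketch of the pattern with a made-up sampler class:

```python
class DummySampler:
    """Stand-in sampler; the real classes live in pycbc.inference."""
    @classmethod
    def from_config(cls, cp, model, **kwargs):
        return cls()

samplers = {'dummy': DummySampler}

name = 'dummy'  # in load_from_config this comes from cp.get('sampler', 'name')
sampler = samplers[name].from_config(cp=None, model=None)
```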
q32214 | qplane | train | def qplane(qplane_tile_dict, fseries, return_complex=False):
"""Performs q-transform on each tile for each q-plane and selects
tile with the maximum energy. Q-transform can then
be interpolated to a desired frequency and time resolution.
Parameters
----------
    qplane_tile_dict: dict
        Dictionary containing a list of q-tile tuples for each q-plane
fseries: 'pycbc FrequencySeries'
frequency-series data set
return_complex: {False, bool}
Return the raw complex series instead of the normalized power.
Returns
-------
q : float
The q of the maximum q plane
    times : numpy.ndarray
        The times at which the qtransform is sampled.
    freqs : numpy.ndarray
        The frequencies at which the qtransform is sampled.
qplane : numpy.ndarray (2d)
The two dimensional interpolated qtransform of this time series.
"""
# store q-transforms for each q in a dict
qplanes = {}
max_energy, max_key = None, None
for i, q in enumerate(qplane_tile_dict):
energies = []
for f0 in qplane_tile_dict[q]:
energy = qseries(fseries, q, f0, return_complex=return_complex)
menergy = abs(energy).max()
energies.append(energy)
if i == 0 or menergy > max_energy:
max_energy = menergy
max_key = q
qplanes[q] = energies
# record q-transform output for peak q
plane = qplanes[max_key]
frequencies = qplane_tile_dict[max_key]
times = plane[0].sample_times.numpy()
    plane = numpy.array([v.numpy() for v in plane])
    return max_key, times, frequencies, plane
"resource": ""
} |
q32215 | qtiling | train | def qtiling(fseries, qrange, frange, mismatch=0.2):
"""Iterable constructor of QTile tuples
Parameters
----------
fseries: 'pycbc FrequencySeries'
frequency-series data set
    qrange:
        upper and lower bounds of the q range
    frange:
        upper and lower bounds of the frequency range
    mismatch:
        maximum allowed fractional mismatch between neighbouring tiles
Returns
-------
qplane_tile_dict: 'dict'
dictionary containing Q-tile tuples for a set of Q-planes
"""
qplane_tile_dict = {}
qs = list(_iter_qs(qrange, deltam_f(mismatch)))
for q in qs:
qtilefreq = _iter_frequencies(q, frange, mismatch, fseries.duration)
qplane_tile_dict[q] = numpy.array(list(qtilefreq))
return qplane_tile_dict | python | {
"resource": ""
} |
q32216 | get_build_name | train | def get_build_name(git_path='git'):
"""Find the username of the current builder
"""
    name, retcode = call(('git', 'config', 'user.name'), returncode=True)
    if retcode:
        name = "Unknown User"
    email, retcode = call(('git', 'config', 'user.email'), returncode=True)
    if retcode:
        email = ""
return "%s <%s>" % (name, email) | python | {
"resource": ""
} |
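These git helpers assume a `call()` utility that runs a command and returns its stripped stdout, optionally paired with the exit status. The real implementation is not shown here; a plausible minimal sketch:

```python
import subprocess

def call(cmd, returncode=False):
    """Run `cmd`, returning stripped stdout (and optionally the exit code)."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    out = out.decode('utf-8').strip()
    if returncode:
        return out, proc.returncode
    return out
```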
q32217 | get_last_commit | train | def get_last_commit(git_path='git'):
"""Returns the details of the last git commit
    Returns a tuple (hash, date, author, committer), where author and
    committer are each formatted as "Name <e-mail>".
    """
hash_, udate, aname, amail, cname, cmail = (
call((git_path, 'log', '-1',
'--pretty=format:%H,%ct,%an,%ae,%cn,%ce')).split(","))
date = time.strftime('%Y-%m-%d %H:%M:%S +0000', time.gmtime(float(udate)))
author = '%s <%s>' % (aname, amail)
committer = '%s <%s>' % (cname, cmail)
return hash_, date, author, committer | python | {
"resource": ""
} |
q32218 | get_git_branch | train | def get_git_branch(git_path='git'):
"""Returns the name of the current git branch
"""
branch_match = call((git_path, 'rev-parse', '--symbolic-full-name', 'HEAD'))
if branch_match == "HEAD":
return None
else:
return os.path.basename(branch_match) | python | {
"resource": ""
} |
q32219 | get_git_tag | train | def get_git_tag(hash_, git_path='git'):
"""Returns the name of the current git tag
"""
tag, status = call((git_path, 'describe', '--exact-match',
'--tags', hash_), returncode=True)
if status == 0:
return tag
else:
return None | python | {
"resource": ""
} |
q32220 | get_git_status | train | def get_git_status(git_path='git'):
"""Returns the state of the git working copy
"""
status_output = subprocess.call((git_path, 'diff-files', '--quiet'))
if status_output != 0:
return 'UNCLEAN: Modified working tree'
else:
# check index for changes
status_output = subprocess.call((git_path, 'diff-index', '--cached',
'--quiet', 'HEAD'))
if status_output != 0:
return 'UNCLEAN: Modified index'
else:
return 'CLEAN: All modifications committed' | python | {
"resource": ""
} |
q32221 | generate_git_version_info | train | def generate_git_version_info():
"""Query the git repository information to generate a version module.
"""
info = GitInfo()
git_path = call(('which', 'git'))
# get build info
info.builder = get_build_name()
info.build_date = get_build_date()
# parse git ID
info.hash, info.date, info.author, info.committer = (
get_last_commit(git_path))
# determine branch
info.branch = get_git_branch(git_path)
# determine tag
info.tag = get_git_tag(info.hash, git_path)
# determine version
if info.tag:
info.version = info.tag.strip('v')
info.release = not re.search('[a-z]', info.version.lower())
else:
info.version = info.hash[:6]
info.release = False
# Determine *last* stable release
info.last_release = determine_latest_release_version()
# refresh index
call((git_path, 'update-index', '-q', '--refresh'))
# check working copy for changes
info.status = get_git_status(git_path)
return info | python | {
"resource": ""
} |
q32222 | SingleDetSGChisq.values | train | def values(self, stilde, template, psd, snrv, snr_norm,
bchisq, bchisq_dof, indices):
""" Calculate sine-Gaussian chisq
Parameters
----------
stilde: pycbc.types.Frequencyseries
The overwhitened strain
template: pycbc.types.Frequencyseries
The waveform template being analyzed
psd: pycbc.types.Frequencyseries
The power spectral density of the data
snrv: numpy.ndarray
The peak unnormalized complex SNR values
snr_norm: float
The normalization factor for the snr
bchisq: numpy.ndarray
The Bruce Allen power chisq values for these triggers
bchisq_dof: numpy.ndarray
The degrees of freedom of the Bruce chisq
        indices: numpy.ndarray
            The indices of the snr peaks.
Returns
-------
chisq: Array
Chisq values, one for each sample index
"""
if not self.do:
return None
if template.params.template_hash not in self.params:
return numpy.ones(len(snrv))
values = self.params[template.params.template_hash].split(',')
# Get the chisq bins to use as the frequency reference point
bins = self.cached_chisq_bins(template, psd)
# This is implemented slowly, so let's not call it often, OK?
chisq = numpy.ones(len(snrv))
for i, snrvi in enumerate(snrv):
#Skip if newsnr too low
snr = abs(snrvi * snr_norm)
nsnr = ranking.newsnr(snr, bchisq[i] / bchisq_dof[i])
if nsnr < self.snr_threshold:
continue
N = (len(template) - 1) * 2
dt = 1.0 / (N * template.delta_f)
kmin = int(template.f_lower / psd.delta_f)
time = float(template.epoch) + dt * indices[i]
# Shift the time of interest to be centered on 0
stilde_shift = apply_fseries_time_shift(stilde, -time)
# Only apply the sine-Gaussian in a +-50 Hz range around the
# central frequency
qwindow = 50
chisq[i] = 0
# Estimate the maximum frequency up to which the waveform has
# power by approximating power per frequency
# as constant over the last 2 chisq bins. We cannot use the final
# chisq bin edge as it does not have to be where the waveform
# terminates.
fstep = (bins[-2] - bins[-3])
fpeak = (bins[-2] + fstep) * template.delta_f
            # This is 90% of the Nyquist frequency of the data.
            # This allows us to avoid issues near Nyquist due to
            # resample filtering
fstop = len(stilde) * stilde.delta_f * 0.9
dof = 0
# Calculate the sum of SNR^2 for the sine-Gaussians specified
for descr in values:
# Get the q and frequency offset from the descriptor
q, offset = descr.split('-')
q, offset = float(q), float(offset)
fcen = fpeak + offset
flow = max(kmin * template.delta_f, fcen - qwindow)
fhigh = fcen + qwindow
# If any sine-gaussian tile has an upper frequency near
# nyquist return 1 instead.
if fhigh > fstop:
return numpy.ones(len(snrv))
kmin = int(flow / template.delta_f)
kmax = int(fhigh / template.delta_f)
#Calculate sine-gaussian tile
gtem = sinegauss.fd_sine_gaussian(1.0, q, fcen, flow,
len(template) * template.delta_f,
template.delta_f).astype(numpy.complex64)
gsigma = sigma(gtem, psd=psd,
low_frequency_cutoff=flow,
high_frequency_cutoff=fhigh)
#Calculate the SNR of the tile
gsnr = (gtem[kmin:kmax] * stilde_shift[kmin:kmax]).sum()
gsnr *= 4.0 * gtem.delta_f / gsigma
chisq[i] += abs(gsnr)**2.0
dof += 2
if dof == 0:
chisq[i] = 1
else:
chisq[i] /= dof
return chisq | python | {
"resource": ""
} |
q32223 | PhaseTDStatistic.logsignalrate | train | def logsignalrate(self, s0, s1, slide, step):
"""Calculate the normalized log rate density of signals via lookup"""
td = numpy.array(s0['end_time'] - s1['end_time'] - slide*step, ndmin=1)
pd = numpy.array((s0['coa_phase'] - s1['coa_phase']) % \
(2. * numpy.pi), ndmin=1)
rd = numpy.array((s0['sigmasq'] / s1['sigmasq']) ** 0.5, ndmin=1)
sn0 = numpy.array(s0['snr'], ndmin=1)
sn1 = numpy.array(s1['snr'], ndmin=1)
snr0 = sn0 * 1
snr1 = sn1 * 1
snr0[rd > 1] = sn1[rd > 1]
snr1[rd > 1] = sn0[rd > 1]
rd[rd > 1] = 1. / rd[rd > 1]
# Find which bin each coinc falls into
tv = numpy.searchsorted(self.tbins, td) - 1
pv = numpy.searchsorted(self.pbins, pd) - 1
s0v = numpy.searchsorted(self.sbins, snr0) - 1
s1v = numpy.searchsorted(self.sbins, snr1) - 1
rv = numpy.searchsorted(self.rbins, rd) - 1
        # Enforce that points fit within the bin boundaries: if a point lies
        # outside the boundaries it is pushed back to the nearest bin.
tv[tv < 0] = 0
tv[tv >= len(self.tbins) - 1] = len(self.tbins) - 2
pv[pv < 0] = 0
pv[pv >= len(self.pbins) - 1] = len(self.pbins) - 2
s0v[s0v < 0] = 0
s0v[s0v >= len(self.sbins) - 1] = len(self.sbins) - 2
s1v[s1v < 0] = 0
s1v[s1v >= len(self.sbins) - 1] = len(self.sbins) - 2
rv[rv < 0] = 0
rv[rv >= len(self.rbins) - 1] = len(self.rbins) - 2
return self.hist[tv, pv, s0v, s1v, rv] | python | {
"resource": ""
} |
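The searchsorted-and-clamp idiom used above maps arbitrary values onto valid histogram bin indices, pushing out-of-range points into the nearest edge bin. A small numpy illustration:

```python
import numpy

bins = numpy.array([0., 1., 2., 3.])        # three bins: [0,1), [1,2), [2,3)
vals = numpy.array([-0.5, 0.5, 2.5, 9.0])   # two points fall outside
idx = numpy.searchsorted(bins, vals) - 1
idx[idx < 0] = 0                            # clamp below the first bin
idx[idx >= len(bins) - 1] = len(bins) - 2   # clamp above the last bin
assert list(idx) == [0, 0, 2, 2]
```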
q32224 | ExpFitStatistic.lognoiserate | train | def lognoiserate(self, trigs):
"""
Calculate the log noise rate density over single-ifo newsnr
Read in single trigger information, make the newsnr statistic
and rescale by the fitted coefficients alpha and rate
"""
alphai, ratei, thresh = self.find_fits(trigs)
newsnr = self.get_newsnr(trigs)
# alphai is constant of proportionality between single-ifo newsnr and
# negative log noise likelihood in given template
# ratei is rate of trigs in given template compared to average
# thresh is stat threshold used in given ifo
lognoisel = - alphai * (newsnr - thresh) + numpy.log(alphai) + \
numpy.log(ratei)
return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32) | python | {
"resource": ""
} |
q32225 | ExpFitStatistic.coinc | train | def coinc(self, s0, s1, slide, step): # pylint:disable=unused-argument
"""Calculate the final coinc ranking statistic"""
# Approximate log likelihood ratio by summing single-ifo negative
# log noise likelihoods
loglr = - s0 - s1
# add squares of threshold stat values via idealized Gaussian formula
threshes = [self.fits_by_tid[i]['thresh'] for i in self.ifos]
loglr += sum([t**2. / 2. for t in threshes])
# convert back to a coinc-SNR-like statistic
# via log likelihood ratio \propto rho_c^2 / 2
return (2. * loglr) ** 0.5 | python | {
"resource": ""
} |
q32226 | TenantMixin._drop_schema | train | def _drop_schema(self, force_drop=False):
""" Drops the schema"""
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name) | python | {
"resource": ""
} |
q32227 | TenantMixin.get_primary_domain | train | def get_primary_domain(self):
"""
Returns the primary domain of the tenant
"""
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None | python | {
"resource": ""
} |
q32228 | TenantMixin.reverse | train | def reverse(self, request, view_name):
"""
        Returns the URL of the given view on this tenant's subdomain.
"""
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url | python | {
"resource": ""
} |
q32229 | TenantSyncRouter.app_in_list | train | def app_in_list(self, app_label, apps_list):
"""
Is 'app_label' present in 'apps_list'?
apps_list is either settings.SHARED_APPS or settings.TENANT_APPS, a
list of app names.
We check the presence of the app's name or the full path to the apps's
AppConfig class.
https://docs.djangoproject.com/en/1.8/ref/applications/#configuring-applications
"""
appconfig = django_apps.get_app_config(app_label)
appconfig_full_name = '{}.{}'.format(
appconfig.__module__, appconfig.__class__.__name__)
return (appconfig.name in apps_list) or (appconfig_full_name in apps_list) | python | {
"resource": ""
} |
q32230 | TenantFileSystemFinder.check | train | def check(self, **kwargs):
"""
In addition to parent class' checks, also ensure that MULTITENANT_STATICFILES_DIRS
is a tuple or a list.
"""
errors = super().check(**kwargs)
multitenant_staticfiles_dirs = settings.MULTITENANT_STATICFILES_DIRS
if not isinstance(multitenant_staticfiles_dirs, (list, tuple)):
errors.append(
Error(
"Your MULTITENANT_STATICFILES_DIRS setting is not a tuple or list.",
hint="Perhaps you forgot a trailing comma?",
)
)
return errors | python | {
"resource": ""
} |
q32231 | DatabaseWrapper.set_schema_to_public | train | def set_schema_to_public(self):
"""
Instructs to stay in the common 'public' schema.
"""
self.tenant = FakeTenant(schema_name=get_public_schema_name())
self.schema_name = get_public_schema_name()
self.set_settings_schema(self.schema_name)
self.search_path_set = False | python | {
"resource": ""
} |
q32232 | Loader.cache_key | train | def cache_key(self, template_name, skip=None):
"""
Generate a cache key for the template name, dirs, and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a
"""
dirs_prefix = ''
skip_prefix = ''
tenant_prefix = ''
if skip:
matching = [origin.name for origin in skip if origin.template_name == template_name]
if matching:
skip_prefix = self.generate_hash(matching)
if connection.tenant:
tenant_prefix = str(connection.tenant.pk)
return '-'.join(s for s in (str(template_name), tenant_prefix, skip_prefix, dirs_prefix) if s) | python | {
"resource": ""
} |
q32233 | parse_tenant_config_path | train | def parse_tenant_config_path(config_path):
"""
Convenience function for parsing django-tenants' path configuration strings.
If the string contains '%s', then the current tenant's schema name will be inserted at that location. Otherwise
the schema name will be appended to the end of the string.
:param config_path: A configuration path string that optionally contains '%s' to indicate where the tenant
schema name should be inserted.
:return: The formatted string containing the schema name
"""
try:
# Insert schema name
return config_path % connection.schema_name
except (TypeError, ValueError):
# No %s in string; append schema name at the end
return os.path.join(config_path, connection.schema_name) | python | {
"resource": ""
} |
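A worked example of the two branches. The real function reads the schema name from the active database connection; 'tenant1' is hardcoded here purely for illustration:

```python
import os

def _parse(config_path, schema_name='tenant1'):
    try:
        return config_path % schema_name        # '%s' present: substitute
    except (TypeError, ValueError):
        return os.path.join(config_path, schema_name)  # else: append

assert _parse('/data/%s/media') == '/data/tenant1/media'
assert _parse('/data/media') == '/data/media/tenant1'
```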
q32234 | CloneSchema._create_clone_schema_function | train | def _create_clone_schema_function(self):
"""
Creates a postgres function `clone_schema` that copies a schema and its
contents. Will replace any existing `clone_schema` functions owned by the
`postgres` superuser.
"""
cursor = connection.cursor()
cursor.execute(CLONE_SCHEMA_FUNCTION)
cursor.close() | python | {
"resource": ""
} |
q32235 | CloneSchema.clone_schema | train | def clone_schema(self, base_schema_name, new_schema_name):
"""
        Creates a new schema `new_schema_name` as a clone of an existing
        schema `base_schema_name`.
"""
connection.set_schema_to_public()
cursor = connection.cursor()
# check if the clone_schema function already exists in the db
try:
cursor.execute("SELECT 'clone_schema'::regproc")
except ProgrammingError:
self._create_clone_schema_function()
transaction.commit()
sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, TRUE)'
cursor.execute(
sql,
{'base_schema': base_schema_name, 'new_schema': new_schema_name}
)
cursor.close() | python | {
"resource": ""
} |
q32236 | install_database | train | def install_database(name, owner, template='template0', encoding='UTF8', locale='en_US.UTF-8'):
"""
    Create a PostgreSQL database.
::
from fabtools import require
require.postgres.database('myapp', owner='dbuser')
"""
create_database(name, owner, template=template, encoding=encoding,
locale=locale) | python | {
"resource": ""
} |
q32237 | ECSTransformer.add_volume | train | def add_volume(self, volume):
"""
Add a volume to self.volumes if it isn't already present
"""
for old_vol in self.volumes:
if volume == old_vol:
return
self.volumes.append(volume) | python | {
"resource": ""
} |
q32238 | ECSTransformer.emit_containers | train | def emit_containers(self, containers, verbose=True):
"""
Emits the task definition and sorts containers by name
:param containers: List of the container definitions
:type containers: list of dict
:param verbose: Print out newlines and indented JSON
:type verbose: bool
:returns: The text output
:rtype: str
"""
containers = sorted(containers, key=lambda c: c.get('name'))
task_definition = {
'family': self.family,
'containerDefinitions': containers,
'volumes': self.volumes or []
}
if verbose:
return json.dumps(task_definition, indent=4, sort_keys=True)
else:
return json.dumps(task_definition) | python | {
"resource": ""
} |
q32239 | ECSTransformer.ingest_volumes_param | train | def ingest_volumes_param(self, volumes):
"""
This is for ingesting the "volumes" of a task description
"""
data = {}
for volume in volumes:
if volume.get('host', {}).get('sourcePath'):
data[volume.get('name')] = {
'path': volume.get('host', {}).get('sourcePath'),
'readonly': volume.get('readOnly', False)
}
else:
data[volume.get('name')] = {
'path': '/tmp/{}'.format(uuid.uuid4().hex[:8]),
'readonly': volume.get('readOnly', False)
}
return data | python | {
"resource": ""
} |
q32240 | ECSTransformer._build_mountpoint | train | def _build_mountpoint(self, volume):
"""
Given a generic volume definition, create the mountPoints element
"""
self.add_volume(self._build_volume(volume))
return {
'sourceVolume': self.path_to_name(volume.get('host')),
'containerPath': volume.get('container')
} | python | {
"resource": ""
} |
q32241 | MarathonTransformer.flatten_container | train | def flatten_container(self, container):
"""
Accepts a marathon container and pulls out the nested values into the top level
"""
for names in ARG_MAP.values():
if names[TransformationTypes.MARATHON.value]['name'] and \
'.' in names[TransformationTypes.MARATHON.value]['name']:
marathon_dotted_name = names[TransformationTypes.MARATHON.value]['name']
parts = marathon_dotted_name.split('.')
if parts[-2] == 'parameters':
# Special lookup for docker parameters
common_type = names[TransformationTypes.MARATHON.value].get('type')
result = self._lookup_parameter(container, parts[-1], common_type)
if result:
container[marathon_dotted_name] = result
else:
result = lookup_nested_dict(container, *parts)
if result:
container[marathon_dotted_name] = result
return container | python | {
"resource": ""
} |
q32242 | MarathonTransformer._convert_volume | train | def _convert_volume(self, volume):
"""
This is for ingesting the "volumes" of a app description
"""
data = {
'host': volume.get('hostPath'),
'container': volume.get('containerPath'),
'readonly': volume.get('mode') == 'RO',
}
return data | python | {
"resource": ""
} |
q32243 | KubernetesTransformer._find_convertable_object | train | def _find_convertable_object(self, data):
"""
Get the first instance of a `self.pod_types`
"""
data = list(data)
convertable_object_idxs = [
idx
for idx, obj
in enumerate(data)
if obj.get('kind') in self.pod_types.keys()
]
if len(convertable_object_idxs) < 1:
raise Exception("Kubernetes config didn't contain any of {}".format(
', '.join(self.pod_types.keys())
))
return list(data)[convertable_object_idxs[0]] | python | {
"resource": ""
} |
q32244 | KubernetesTransformer._read_stream | train | def _read_stream(self, stream):
"""
Read in the pod stream
"""
data = yaml.safe_load_all(stream=stream)
obj = self._find_convertable_object(data)
pod = self.pod_types[obj['kind']](obj)
return obj, pod.get('containers'), self.ingest_volumes_param(pod.get('volumes', [])) | python | {
"resource": ""
} |
q32245 | KubernetesTransformer.ingest_volumes_param | train | def ingest_volumes_param(self, volumes):
"""
This is for ingesting the "volumes" of a pod spec
"""
data = {}
for volume in volumes:
if volume.get('hostPath', {}).get('path'):
data[volume.get('name')] = {
'path': volume.get('hostPath', {}).get('path'),
}
elif volume.get('emptyDir'):
data[volume.get('name')] = {}
else:
data[volume.get('name')] = {}
# TODO Support other k8s volume types?
return data | python | {
"resource": ""
} |
q32246 | KubernetesTransformer.flatten_container | train | def flatten_container(self, container):
"""
Accepts a kubernetes container and pulls out the nested values into the top level
"""
for names in ARG_MAP.values():
if names[TransformationTypes.KUBERNETES.value]['name'] and \
'.' in names[TransformationTypes.KUBERNETES.value]['name']:
kubernetes_dotted_name = names[TransformationTypes.KUBERNETES.value]['name']
parts = kubernetes_dotted_name.split('.')
result = lookup_nested_dict(container, *parts)
if result:
container[kubernetes_dotted_name] = result
return container | python | {
"resource": ""
} |
q32247 | KubernetesTransformer._build_volume | train | def _build_volume(self, volume):
"""
Given a generic volume definition, create the volumes element
"""
self.volumes[self._build_volume_name(volume.get('host'))] = {
'name': self._build_volume_name(volume.get('host')),
'hostPath': {
'path': volume.get('host')
}
}
response = {
'name': self._build_volume_name(volume.get('host')),
'mountPath': volume.get('container'),
}
if volume.get('readonly', False):
response['readOnly'] = bool(volume.get('readonly', False))
return response | python | {
"resource": ""
} |
q32248 | Converter._convert_container | train | def _convert_container(self, container, input_transformer, output_transformer):
"""
Converts a given dictionary to an output container definition
:type container: dict
:param container: The container definitions as a dictionary
:rtype: dict
        :return: An output_type container definition
"""
output = {}
for parameter, options in ARG_MAP.items():
output_name = options.get(self.output_type, {}).get('name')
output_required = options.get(self.output_type, {}).get('required')
input_name = options.get(self.input_type, {}).get('name')
if container.get(input_name) and \
hasattr(input_transformer, 'ingest_{}'.format(parameter)) and \
output_name and hasattr(output_transformer, 'emit_{}'.format(parameter)):
# call transform_{}
ingest_func = getattr(input_transformer, 'ingest_{}'.format(parameter))
emit_func = getattr(output_transformer, 'emit_{}'.format(parameter))
output[output_name] = emit_func(ingest_func(container.get(input_name)))
if not container.get(input_name) and output_required:
msg_template = 'Container {name} is missing required parameter "{output_name}".'
self.messages.add(
msg_template.format(
output_name=output_name,
output_type=self.output_type,
name=container.get('name', container)
)
)
return output | python | {
"resource": ""
} |
q32249 | transform | train | def transform(input_file, input_type, output_type, verbose, quiet):
"""
container-transform is a small utility to transform various docker
container formats to one another.
Default input type is compose, default output type is ECS
Default is to read from STDIN if no INPUT_FILE is provided
All options may be set by environment variables with the prefix "CT_"
followed by the full argument name.
"""
converter = Converter(input_file, input_type, output_type)
output = converter.convert(verbose)
click.echo(click.style(output, fg='green'))
if not quiet:
for message in converter.messages:
click.echo(click.style(message, fg='red', bold=True), err=True) | python | {
"resource": ""
} |
q32250 | ChronosTransformer.flatten_container | train | def flatten_container(self, container):
"""
Accepts a chronos container and pulls out the nested values into the top level
"""
for names in ARG_MAP.values():
if names[TransformationTypes.CHRONOS.value]['name'] and \
'.' in names[TransformationTypes.CHRONOS.value]['name']:
chronos_dotted_name = names[TransformationTypes.CHRONOS.value]['name']
parts = chronos_dotted_name.split('.')
if parts[-2] == 'parameters':
# Special lookup for docker parameters
common_type = names[TransformationTypes.CHRONOS.value].get('type')
result = self._lookup_parameter(container, parts[-1], common_type)
if result:
container[chronos_dotted_name] = result
else:
result = lookup_nested_dict(container, *parts)
if result:
container[chronos_dotted_name] = result
return container | python | {
"resource": ""
} |
q32251 | ComposeTransformer.ingest_containers | train | def ingest_containers(self, containers=None):
"""
Transform the YAML into a dict with normalized keys
"""
containers = containers or self.stream or {}
output_containers = []
for container_name, definition in containers.items():
container = definition.copy()
container['name'] = container_name
output_containers.append(container)
return output_containers | python | {
"resource": ""
} |
q32252 | sha256 | train | def sha256(message, encoder=nacl.encoding.HexEncoder):
"""
Hashes ``message`` with SHA256.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
"""
return encoder.encode(nacl.bindings.crypto_hash_sha256(message)) | python | {
"resource": ""
} |
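Typical use of the hashing helpers: digests come back hex-encoded as bytes by default, and a different encoder can be passed for raw output.

```python
import nacl.encoding
import nacl.hash

digest = nacl.hash.sha256(b'message')   # hex-encoded: 64 bytes
raw = nacl.hash.sha256(b'message', encoder=nacl.encoding.RawEncoder)
assert len(digest) == 64 and len(raw) == 32
```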
q32253 | sha512 | train | def sha512(message, encoder=nacl.encoding.HexEncoder):
"""
Hashes ``message`` with SHA512.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
"""
return encoder.encode(nacl.bindings.crypto_hash_sha512(message)) | python | {
"resource": ""
} |
q32254 | blake2b | train | def blake2b(data, digest_size=BLAKE2B_BYTES, key=b'',
salt=b'', person=b'',
encoder=nacl.encoding.HexEncoder):
"""
Hashes ``data`` with blake2b.
:param data: the digest input byte sequence
:type data: bytes
:param digest_size: the requested digest size; must be at most
:const:`BLAKE2B_BYTES_MAX`;
the default digest size is
:const:`BLAKE2B_BYTES`
:type digest_size: int
:param key: the key to be set for keyed MAC/PRF usage; if set, the key
must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
:type key: bytes
:param salt: an initialization salt at most
:const:`BLAKE2B_SALTBYTES` long;
it will be zero-padded if needed
:type salt: bytes
:param person: a personalization string at most
:const:`BLAKE2B_PERSONALBYTES` long;
it will be zero-padded if needed
:type person: bytes
:param encoder: the encoder to use on returned digest
:type encoder: class
:returns: The hashed message.
:rtype: bytes
"""
digest = _b2b_hash(data, digest_size=digest_size, key=key,
salt=salt, person=person)
return encoder.encode(digest) | python | {
"resource": ""
} |
q32255 | siphash24 | train | def siphash24(message, key=b'', encoder=nacl.encoding.HexEncoder):
"""
Computes a keyed MAC of ``message`` using the short-input-optimized
siphash-2-4 construction.
:param message: The message to hash.
:type message: bytes
:param key: the message authentication key for the siphash MAC construct
:type key: bytes(:const:`SIPHASH_KEYBYTES`)
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes(:const:`SIPHASH_BYTES`)
"""
digest = _sip_hash(message, key)
return encoder.encode(digest) | python | {
"resource": ""
} |
q32256 | siphashx24 | train | def siphashx24(message, key=b'', encoder=nacl.encoding.HexEncoder):
"""
Computes a keyed MAC of ``message`` using the 128 bit variant of the
siphash-2-4 construction.
:param message: The message to hash.
:type message: bytes
:param key: the message authentication key for the siphash MAC construct
:type key: bytes(:const:`SIPHASHX_KEYBYTES`)
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes(:const:`SIPHASHX_BYTES`)
.. versionadded:: 1.2
"""
digest = _sip_hashx(message, key)
return encoder.encode(digest) | python | {
"resource": ""
} |
q32257 | crypto_sign_keypair | train | def crypto_sign_keypair():
"""
Returns a randomly generated public key and secret key.
:rtype: (bytes(public_key), bytes(secret_key))
"""
pk = ffi.new("unsigned char[]", crypto_sign_PUBLICKEYBYTES)
sk = ffi.new("unsigned char[]", crypto_sign_SECRETKEYBYTES)
rc = lib.crypto_sign_keypair(pk, sk)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return (
ffi.buffer(pk, crypto_sign_PUBLICKEYBYTES)[:],
ffi.buffer(sk, crypto_sign_SECRETKEYBYTES)[:],
) | python | {
"resource": ""
} |
q32258 | crypto_sign_seed_keypair | train | def crypto_sign_seed_keypair(seed):
"""
Computes and returns the public key and secret key using the seed ``seed``.
:param seed: bytes
:rtype: (bytes(public_key), bytes(secret_key))
"""
if len(seed) != crypto_sign_SEEDBYTES:
raise exc.ValueError("Invalid seed")
pk = ffi.new("unsigned char[]", crypto_sign_PUBLICKEYBYTES)
sk = ffi.new("unsigned char[]", crypto_sign_SECRETKEYBYTES)
rc = lib.crypto_sign_seed_keypair(pk, sk, seed)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return (
ffi.buffer(pk, crypto_sign_PUBLICKEYBYTES)[:],
ffi.buffer(sk, crypto_sign_SECRETKEYBYTES)[:],
) | python | {
"resource": ""
} |
q32259 | crypto_sign | train | def crypto_sign(message, sk):
"""
Signs the message ``message`` using the secret key ``sk`` and returns the
signed message.
:param message: bytes
:param sk: bytes
:rtype: bytes
"""
signed = ffi.new("unsigned char[]", len(message) + crypto_sign_BYTES)
signed_len = ffi.new("unsigned long long *")
rc = lib.crypto_sign(signed, signed_len, message, len(message), sk)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(signed, signed_len[0])[:] | python | {
"resource": ""
} |
q32260 | crypto_sign_open | train | def crypto_sign_open(signed, pk):
"""
Verifies the signature of the signed message ``signed`` using the public
key ``pk`` and returns the unsigned message.
:param signed: bytes
:param pk: bytes
:rtype: bytes
"""
message = ffi.new("unsigned char[]", len(signed))
message_len = ffi.new("unsigned long long *")
if lib.crypto_sign_open(
message, message_len, signed, len(signed), pk) != 0:
raise exc.BadSignatureError("Signature was forged or corrupt")
return ffi.buffer(message, message_len[0])[:] | python | {
"resource": ""
} |
q32261 | crypto_sign_ed25519ph_update | train | def crypto_sign_ed25519ph_update(edph, pmsg):
"""
Update the hash state wrapped in edph
:param edph: the ed25519ph state being updated
:type edph: crypto_sign_ed25519ph_state
:param pmsg: the partial message
:type pmsg: bytes
:rtype: None
"""
ensure(isinstance(edph, crypto_sign_ed25519ph_state),
'edph parameter must be a ed25519ph_state object',
raising=exc.TypeError)
ensure(isinstance(pmsg, bytes),
'pmsg parameter must be a bytes object',
raising=exc.TypeError)
rc = lib.crypto_sign_ed25519ph_update(edph.state,
pmsg,
len(pmsg))
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError) | python | {
"resource": ""
} |
q32262 | crypto_sign_ed25519ph_final_create | train | def crypto_sign_ed25519ph_final_create(edph,
sk):
"""
Create a signature for the data hashed in edph
using the secret key sk
:param edph: the ed25519ph state for the data
being signed
:type edph: crypto_sign_ed25519ph_state
:param sk: the ed25519 secret part of the signing key
:type sk: bytes
:return: ed25519ph signature
:rtype: bytes
"""
ensure(isinstance(edph, crypto_sign_ed25519ph_state),
'edph parameter must be a ed25519ph_state object',
raising=exc.TypeError)
ensure(isinstance(sk, bytes),
'secret key parameter must be a bytes object',
raising=exc.TypeError)
ensure(len(sk) == crypto_sign_SECRETKEYBYTES,
('secret key must be {0} '
'bytes long').format(crypto_sign_SECRETKEYBYTES),
raising=exc.TypeError)
signature = ffi.new("unsigned char[]", crypto_sign_BYTES)
rc = lib.crypto_sign_ed25519ph_final_create(edph.state,
signature,
ffi.NULL,
sk)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(signature, crypto_sign_BYTES)[:] | python | {
"resource": ""
} |
q32263 | crypto_sign_ed25519ph_final_verify | train | def crypto_sign_ed25519ph_final_verify(edph,
signature,
pk):
"""
Verify a prehashed signature using the public key pk
:param edph: the ed25519ph state for the data
being verified
:type edph: crypto_sign_ed25519ph_state
:param signature: the signature being verified
:type signature: bytes
:param pk: the ed25519 public part of the signing key
:type pk: bytes
:return: True if the signature is valid
:rtype: boolean
:raises exc.BadSignatureError: if the signature is not valid
"""
ensure(isinstance(edph, crypto_sign_ed25519ph_state),
'edph parameter must be a ed25519ph_state object',
raising=exc.TypeError)
ensure(isinstance(signature, bytes),
'signature parameter must be a bytes object',
raising=exc.TypeError)
ensure(len(signature) == crypto_sign_BYTES,
('signature must be {0} '
'bytes long').format(crypto_sign_BYTES),
raising=exc.TypeError)
ensure(isinstance(pk, bytes),
'public key parameter must be a bytes object',
raising=exc.TypeError)
ensure(len(pk) == crypto_sign_PUBLICKEYBYTES,
('public key must be {0} '
'bytes long').format(crypto_sign_PUBLICKEYBYTES),
raising=exc.TypeError)
rc = lib.crypto_sign_ed25519ph_final_verify(edph.state,
signature,
pk)
if rc != 0:
raise exc.BadSignatureError("Signature was forged or corrupt")
return True | python | {
"resource": ""
} |
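An end-to-end flow for the ed25519ph bindings above. Separate state objects are used for signing and verifying, since finalizing consumes the hash state:

```python
from nacl.bindings import (crypto_sign_keypair, crypto_sign_ed25519ph_state,
                           crypto_sign_ed25519ph_update,
                           crypto_sign_ed25519ph_final_create,
                           crypto_sign_ed25519ph_final_verify)

pk, sk = crypto_sign_keypair()

sign_state = crypto_sign_ed25519ph_state()
for chunk in (b'first part, ', b'second part'):
    crypto_sign_ed25519ph_update(sign_state, chunk)
signature = crypto_sign_ed25519ph_final_create(sign_state, sk)

verify_state = crypto_sign_ed25519ph_state()
for chunk in (b'first part, ', b'second part'):
    crypto_sign_ed25519ph_update(verify_state, chunk)
assert crypto_sign_ed25519ph_final_verify(verify_state, signature, pk)
```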
q32264 | kdf | train | def kdf(size, password, salt,
opslimit=OPSLIMIT_SENSITIVE,
memlimit=MEMLIMIT_SENSITIVE,
encoder=nacl.encoding.RawEncoder):
"""
Derive a ``size`` bytes long key from a caller-supplied
``password`` and ``salt`` pair using the argon2i
memory-hard construct.
the enclosing module provides the constants
- :py:const:`.OPSLIMIT_INTERACTIVE`
- :py:const:`.MEMLIMIT_INTERACTIVE`
- :py:const:`.OPSLIMIT_MODERATE`
- :py:const:`.MEMLIMIT_MODERATE`
- :py:const:`.OPSLIMIT_SENSITIVE`
- :py:const:`.MEMLIMIT_SENSITIVE`
    as guidance for correct settings.
:param size: derived key size, must be between
:py:const:`.BYTES_MIN` and
:py:const:`.BYTES_MAX`
:type size: int
:param password: password used to seed the key derivation procedure;
its length must be between
:py:const:`.PASSWD_MIN` and
:py:const:`.PASSWD_MAX`
:type password: bytes
:param salt: **RANDOM** salt used in the key derivation procedure;
its length must be exactly :py:const:`.SALTBYTES`
:type salt: bytes
:param opslimit: the time component (operation count)
of the key derivation procedure's computational cost;
it must be between
:py:const:`.OPSLIMIT_MIN` and
:py:const:`.OPSLIMIT_MAX`
:type opslimit: int
:param memlimit: the memory occupation component
of the key derivation procedure's computational cost;
it must be between
:py:const:`.MEMLIMIT_MIN` and
:py:const:`.MEMLIMIT_MAX`
:type memlimit: int
:rtype: bytes
.. versionadded:: 1.2
"""
return encoder.encode(
nacl.bindings.crypto_pwhash_alg(size, password, salt,
opslimit, memlimit,
ALG)
) | python | {
"resource": ""
} |
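A minimal usage sketch for this argon2i ``kdf``, assuming the surrounding ``nacl.pwhash.argon2i`` module layout:

from nacl import pwhash, utils

salt = utils.random(pwhash.argon2i.SALTBYTES)
key = pwhash.argon2i.kdf(32, b"correct horse battery staple", salt,
                         opslimit=pwhash.argon2i.OPSLIMIT_INTERACTIVE,
                         memlimit=pwhash.argon2i.MEMLIMIT_INTERACTIVE)
assert len(key) == 32  # raw key material, e.g. for a secretbox key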
q32265 | str | train | def str(password,
opslimit=OPSLIMIT_INTERACTIVE,
memlimit=MEMLIMIT_INTERACTIVE):
"""
Hashes a password with a random salt, using the memory-hard
argon2i construct and returning an ascii string that has all
the needed info to check against a future password
The default settings for opslimit and memlimit are those deemed
correct for the interactive user login case.
:param bytes password:
:param int opslimit:
:param int memlimit:
:rtype: bytes
.. versionadded:: 1.2
"""
return nacl.bindings.crypto_pwhash_str_alg(password,
opslimit,
memlimit,
ALG) | python | {
"resource": ""
} |
q32266 | scrypt | train | def scrypt(password, salt='', n=2**20, r=8, p=1,
maxmem=2**25, dklen=64):
"""
Derive a cryptographic key using the scrypt KDF.
Implements the same signature as the ``hashlib.scrypt`` implemented
in cpython version 3.6
"""
return nacl.bindings.crypto_pwhash_scryptsalsa208sha256_ll(
password, salt, n, r, p, maxmem=maxmem, dklen=dklen) | python | {
"resource": ""
} |
q32267 | crypto_secretbox | train | def crypto_secretbox(message, nonce, key):
"""
Encrypts and returns the message ``message`` with the secret ``key`` and
the nonce ``nonce``.
:param message: bytes
:param nonce: bytes
:param key: bytes
:rtype: bytes
"""
if len(key) != crypto_secretbox_KEYBYTES:
raise exc.ValueError("Invalid key")
if len(nonce) != crypto_secretbox_NONCEBYTES:
raise exc.ValueError("Invalid nonce")
padded = b"\x00" * crypto_secretbox_ZEROBYTES + message
ciphertext = ffi.new("unsigned char[]", len(padded))
res = lib.crypto_secretbox(ciphertext, padded, len(padded), nonce, key)
ensure(res == 0, "Encryption failed", raising=exc.CryptoError)
ciphertext = ffi.buffer(ciphertext, len(padded))
return ciphertext[crypto_secretbox_BOXZEROBYTES:] | python | {
"resource": ""
} |
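A round-trip sketch pairing ``crypto_secretbox`` with its ``crypto_secretbox_open`` counterpart (defined elsewhere in the bindings):

import nacl.bindings as b
from nacl.utils import random

key = random(b.crypto_secretbox_KEYBYTES)
nonce = random(b.crypto_secretbox_NONCEBYTES)   # never reuse with a key
ct = b.crypto_secretbox(b"attack at dawn", nonce, key)
assert b.crypto_secretbox_open(ct, nonce, key) == b"attack at dawn"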
q32268 | _checkparams | train | def _checkparams(digest_size, key, salt, person):
"""Check hash paramters"""
ensure(isinstance(key, bytes),
'Key must be a bytes sequence',
raising=exc.TypeError)
ensure(isinstance(salt, bytes),
'Salt must be a bytes sequence',
raising=exc.TypeError)
ensure(isinstance(person, bytes),
'Person must be a bytes sequence',
raising=exc.TypeError)
ensure(isinstance(digest_size, integer_types),
'Digest size must be an integer number',
raising=exc.TypeError)
ensure(digest_size <= crypto_generichash_BYTES_MAX,
_TOOBIG.format("Digest_size", crypto_generichash_BYTES_MAX),
raising=exc.ValueError)
ensure(len(key) <= crypto_generichash_KEYBYTES_MAX,
_OVERLONG.format("Key", crypto_generichash_KEYBYTES_MAX),
raising=exc.ValueError)
ensure(len(salt) <= crypto_generichash_SALTBYTES,
_OVERLONG.format("Salt", crypto_generichash_SALTBYTES),
raising=exc.ValueError)
ensure(len(person) <= crypto_generichash_PERSONALBYTES,
_OVERLONG.format("Person", crypto_generichash_PERSONALBYTES),
raising=exc.ValueError) | python | {
"resource": ""
} |
q32269 | generichash_blake2b_salt_personal | train | def generichash_blake2b_salt_personal(data,
digest_size=crypto_generichash_BYTES,
key=b'', salt=b'', person=b''):
"""One shot hash interface
:param data: the input data to the hash function
:param digest_size: must be at most
:py:data:`.crypto_generichash_BYTES_MAX`;
the default digest size is
:py:data:`.crypto_generichash_BYTES`
:type digest_size: int
:param key: must be at most
:py:data:`.crypto_generichash_KEYBYTES_MAX` long
:type key: bytes
:param salt: must be at most
:py:data:`.crypto_generichash_SALTBYTES` long;
will be zero-padded if needed
:type salt: bytes
:param person: must be at most
:py:data:`.crypto_generichash_PERSONALBYTES` long;
will be zero-padded if needed
:type person: bytes
:return: digest_size long digest
:rtype: bytes
"""
_checkparams(digest_size, key, salt, person)
ensure(isinstance(data, bytes),
'Input data must be a bytes sequence',
raising=exc.TypeError)
digest = ffi.new("unsigned char[]", digest_size)
# both _salt and _personal must be zero-padded to the correct length
_salt = ffi.new("unsigned char []", crypto_generichash_SALTBYTES)
_person = ffi.new("unsigned char []", crypto_generichash_PERSONALBYTES)
ffi.memmove(_salt, salt, len(salt))
ffi.memmove(_person, person, len(person))
rc = lib.crypto_generichash_blake2b_salt_personal(digest, digest_size,
data, len(data),
key, len(key),
_salt, _person)
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError)
return ffi.buffer(digest, digest_size)[:] | python | {
"resource": ""
} |
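A short sketch of the one-shot interface above; the key and personalisation strings are illustrative values only:

tag = generichash_blake2b_salt_personal(b"payload",
                                        digest_size=32,
                                        key=b"shared secret",
                                        person=b"myapp v1")
assert len(tag) == 32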
q32270 | generichash_blake2b_init | train | def generichash_blake2b_init(key=b'', salt=b'',
person=b'',
digest_size=crypto_generichash_BYTES):
"""
Create a new initialized blake2b hash state
:param key: must be at most
:py:data:`.crypto_generichash_KEYBYTES_MAX` long
:type key: bytes
:param salt: must be at most
:py:data:`.crypto_generichash_SALTBYTES` long;
will be zero-padded if needed
:type salt: bytes
:param person: must be at most
:py:data:`.crypto_generichash_PERSONALBYTES` long:
will be zero-padded if needed
:type person: bytes
:param digest_size: must be at most
:py:data:`.crypto_generichash_BYTES_MAX`;
the default digest size is
:py:data:`.crypto_generichash_BYTES`
:type digest_size: int
:return: an initialized :py:class:`.Blake2State`
:rtype: object
"""
_checkparams(digest_size, key, salt, person)
state = Blake2State(digest_size)
# both _salt and _personal must be zero-padded to the correct length
_salt = ffi.new("unsigned char []", crypto_generichash_SALTBYTES)
_person = ffi.new("unsigned char []", crypto_generichash_PERSONALBYTES)
ffi.memmove(_salt, salt, len(salt))
ffi.memmove(_person, person, len(person))
rc = lib.crypto_generichash_blake2b_init_salt_personal(state._statebuf,
key, len(key),
digest_size,
_salt, _person)
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError)
return state | python | {
"resource": ""
} |
q32271 | generichash_blake2b_update | train | def generichash_blake2b_update(state, data):
"""Update the blake2b hash state
:param state: an initialized Blake2State object as returned from
:py:func:`.crypto_generichash_blake2b_init`
:type state: :py:class:`.Blake2State`
:param data:
:type data: bytes
"""
ensure(isinstance(state, Blake2State),
'State must be a Blake2State object',
raising=exc.TypeError)
ensure(isinstance(data, bytes),
'Input data must be a bytes sequence',
raising=exc.TypeError)
rc = lib.crypto_generichash_blake2b_update(state._statebuf,
data, len(data))
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError) | python | {
"resource": ""
} |
q32272 | generichash_blake2b_final | train | def generichash_blake2b_final(state):
"""Finalize the blake2b hash state and return the digest.
:param state: an initialized Blake2State object as returned from
:py:func:`.crypto_generichash_blake2b_init`
:type state: :py:class:`.Blake2State`
:return: the blake2 digest of the passed-in data stream
:rtype: bytes
"""
ensure(isinstance(state, Blake2State),
'State must be a Blake2State object',
raising=exc.TypeError)
_digest = ffi.new("unsigned char[]", crypto_generichash_BYTES_MAX)
rc = lib.crypto_generichash_blake2b_final(state._statebuf,
_digest, state.digest_size)
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError)
return ffi.buffer(_digest, state.digest_size)[:] | python | {
"resource": ""
} |
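The init/update/final trio above composes into a streaming hash; with empty key, salt and personalisation it should match the one-shot function:

state = generichash_blake2b_init(digest_size=32)
generichash_blake2b_update(state, b"hello ")
generichash_blake2b_update(state, b"world")
digest = generichash_blake2b_final(state)
assert digest == generichash_blake2b_salt_personal(b"hello world",
                                                   digest_size=32)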
q32273 | crypto_secretstream_xchacha20poly1305_init_push | train | def crypto_secretstream_xchacha20poly1305_init_push(state, key):
"""
Initialize a crypto_secretstream_xchacha20poly1305 encryption buffer.
:param state: a secretstream state object
:type state: crypto_secretstream_xchacha20poly1305_state
:param key: must be
:data:`.crypto_secretstream_xchacha20poly1305_KEYBYTES` long
:type key: bytes
:return: header
:rtype: bytes
"""
ensure(
isinstance(state, crypto_secretstream_xchacha20poly1305_state),
'State must be a crypto_secretstream_xchacha20poly1305_state object',
raising=exc.TypeError,
)
ensure(
isinstance(key, bytes),
'Key must be a bytes sequence',
raising=exc.TypeError,
)
ensure(
len(key) == crypto_secretstream_xchacha20poly1305_KEYBYTES,
'Invalid key length',
raising=exc.ValueError,
)
headerbuf = ffi.new(
"unsigned char []",
crypto_secretstream_xchacha20poly1305_HEADERBYTES,
)
rc = lib.crypto_secretstream_xchacha20poly1305_init_push(
state.statebuf, headerbuf, key)
ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError)
return ffi.buffer(headerbuf)[:] | python | {
"resource": ""
} |
q32274 | crypto_secretstream_xchacha20poly1305_push | train | def crypto_secretstream_xchacha20poly1305_push(
state,
m,
ad=None,
tag=crypto_secretstream_xchacha20poly1305_TAG_MESSAGE,
):
"""
Add an encrypted message to the secret stream.
:param state: a secretstream state object
:type state: crypto_secretstream_xchacha20poly1305_state
:param m: the message to encrypt, the maximum length of an individual
message is
:data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX`.
:type m: bytes
:param ad: additional data to include in the authentication tag
:type ad: bytes or None
:param tag: the message tag, usually
:data:`.crypto_secretstream_xchacha20poly1305_TAG_MESSAGE` or
:data:`.crypto_secretstream_xchacha20poly1305_TAG_FINAL`.
:type tag: int
:return: ciphertext
:rtype: bytes
"""
ensure(
isinstance(state, crypto_secretstream_xchacha20poly1305_state),
'State must be a crypto_secretstream_xchacha20poly1305_state object',
raising=exc.TypeError,
)
ensure(isinstance(m, bytes), 'Message is not bytes', raising=exc.TypeError)
ensure(
len(m) <= crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX,
'Message is too long',
raising=exc.ValueError,
)
ensure(
ad is None or isinstance(ad, bytes),
'Additional data must be bytes or None',
raising=exc.TypeError,
)
clen = len(m) + crypto_secretstream_xchacha20poly1305_ABYTES
if state.rawbuf is None or len(state.rawbuf) < clen:
state.rawbuf = ffi.new('unsigned char[]', clen)
if ad is None:
ad = ffi.NULL
adlen = 0
else:
adlen = len(ad)
rc = lib.crypto_secretstream_xchacha20poly1305_push(
state.statebuf,
state.rawbuf, ffi.NULL,
m, len(m),
ad, adlen,
tag,
)
ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError)
return ffi.buffer(state.rawbuf, clen)[:] | python | {
"resource": ""
} |
q32275 | crypto_secretstream_xchacha20poly1305_init_pull | train | def crypto_secretstream_xchacha20poly1305_init_pull(state, header, key):
"""
Initialize a crypto_secretstream_xchacha20poly1305 decryption buffer.
:param state: a secretstream state object
:type state: crypto_secretstream_xchacha20poly1305_state
:param header: must be
:data:`.crypto_secretstream_xchacha20poly1305_HEADERBYTES` long
:type header: bytes
:param key: must be
:data:`.crypto_secretstream_xchacha20poly1305_KEYBYTES` long
:type key: bytes
"""
ensure(
isinstance(state, crypto_secretstream_xchacha20poly1305_state),
'State must be a crypto_secretstream_xchacha20poly1305_state object',
raising=exc.TypeError,
)
ensure(
isinstance(header, bytes),
'Header must be a bytes sequence',
raising=exc.TypeError,
)
ensure(
len(header) == crypto_secretstream_xchacha20poly1305_HEADERBYTES,
'Invalid header length',
raising=exc.ValueError,
)
ensure(
isinstance(key, bytes),
'Key must be a bytes sequence',
raising=exc.TypeError,
)
ensure(
len(key) == crypto_secretstream_xchacha20poly1305_KEYBYTES,
'Invalid key length',
raising=exc.ValueError,
)
if state.tagbuf is None:
state.tagbuf = ffi.new('unsigned char *')
rc = lib.crypto_secretstream_xchacha20poly1305_init_pull(
state.statebuf, header, key)
ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError) | python | {
"resource": ""
} |
q32276 | crypto_secretstream_xchacha20poly1305_pull | train | def crypto_secretstream_xchacha20poly1305_pull(state, c, ad=None):
"""
Read a decrypted message from the secret stream.
:param state: a secretstream state object
:type state: crypto_secretstream_xchacha20poly1305_state
:param c: the ciphertext to decrypt, the maximum length of an individual
ciphertext is
:data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX` +
:data:`.crypto_secretstream_xchacha20poly1305_ABYTES`.
:type c: bytes
:param ad: additional data to include in the authentication tag
:type ad: bytes or None
:return: (message, tag)
:rtype: (bytes, int)
"""
ensure(
isinstance(state, crypto_secretstream_xchacha20poly1305_state),
'State must be a crypto_secretstream_xchacha20poly1305_state object',
raising=exc.TypeError,
)
ensure(
state.tagbuf is not None,
(
'State must be initialized using '
'crypto_secretstream_xchacha20poly1305_init_pull'
),
raising=exc.ValueError,
)
ensure(
isinstance(c, bytes),
'Ciphertext is not bytes',
raising=exc.TypeError,
)
ensure(
len(c) > crypto_secretstream_xchacha20poly1305_ABYTES,
'Ciphertext is too short',
raising=exc.ValueError,
)
ensure(
len(c) <= (
crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX +
crypto_secretstream_xchacha20poly1305_ABYTES
),
'Ciphertext is too long',
raising=exc.ValueError,
)
ensure(
ad is None or isinstance(ad, bytes),
'Additional data must be bytes or None',
raising=exc.TypeError,
)
mlen = len(c) - crypto_secretstream_xchacha20poly1305_ABYTES
if state.rawbuf is None or len(state.rawbuf) < mlen:
state.rawbuf = ffi.new('unsigned char[]', mlen)
if ad is None:
ad = ffi.NULL
adlen = 0
else:
adlen = len(ad)
rc = lib.crypto_secretstream_xchacha20poly1305_pull(
state.statebuf,
state.rawbuf, ffi.NULL,
state.tagbuf,
c, len(c),
ad, adlen,
)
ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError)
return (ffi.buffer(state.rawbuf, mlen)[:], int(state.tagbuf[0])) | python | {
"resource": ""
} |
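A push/pull round trip over the secretstream bindings above, assuming the functions, constants and the state class are re-exported from ``nacl.bindings``:

import nacl.bindings as b
from nacl.utils import random

key = random(b.crypto_secretstream_xchacha20poly1305_KEYBYTES)
tx = b.crypto_secretstream_xchacha20poly1305_state()
header = b.crypto_secretstream_xchacha20poly1305_init_push(tx, key)
c1 = b.crypto_secretstream_xchacha20poly1305_push(tx, b"chunk 1")
c2 = b.crypto_secretstream_xchacha20poly1305_push(
    tx, b"chunk 2", tag=b.crypto_secretstream_xchacha20poly1305_TAG_FINAL)

rx = b.crypto_secretstream_xchacha20poly1305_state()
b.crypto_secretstream_xchacha20poly1305_init_pull(rx, header, key)
m1, _ = b.crypto_secretstream_xchacha20poly1305_pull(rx, c1)
m2, t2 = b.crypto_secretstream_xchacha20poly1305_pull(rx, c2)
assert (m1, m2) == (b"chunk 1", b"chunk 2")
assert t2 == b.crypto_secretstream_xchacha20poly1305_TAG_FINAL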
q32277 | crypto_secretstream_xchacha20poly1305_rekey | train | def crypto_secretstream_xchacha20poly1305_rekey(state):
"""
Explicitly change the encryption key in the stream.
Normally the stream is re-keyed as needed or an explicit ``tag`` of
:data:`.crypto_secretstream_xchacha20poly1305_TAG_REKEY` is added to a
message to ensure forward secrecy, but this method can be used instead
if the re-keying is controlled without adding the tag.
:param state: a secretstream state object
:type state: crypto_secretstream_xchacha20poly1305_state
"""
ensure(
isinstance(state, crypto_secretstream_xchacha20poly1305_state),
'State must be a crypto_secretstream_xchacha20poly1305_state object',
raising=exc.TypeError,
)
lib.crypto_secretstream_xchacha20poly1305_rekey(state.statebuf) | python | {
"resource": ""
} |
q32278 | verify | train | def verify(password_hash, password):
"""
Takes a stored password hash in modular crypt format, derived using
one of the algorithms supported by `libsodium`, and checks whether the
user-provided password hashes to the same string under the parameters
saved in the stored hash
"""
if password_hash.startswith(argon2id.STRPREFIX):
return argon2id.verify(password_hash, password)
elif password_hash.startswith(argon2i.STRPREFIX):
# argon2id.verify also accepts argon2i hashes: the underlying
# crypto_pwhash_str_verify call dispatches on the modular crypt prefix.
return argon2id.verify(password_hash, password)
elif password_hash.startswith(scrypt.STRPREFIX):
return scrypt.verify(password_hash, password)
else:
raise CryptPrefixError("given password_hash is not "
"in a supported format") | python | {
"resource": ""
} |
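A sketch of how this dispatcher pairs with the per-algorithm ``str`` helpers:

from nacl import pwhash

stored = pwhash.argon2i.str(b"hunter2")   # "$argon2i$..." modular crypt string
assert pwhash.verify(stored, b"hunter2")  # dispatches on the stored prefix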
q32279 | kdf | train | def kdf(size, password, salt,
opslimit=OPSLIMIT_SENSITIVE,
memlimit=MEMLIMIT_SENSITIVE,
encoder=nacl.encoding.RawEncoder):
"""
Derive a ``size`` bytes long key from a caller-supplied
``password`` and ``salt`` pair using the scryptsalsa208sha256
memory-hard construct.
The enclosing module provides the constants
- :py:const:`.OPSLIMIT_INTERACTIVE`
- :py:const:`.MEMLIMIT_INTERACTIVE`
- :py:const:`.OPSLIMIT_SENSITIVE`
- :py:const:`.MEMLIMIT_SENSITIVE`
- :py:const:`.OPSLIMIT_MODERATE`
- :py:const:`.MEMLIMIT_MODERATE`
as guidance for correct settings for, respectively, the interactive
login case and the protection of long-term sensitive data.
:param size: derived key size, must be between
:py:const:`.BYTES_MIN` and
:py:const:`.BYTES_MAX`
:type size: int
:param password: password used to seed the key derivation procedure;
its length must be between
:py:const:`.PASSWD_MIN` and
:py:const:`.PASSWD_MAX`
:type password: bytes
:param salt: **RANDOM** salt used in the key derivation procedure;
its length must be exactly :py:const:`.SALTBYTES`
:type salt: bytes
:param opslimit: the time component (operation count)
of the key derivation procedure's computational cost;
it must be between
:py:const:`.OPSLIMIT_MIN` and
:py:const:`.OPSLIMIT_MAX`
:type opslimit: int
:param memlimit: the memory occupation component
of the key derivation procedure's computational cost;
it must be between
:py:const:`.MEMLIMIT_MIN` and
:py:const:`.MEMLIMIT_MAX`
:type memlimit: int
:rtype: bytes
.. versionadded:: 1.2
"""
ensure(
len(salt) == SALTBYTES,
"The salt must be exactly %s, not %s bytes long" % (
SALTBYTES,
len(salt)
),
raising=exc.ValueError
)
n_log2, r, p = nacl.bindings.nacl_bindings_pick_scrypt_params(opslimit,
memlimit)
maxmem = memlimit + (2 ** 16)
return encoder.encode(
nacl.bindings.crypto_pwhash_scryptsalsa208sha256_ll(
password, salt, 2 ** n_log2, r, p, maxmem=maxmem, dklen=size)
) | python | {
"resource": ""
} |
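A usage sketch mirroring the argon2i variant, using this module's own constants (and assuming scrypt support in the underlying libsodium build):

from nacl import pwhash, utils

salt = utils.random(pwhash.scrypt.SALTBYTES)
key = pwhash.scrypt.kdf(32, b"passphrase", salt,
                        opslimit=pwhash.scrypt.OPSLIMIT_INTERACTIVE,
                        memlimit=pwhash.scrypt.MEMLIMIT_INTERACTIVE)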
q32280 | str | train | def str(password,
opslimit=OPSLIMIT_INTERACTIVE,
memlimit=MEMLIMIT_INTERACTIVE):
"""
Hashes a password with a random salt, using the memory-hard
scryptsalsa208sha256 construct and returning an ASCII string
that has all the needed info to check against a future password
The default settings for opslimit and memlimit are those deemed
correct for the interactive user login case.
:param bytes password:
:param int opslimit:
:param int memlimit:
:rtype: bytes
.. versionadded:: 1.2
"""
return nacl.bindings.crypto_pwhash_scryptsalsa208sha256_str(password,
opslimit,
memlimit) | python | {
"resource": ""
} |
q32281 | verify | train | def verify(password_hash, password):
"""
Takes the output of scryptsalsa208sha256 and compares it against
a user-provided password to see whether they match
:param password_hash: bytes
:param password: bytes
:rtype: boolean
.. versionadded:: 1.2
"""
ensure(len(password_hash) == PWHASH_SIZE,
"The password hash must be exactly %s bytes long" %
nacl.bindings.crypto_pwhash_scryptsalsa208sha256_STRBYTES,
raising=exc.ValueError)
return nacl.bindings.crypto_pwhash_scryptsalsa208sha256_str_verify(
password_hash, password
) | python | {
"resource": ""
} |
q32282 | PrivateKey.from_seed | train | def from_seed(cls, seed, encoder=encoding.RawEncoder):
"""
Generate a PrivateKey using a deterministic construction
starting from a caller-provided seed
.. warning:: The seed **must** be high-entropy; therefore,
its generator **must** be a cryptographic quality
random function like, for example, :func:`~nacl.utils.random`.
.. warning:: The seed **must** be protected and remain secret.
Anyone who knows the seed is really in possession of
the corresponding PrivateKey.
:param seed: The seed used to generate the private key
:rtype: :class:`~nacl.public.PrivateKey`
"""
# decode the seed
seed = encoder.decode(seed)
# Verify the given seed type and size are correct
if not (isinstance(seed, bytes) and len(seed) == cls.SEED_SIZE):
raise exc.TypeError(("PrivateKey seed must be a {0} bytes long "
"binary sequence").format(cls.SEED_SIZE)
)
# generate a raw keypair from the given seed
raw_pk, raw_sk = nacl.bindings.crypto_box_seed_keypair(seed)
# construct an instance from the raw secret key
return cls(raw_sk) | python | {
"resource": ""
} |
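A determinism sketch for ``from_seed``; the same seed always yields the same key:

from nacl.public import PrivateKey
from nacl.utils import random

seed = random(PrivateKey.SEED_SIZE)
sk = PrivateKey.from_seed(seed)
assert bytes(PrivateKey.from_seed(seed)) == bytes(sk)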
q32283 | SealedBox.encrypt | train | def encrypt(self, plaintext, encoder=encoding.RawEncoder):
"""
Encrypts the plaintext message using a randomly generated ephemeral
keypair and returns a "composed ciphertext", containing both
the public part of the keypair and the ciphertext proper,
encoded with the encoder.
The private part of the ephemeral key-pair will be scrubbed before
returning the ciphertext; therefore, the sender will not be able to
decrypt the generated ciphertext.
:param plaintext: [:class:`bytes`] The plaintext message to encrypt
:param encoder: The encoder to use to encode the ciphertext
:return bytes: encoded ciphertext
"""
ciphertext = nacl.bindings.crypto_box_seal(
plaintext,
self._public_key
)
encoded_ciphertext = encoder.encode(ciphertext)
return encoded_ciphertext | python | {
"resource": ""
} |
q32284 | SealedBox.decrypt | train | def decrypt(self, ciphertext, encoder=encoding.RawEncoder):
"""
Decrypts the ciphertext using the ephemeral public key enclosed
in the ciphertext and the SealedBox private key, returning
the plaintext message.
:param ciphertext: [:class:`bytes`] The encrypted message to decrypt
:param encoder: The encoder used to decode the ciphertext.
:return bytes: The original plaintext
"""
# Decode our ciphertext
ciphertext = encoder.decode(ciphertext)
plaintext = nacl.bindings.crypto_box_seal_open(
ciphertext,
self._public_key,
self._private_key,
)
return plaintext | python | {
"resource": ""
} |
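An encrypt/decrypt round trip over the two SealedBox methods above:

from nacl.public import PrivateKey, SealedBox

recipient = PrivateKey.generate()
ct = SealedBox(recipient.public_key).encrypt(b"anonymous hello")
assert SealedBox(recipient).decrypt(ct) == b"anonymous hello"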
q32285 | release | train | def release(ctx, version):
"""
``version`` should be a string like '0.4' or '1.0'.
"""
invoke.run("git tag -s {0} -m '{0} release'".format(version))
invoke.run("git push --tags")
invoke.run("python setup.py sdist")
invoke.run("twine upload -s dist/PyNaCl-{0}* ".format(version))
session = requests.Session()
token = getpass.getpass("Input the Jenkins token: ")
response = session.post(
"{0}/build".format(JENKINS_URL),
params={
"cause": "Building wheels for {0}".format(version),
"token": token
}
)
response.raise_for_status()
wait_for_build_completed(session)
paths = download_artifacts(session)
invoke.run("twine upload {0}".format(" ".join(paths))) | python | {
"resource": ""
} |
q32286 | randombytes | train | def randombytes(size):
"""
Returns ``size`` number of random bytes from a cryptographically secure
random source.
:param size: int
:rtype: bytes
"""
buf = ffi.new("unsigned char[]", size)
lib.randombytes(buf, size)
return ffi.buffer(buf, size)[:] | python | {
"resource": ""
} |
q32287 | crypto_scalarmult_base | train | def crypto_scalarmult_base(n):
"""
Computes and returns the scalar product of a standard group element and an
integer ``n``.
:param n: bytes
:rtype: bytes
"""
q = ffi.new("unsigned char[]", crypto_scalarmult_BYTES)
rc = lib.crypto_scalarmult_base(q, n)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(q, crypto_scalarmult_BYTES)[:] | python | {
"resource": ""
} |
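A minimal X25519 key-agreement sketch built on ``crypto_scalarmult_base`` and its two-argument sibling ``crypto_scalarmult``:

import nacl.bindings as b
from nacl.utils import random

sk_a = random(b.crypto_scalarmult_SCALARBYTES)
sk_b = random(b.crypto_scalarmult_SCALARBYTES)
pk_a = b.crypto_scalarmult_base(sk_a)
pk_b = b.crypto_scalarmult_base(sk_b)
# both sides compute the same shared point
assert b.crypto_scalarmult(sk_a, pk_b) == b.crypto_scalarmult(sk_b, pk_a)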
q32288 | crypto_scalarmult_ed25519_base | train | def crypto_scalarmult_ed25519_base(n):
"""
Computes and returns the scalar product of a standard group element and an
integer ``n`` on the edwards25519 curve.
:param n: a :py:data:`.crypto_scalarmult_ed25519_SCALARBYTES` long bytes
sequence representing a scalar
:type n: bytes
:return: a point on the edwards25519 curve, represented as a
:py:data:`.crypto_scalarmult_ed25519_BYTES` long bytes sequence
:rtype: bytes
"""
ensure(isinstance(n, bytes) and
len(n) == crypto_scalarmult_ed25519_SCALARBYTES,
'Input must be a {} long bytes sequence'.format(
'crypto_scalarmult_ed25519_SCALARBYTES'),
raising=exc.TypeError)
q = ffi.new("unsigned char[]", crypto_scalarmult_ed25519_BYTES)
rc = lib.crypto_scalarmult_ed25519_base(q, n)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(q, crypto_scalarmult_ed25519_BYTES)[:] | python | {
"resource": ""
} |
q32289 | nacl_bindings_pick_scrypt_params | train | def nacl_bindings_pick_scrypt_params(opslimit, memlimit):
"""Python implementation of libsodium's pickparams"""
if opslimit < 32768:
opslimit = 32768
r = 8
if opslimit < (memlimit // 32):
p = 1
maxn = opslimit // (4 * r)
for n_log2 in range(1, 63): # pragma: no branch
if (2 ** n_log2) > (maxn // 2):
break
else:
maxn = memlimit // (r * 128)
for n_log2 in range(1, 63): # pragma: no branch
if (2 ** n_log2) > maxn // 2:
break
maxrp = (opslimit // 4) // (2 ** n_log2)
if maxrp > 0x3fffffff: # pragma: no cover
maxrp = 0x3fffffff
p = maxrp // r
return n_log2, r, p | python | {
"resource": ""
} |
q32290 | crypto_pwhash_scryptsalsa208sha256_ll | train | def crypto_pwhash_scryptsalsa208sha256_ll(passwd, salt, n, r, p, dklen=64,
maxmem=SCRYPT_MAX_MEM):
"""
Derive a cryptographic key using the ``passwd`` and ``salt``
given as input.
The work factor can be tuned by picking different
values for the parameters ``n``, ``r`` and ``p``.
:param bytes passwd:
:param bytes salt:
:param int n: CPU/memory cost factor; must be a power of two
:param int r: block size
:param int p: the parallelism factor
:param int dklen: length in bytes of the derived key
:param int maxmem: the maximum memory available for scrypt's
operations
:rtype: bytes
"""
ensure(isinstance(n, integer_types),
raising=TypeError)
ensure(isinstance(r, integer_types),
raising=TypeError)
ensure(isinstance(p, integer_types),
raising=TypeError)
ensure(isinstance(passwd, bytes),
raising=TypeError)
ensure(isinstance(salt, bytes),
raising=TypeError)
_check_memory_occupation(n, r, p, maxmem)
buf = ffi.new("uint8_t[]", dklen)
ret = lib.crypto_pwhash_scryptsalsa208sha256_ll(passwd, len(passwd),
salt, len(salt),
n, r, p,
buf, dklen)
ensure(ret == 0, 'Unexpected failure in key derivation',
raising=exc.RuntimeError)
return ffi.buffer(ffi.cast("char *", buf), dklen)[:] | python | {
"resource": ""
} |
q32291 | crypto_pwhash_scryptsalsa208sha256_str | train | def crypto_pwhash_scryptsalsa208sha256_str(
passwd, opslimit=SCRYPT_OPSLIMIT_INTERACTIVE,
memlimit=SCRYPT_MEMLIMIT_INTERACTIVE):
"""
Derive a cryptographic key using the ``passwd`` and ``salt``
given as input, returning a string representation which includes
the salt and the tuning parameters.
The returned string can be directly stored as a password hash.
See :py:func:`.crypto_pwhash_scryptsalsa208sha256` for a short
discussion about ``opslimit`` and ``memlimit`` values.
:param bytes passwd:
:param int opslimit:
:param int memlimit:
:return: serialized key hash, including salt and tuning parameters
:rtype: bytes
"""
buf = ffi.new("char[]", SCRYPT_STRBYTES)
ret = lib.crypto_pwhash_scryptsalsa208sha256_str(buf, passwd,
len(passwd),
opslimit,
memlimit)
ensure(ret == 0, 'Unexpected failure in password hashing',
raising=exc.RuntimeError)
return ffi.string(buf) | python | {
"resource": ""
} |
q32292 | crypto_pwhash_scryptsalsa208sha256_str_verify | train | def crypto_pwhash_scryptsalsa208sha256_str_verify(passwd_hash, passwd):
"""
Verifies the ``passwd`` against the ``passwd_hash`` that was generated.
Returns True on success, raises InvalidkeyError on failure
:param passwd_hash: bytes
:param passwd: bytes
:rtype: boolean
"""
ensure(len(passwd_hash) == SCRYPT_STRBYTES - 1, 'Invalid password hash',
raising=exc.ValueError)
ret = lib.crypto_pwhash_scryptsalsa208sha256_str_verify(passwd_hash,
passwd,
len(passwd))
ensure(ret == 0,
"Wrong password",
raising=exc.InvalidkeyError)
# all went well, therefore:
return True | python | {
"resource": ""
} |
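A hash-then-verify round trip over the two scrypt string functions:

import nacl.bindings as b

stored = b.crypto_pwhash_scryptsalsa208sha256_str(b"hunter2")
assert b.crypto_pwhash_scryptsalsa208sha256_str_verify(stored, b"hunter2")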
q32293 | crypto_pwhash_alg | train | def crypto_pwhash_alg(outlen, passwd, salt, opslimit, memlimit, alg):
"""
Derive a raw cryptographic key using the ``passwd`` and the ``salt``
given as input to the ``alg`` algorithm.
:param outlen: the length of the derived key
:type outlen: int
:param passwd: The input password
:type passwd: bytes
:param salt: the salt; must be exactly
:py:const:`.crypto_pwhash_SALTBYTES` long
:type salt: bytes
:param opslimit: computational cost
:type opslimit: int
:param memlimit: memory cost
:type memlimit: int
:param alg: algorithm identifier
:type alg: int
:return: derived key
:rtype: bytes
"""
ensure(isinstance(outlen, integer_types),
raising=exc.TypeError)
ensure(isinstance(opslimit, integer_types),
raising=exc.TypeError)
ensure(isinstance(memlimit, integer_types),
raising=exc.TypeError)
ensure(isinstance(alg, integer_types),
raising=exc.TypeError)
ensure(isinstance(passwd, bytes),
raising=exc.TypeError)
if len(salt) != crypto_pwhash_SALTBYTES:
raise exc.ValueError("salt must be exactly {0} bytes long".format(
crypto_pwhash_SALTBYTES))
if outlen < crypto_pwhash_BYTES_MIN:
raise exc.ValueError(
'derived key must be at least {0} bytes long'.format(
crypto_pwhash_BYTES_MIN))
elif outlen > crypto_pwhash_BYTES_MAX:
raise exc.ValueError(
'derived key must be at most {0} bytes long'.format(
crypto_pwhash_BYTES_MAX))
_check_argon2_limits_alg(opslimit, memlimit, alg)
outbuf = ffi.new("unsigned char[]", outlen)
ret = lib.crypto_pwhash(outbuf, outlen, passwd, len(passwd),
salt, opslimit, memlimit, alg)
ensure(ret == 0, 'Unexpected failure in key derivation',
raising=exc.RuntimeError)
return ffi.buffer(outbuf, outlen)[:] | python | {
"resource": ""
} |
q32294 | crypto_pwhash_str_alg | train | def crypto_pwhash_str_alg(passwd, opslimit, memlimit, alg):
"""
Derive a cryptographic key using the ``passwd`` given as input
and a random ``salt``, returning a string representation which
includes the salt, the tuning parameters and the used algorithm.
:param passwd: The input password
:type passwd: bytes
:param opslimit: computational cost
:type opslimit: int
:param memlimit: memory cost
:type memlimit: int
:param alg: The algorithm to use
:type alg: int
:return: serialized derived key and parameters
:rtype: bytes
"""
ensure(isinstance(opslimit, integer_types),
raising=TypeError)
ensure(isinstance(memlimit, integer_types),
raising=TypeError)
ensure(isinstance(passwd, bytes),
raising=TypeError)
_check_argon2_limits_alg(opslimit, memlimit, alg)
outbuf = ffi.new("char[]", 128)
ret = lib.crypto_pwhash_str_alg(outbuf, passwd, len(passwd),
opslimit, memlimit, alg)
ensure(ret == 0, 'Unexpected failure in key derivation',
raising=exc.RuntimeError)
return ffi.string(outbuf) | python | {
"resource": ""
} |
q32295 | crypto_pwhash_str_verify | train | def crypto_pwhash_str_verify(passwd_hash, passwd):
"""
Verifies the ``passwd`` against a given password hash.
Returns True on success, raises InvalidkeyError on failure
:param passwd_hash: saved password hash
:type passwd_hash: bytes
:param passwd: password to be checked
:type passwd: bytes
:return: success
:rtype: boolean
"""
ensure(isinstance(passwd_hash, bytes),
raising=TypeError)
ensure(isinstance(passwd, bytes),
raising=TypeError)
ensure(len(passwd_hash) <= 127,
"Hash must be at most 127 bytes long",
raising=exc.ValueError)
ret = lib.crypto_pwhash_str_verify(passwd_hash, passwd, len(passwd))
ensure(ret == 0,
"Wrong password",
raising=exc.InvalidkeyError)
# all went well, therefore:
return True | python | {
"resource": ""
} |
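A round trip over ``crypto_pwhash_str_alg``/``crypto_pwhash_str_verify``; the opslimit/memlimit values below are assumed interactive-grade argon2id parameters, not taken from this source:

import nacl.bindings as b

stored = b.crypto_pwhash_str_alg(b"hunter2",
                                 2,                  # opslimit (assumed)
                                 64 * 1024 * 1024,   # memlimit (assumed)
                                 b.crypto_pwhash_ALG_ARGON2ID13)
assert b.crypto_pwhash_str_verify(stored, b"hunter2")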
q32296 | crypto_aead_chacha20poly1305_ietf_encrypt | train | def crypto_aead_chacha20poly1305_ietf_encrypt(message, aad, nonce, key):
"""
Encrypt the given ``message`` using the IETF ratified chacha20poly1305
construction described in RFC7539.
:param message:
:type message: bytes
:param aad:
:type aad: bytes
:param nonce:
:type nonce: bytes
:param key:
:type key: bytes
:return: authenticated ciphertext
:rtype: bytes
"""
ensure(isinstance(message, bytes), 'Input message type must be bytes',
raising=exc.TypeError)
mlen = len(message)
ensure(mlen <= crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX,
'Message must be at most {0} bytes long'.format(
crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX),
raising=exc.ValueError)
ensure(isinstance(aad, bytes) or (aad is None),
'Additional data must be bytes or None',
raising=exc.TypeError)
ensure(isinstance(nonce, bytes) and
len(nonce) == crypto_aead_chacha20poly1305_ietf_NPUBBYTES,
'Nonce must be a {0} bytes long bytes sequence'.format(
crypto_aead_chacha20poly1305_ietf_NPUBBYTES),
raising=exc.TypeError)
ensure(isinstance(key, bytes) and
len(key) == crypto_aead_chacha20poly1305_ietf_KEYBYTES,
'Key must be a {0} bytes long bytes sequence'.format(
crypto_aead_chacha20poly1305_ietf_KEYBYTES),
raising=exc.TypeError)
if aad:
_aad = aad
aalen = len(aad)
else:
_aad = ffi.NULL
aalen = 0
mxout = mlen + crypto_aead_chacha20poly1305_ietf_ABYTES
clen = ffi.new("unsigned long long *")
ciphertext = ffi.new("unsigned char[]", mxout)
res = lib.crypto_aead_chacha20poly1305_ietf_encrypt(ciphertext,
clen,
message,
mlen,
_aad,
aalen,
ffi.NULL,
nonce,
key)
ensure(res == 0, "Encryption failed.", raising=exc.CryptoError)
return ffi.buffer(ciphertext, clen[0])[:] | python | {
"resource": ""
} |
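An AEAD round trip pairing this function with its ``_decrypt`` counterpart (defined elsewhere in the bindings):

import nacl.bindings as b
from nacl.utils import random

key = random(b.crypto_aead_chacha20poly1305_ietf_KEYBYTES)
nonce = random(b.crypto_aead_chacha20poly1305_ietf_NPUBBYTES)
ct = b.crypto_aead_chacha20poly1305_ietf_encrypt(b"msg", b"header",
                                                 nonce, key)
pt = b.crypto_aead_chacha20poly1305_ietf_decrypt(ct, b"header", nonce, key)
assert pt == b"msg"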
q32297 | crypto_aead_chacha20poly1305_encrypt | train | def crypto_aead_chacha20poly1305_encrypt(message, aad, nonce, key):
"""
Encrypt the given ``message`` using the "legacy" construction
described in draft-agl-tls-chacha20poly1305.
:param message:
:type message: bytes
:param aad:
:type aad: bytes
:param nonce:
:type nonce: bytes
:param key:
:type key: bytes
:return: authenticated ciphertext
:rtype: bytes
"""
ensure(isinstance(message, bytes), 'Input message type must be bytes',
raising=exc.TypeError)
mlen = len(message)
ensure(mlen <= crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX,
'Message must be at most {0} bytes long'.format(
crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX),
raising=exc.ValueError)
ensure(isinstance(aad, bytes) or (aad is None),
'Additional data must be bytes or None',
raising=exc.TypeError)
ensure(isinstance(nonce, bytes) and
len(nonce) == crypto_aead_chacha20poly1305_NPUBBYTES,
'Nonce must be a {0} bytes long bytes sequence'.format(
crypto_aead_chacha20poly1305_NPUBBYTES),
raising=exc.TypeError)
ensure(isinstance(key, bytes) and
len(key) == crypto_aead_chacha20poly1305_KEYBYTES,
'Key must be a {0} bytes long bytes sequence'.format(
crypto_aead_chacha20poly1305_KEYBYTES),
raising=exc.TypeError)
if aad:
_aad = aad
aalen = len(aad)
else:
_aad = ffi.NULL
aalen = 0
# note: the legacy ABYTES constant equals the IETF one (16 bytes),
# so using the matching name changes nothing at runtime
mxout = mlen + crypto_aead_chacha20poly1305_ABYTES
clen = ffi.new("unsigned long long *")
ciphertext = ffi.new("unsigned char[]", mxout)
res = lib.crypto_aead_chacha20poly1305_encrypt(ciphertext,
clen,
message,
mlen,
_aad,
aalen,
ffi.NULL,
nonce,
key)
ensure(res == 0, "Encryption failed.", raising=exc.CryptoError)
return ffi.buffer(ciphertext, clen[0])[:] | python | {
"resource": ""
} |
q32298 | crypto_aead_xchacha20poly1305_ietf_encrypt | train | def crypto_aead_xchacha20poly1305_ietf_encrypt(message, aad, nonce, key):
"""
Encrypt the given ``message`` using the long-nonces xchacha20poly1305
construction.
:param message:
:type message: bytes
:param aad:
:type aad: bytes
:param nonce:
:type nonce: bytes
:param key:
:type key: bytes
:return: authenticated ciphertext
:rtype: bytes
"""
ensure(isinstance(message, bytes), 'Input message type must be bytes',
raising=exc.TypeError)
mlen = len(message)
ensure(mlen <= crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX,
'Message must be at most {0} bytes long'.format(
crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX),
raising=exc.ValueError)
ensure(isinstance(aad, bytes) or (aad is None),
'Additional data must be bytes or None',
raising=exc.TypeError)
ensure(isinstance(nonce, bytes) and
len(nonce) == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES,
'Nonce must be a {0} bytes long bytes sequence'.format(
crypto_aead_xchacha20poly1305_ietf_NPUBBYTES),
raising=exc.TypeError)
ensure(isinstance(key, bytes) and
len(key) == crypto_aead_xchacha20poly1305_ietf_KEYBYTES,
'Key must be a {0} bytes long bytes sequence'.format(
crypto_aead_xchacha20poly1305_ietf_KEYBYTES),
raising=exc.TypeError)
if aad:
_aad = aad
aalen = len(aad)
else:
_aad = ffi.NULL
aalen = 0
mxout = mlen + crypto_aead_xchacha20poly1305_ietf_ABYTES
clen = ffi.new("unsigned long long *")
ciphertext = ffi.new("unsigned char[]", mxout)
res = lib.crypto_aead_xchacha20poly1305_ietf_encrypt(ciphertext,
clen,
message,
mlen,
_aad,
aalen,
ffi.NULL,
nonce,
key)
ensure(res == 0, "Encryption failed.", raising=exc.CryptoError)
return ffi.buffer(ciphertext, clen[0])[:] | python | {
"resource": ""
} |
q32299 | crypto_core_ed25519_is_valid_point | train | def crypto_core_ed25519_is_valid_point(p):
"""
Check if ``p`` represents a point on the edwards25519 curve, in canonical
form, on the main subgroup, and that the point doesn't have a small order.
:param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
representing a point on the edwards25519 curve
:type p: bytes
:return: point validity
:rtype: bool
"""
ensure(isinstance(p, bytes) and len(p) == crypto_core_ed25519_BYTES,
'Point must be a crypto_core_ed25519_BYTES long bytes sequence',
raising=exc.TypeError)
rc = lib.crypto_core_ed25519_is_valid_point(p)
return rc == 1 | python | {
"resource": ""
} |
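A validity sketch, assuming the scalarmult constant is re-exported from ``nacl.bindings`` as usual: a multiple of the base point passes the check, while the all-zero encoding (a small-order point) is rejected:

import nacl.bindings as b

scalar = (1).to_bytes(b.crypto_scalarmult_ed25519_SCALARBYTES, "little")
point = b.crypto_scalarmult_ed25519_base(scalar)
assert b.crypto_core_ed25519_is_valid_point(point)
assert not b.crypto_core_ed25519_is_valid_point(b"\x00" * 32)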