| source_code (string, lengths 45 to 7.24k) | function_name (string, lengths 1 to 111) | messages (list) |
|---|---|---|
def x(self, publish, names=False, name_only=False):
    if names:
        for name in names:
            if not name_only and isinstance(name, re._pattern_type):
                if re.match(name, publish.name):
                    return True
            else:
                operand = name if name_only else [name, './%s' % name]
                if publish in operand:
                    return True
        return False
    else:
        return True
|
publish_match
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, publish, names=False, name_only=False):\n if names:\n for name in names:\n if not name_only and isinstance(name, re._pattern_type):\n if re.match(name, publish.name):\n return True\n else:\n operand = name if name_only else [name, './%s' % name]\n if publish in operand:\n return True\n return False\n else:\n return True\n",
"role": "user"
},
{
"content": "publish_match",
"role": "assistant"
}
] |
def x(config, client, fill_repo=False, components=[]):
    repo_dict = {}
    publish_dict = {}
    for origin in ['repo', 'mirror']:
        for name, repo in config.get(origin, {}).items():
            if components and repo.get('component') not in components:
                continue
            if fill_repo and origin == 'repo':
                packages = Publish._get_packages('repos', name)
                repo_dict[name] = packages
            for distribution in repo.get('distributions'):
                publish_name = str.join('/', distribution.split('/')[:-1])
                publish_dict[publish_name, repo.get('component')] = name
    return repo_dict, publish_dict
|
get_repo_information
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(config, client, fill_repo=False, components=[]):\n repo_dict = {}\n publish_dict = {}\n for origin in ['repo', 'mirror']:\n for name, repo in config.get(origin, {}).items():\n if components and repo.get('component') not in components:\n continue\n if fill_repo and origin == 'repo':\n packages = Publish._get_packages('repos', name)\n repo_dict[name] = packages\n for distribution in repo.get('distributions'):\n publish_name = str.join('/', distribution.split('/')[:-1])\n publish_dict[publish_name, repo.get('component')] = name\n return repo_dict, publish_dict\n",
"role": "user"
},
{
"content": "get_repo_information",
"role": "assistant"
}
] |
def x(self, other, components=[]):
    lg.debug('Comparing publish %s (%s) and %s (%s)' % (
        self.name, self.storage or 'local',
        other.name, other.storage or 'local'))
    diff, equal = {}, {}
    for component, snapshots in self.components.items():
        if component not in list(other.components.keys()):
            diff[component] = snapshots
            continue
        equal_snapshots = list(
            set(snapshots).intersection(other.components[component]))
        if equal_snapshots:
            lg.debug('Equal snapshots for %s: %s' % (component,
                                                     equal_snapshots))
            equal[component] = equal_snapshots
        diff_snapshots = list(
            set(snapshots).difference(other.components[component]))
        if diff_snapshots:
            lg.debug('Different snapshots for %s: %s' % (component,
                                                         diff_snapshots))
            diff[component] = diff_snapshots
    return diff, equal
|
compare
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, other, components=[]):\n lg.debug('Comparing publish %s (%s) and %s (%s)' % (self.name, self.\n storage or 'local', other.name, other.storage or 'local'))\n diff, equal = {}, {}\n for component, snapshots in self.components.items():\n if component not in list(other.components.keys()):\n diff[component] = snapshots\n continue\n equal_snapshots = list(set(snapshots).intersection(other.components\n [component]))\n if equal_snapshots:\n lg.debug('Equal snapshots for %s: %s' % (component,\n equal_snapshots))\n equal[component] = equal_snapshots\n diff_snapshots = list(set(snapshots).difference(other.components[\n component]))\n if diff_snapshots:\n lg.debug('Different snapshots for %s: %s' % (component,\n diff_snapshots))\n diff[component] = diff_snapshots\n return diff, equal\n",
"role": "user"
},
{
"content": "compare",
"role": "assistant"
}
] |
def x(self):
    publishes = self._get_publishes(self.client)
    for publish in publishes:
        if (publish['Distribution'] == self.distribution and
                publish['Prefix'].replace('/', '_') == (self.prefix or '.') and
                publish['Storage'] == self.storage):
            return publish
    raise NoSuchPublish('Publish %s (%s) does not exist' %
                        (self.name, self.storage or 'local'))
|
get_publish
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n publishes = self._get_publishes(self.client)\n for publish in publishes:\n if publish['Distribution'] == self.distribution and publish['Prefix'\n ].replace('/', '_') == (self.prefix or '.') and publish['Storage'\n ] == self.storage:\n return publish\n raise NoSuchPublish('Publish %s (%s) does not exist' % (self.name, self\n .storage or 'local'))\n",
"role": "user"
},
{
"content": "get_publish",
"role": "assistant"
}
] |
def x(self, save_path):
    timestamp = time.strftime('%Y%m%d%H%M%S')
    yaml_dict = {}
    yaml_dict['publish'] = self.name
    yaml_dict['name'] = timestamp
    yaml_dict['components'] = []
    yaml_dict['storage'] = self.storage
    for component, snapshots in self.components.items():
        packages = self.get_packages(component)
        package_dict = []
        for package in packages:
            arch, name, version, ref = self.parse_package_ref(package)
            package_dict.append({'package': name, 'version': version,
                                 'arch': arch, 'ref': ref})
        snapshot = self._find_snapshot(snapshots[0])
        yaml_dict['components'].append({
            'component': component, 'snapshot': snapshot['Name'],
            'description': snapshot['Description'],
            'packages': package_dict})
    name = self.name.replace('/', '-')
    lg.info('Saving publish %s in %s' % (name, save_path))
    with open(save_path, 'w') as save_file:
        yaml.dump(yaml_dict, save_file, default_flow_style=False)
|
save_publish
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, save_path):\n timestamp = time.strftime('%Y%m%d%H%M%S')\n yaml_dict = {}\n yaml_dict['publish'] = self.name\n yaml_dict['name'] = timestamp\n yaml_dict['components'] = []\n yaml_dict['storage'] = self.storage\n for component, snapshots in self.components.items():\n packages = self.get_packages(component)\n package_dict = []\n for package in packages:\n arch, name, version, ref = self.parse_package_ref(package)\n package_dict.append({'package': name, 'version': version,\n 'arch': arch, 'ref': ref})\n snapshot = self._find_snapshot(snapshots[0])\n yaml_dict['components'].append({'component': component, 'snapshot':\n snapshot['Name'], 'description': snapshot['Description'],\n 'packages': package_dict})\n name = self.name.replace('/', '-')\n lg.info('Saving publish %s in %s' % (name, save_path))\n with open(save_path, 'w') as save_file:\n yaml.dump(yaml_dict, save_file, default_flow_style=False)\n",
"role": "user"
},
{
"content": "save_publish",
"role": "assistant"
}
] |
def x(self, config, components, recreate=False):
    if 'all' in components:
        components = []
    try:
        self.load()
        publish = True
    except NoSuchPublish:
        publish = False
    new_publish_snapshots = []
    to_publish = []
    created_snapshots = []
    for saved_component in config.get('components', []):
        component_name = saved_component.get('component')
        if not component_name:
            raise Exception('Corrupted file')
        if components and component_name not in components:
            continue
        saved_packages = []
        if not saved_component.get('packages'):
            raise Exception('Component %s is empty' % component_name)
        for package in saved_component.get('packages'):
            package_ref = '{} {} {} {}'.format(
                package.get('arch'), package.get('package'),
                package.get('version'), package.get('ref'))
            saved_packages.append(package_ref)
        to_publish.append(component_name)
        timestamp = time.strftime('%Y%m%d%H%M%S')
        snapshot_name = '{}-{}-{}'.format(
            'restored', timestamp, saved_component.get('snapshot'))
        lg.debug('Creating snapshot %s for component %s of packages: %s' %
                 (snapshot_name, component_name, saved_packages))
        try:
            self.client.do_post('/snapshots', data={
                'Name': snapshot_name,
                'SourceSnapshots': [],
                'Description': saved_component.get('description'),
                'PackageRefs': saved_packages})
            created_snapshots.append(snapshot_name)
        except AptlyException as e:
            if e.res.status_code == 404:
                self._remove_snapshots(created_snapshots)
                raise Exception("Source snapshot or packages don't exist")
            else:
                raise
        new_publish_snapshots.append({'Component': component_name,
                                      'Name': snapshot_name})
    if components:
        self.publish_snapshots = [
            x for x in self.publish_snapshots
            if x['Component'] not in components and
            x['Component'] not in to_publish]
        check_components = [x for x in new_publish_snapshots
                            if x['Component'] in components]
        if len(check_components) != len(components):
            self._remove_snapshots(created_snapshots)
            raise Exception('Not possible to find all the components '
                            'required in the backup file')
    self.publish_snapshots += new_publish_snapshots
    self.do_publish(recreate=recreate, merge_snapshots=False)
|
restore_publish
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, config, components, recreate=False):\n if 'all' in components:\n components = []\n try:\n self.load()\n publish = True\n except NoSuchPublish:\n publish = False\n new_publish_snapshots = []\n to_publish = []\n created_snapshots = []\n for saved_component in config.get('components', []):\n component_name = saved_component.get('component')\n if not component_name:\n raise Exception('Corrupted file')\n if components and component_name not in components:\n continue\n saved_packages = []\n if not saved_component.get('packages'):\n raise Exception('Component %s is empty' % component_name)\n for package in saved_component.get('packages'):\n package_ref = '{} {} {} {}'.format(package.get('arch'), package\n .get('package'), package.get('version'), package.get('ref'))\n saved_packages.append(package_ref)\n to_publish.append(component_name)\n timestamp = time.strftime('%Y%m%d%H%M%S')\n snapshot_name = '{}-{}-{}'.format('restored', timestamp,\n saved_component.get('snapshot'))\n lg.debug('Creating snapshot %s for component %s of packages: %s' %\n (snapshot_name, component_name, saved_packages))\n try:\n self.client.do_post('/snapshots', data={'Name': snapshot_name,\n 'SourceSnapshots': [], 'Description': saved_component.get(\n 'description'), 'PackageRefs': saved_packages})\n created_snapshots.append(snapshot_name)\n except AptlyException as e:\n if e.res.status_code == 404:\n self._remove_snapshots(created_snapshots)\n raise Exception(\"Source snapshot or packages don't exist\")\n else:\n raise\n new_publish_snapshots.append({'Component': component_name, 'Name':\n snapshot_name})\n if components:\n self.publish_snapshots = [x for x in self.publish_snapshots if x[\n 'Component'] not in components and x['Component'] not in to_publish\n ]\n check_components = [x for x in new_publish_snapshots if x[\n 'Component'] in components]\n if len(check_components) != len(components):\n self._remove_snapshots(created_snapshots)\n raise Exception(\n 'Not possible to find all the components required in the backup file'\n )\n self.publish_snapshots += new_publish_snapshots\n self.do_publish(recreate=recreate, merge_snapshots=False)\n",
"role": "user"
},
{
"content": "restore_publish",
"role": "assistant"
}
] |
def x(self):
    publish = self._get_publish()
    self.architectures = publish['Architectures']
    for source in publish['Sources']:
        component = source['Component']
        snapshot = source['Name']
        self.publish_snapshots.append({'Component': component,
                                       'Name': snapshot})
        snapshot_remote = self._find_snapshot(snapshot)
        for source in self._get_source_snapshots(snapshot_remote,
                                                 fallback_self=True):
            self.add(source, component)
|
load
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n publish = self._get_publish()\n self.architectures = publish['Architectures']\n for source in publish['Sources']:\n component = source['Component']\n snapshot = source['Name']\n self.publish_snapshots.append({'Component': component, 'Name':\n snapshot})\n snapshot_remote = self._find_snapshot(snapshot)\n for source in self._get_source_snapshots(snapshot_remote,\n fallback_self=True):\n self.add(source, component)\n",
"role": "user"
},
{
"content": "load",
"role": "assistant"
}
] |
def x(self, component=None, components=[], packages=None):
    if component:
        components = [component]
    package_refs = []
    for snapshot in self.publish_snapshots:
        if component and snapshot['Component'] not in components:
            continue
        component_refs = self._get_packages(
            self.client, 'snapshots', snapshot['Name'])
        if packages:
            for ref in component_refs:
                if self.parse_package_ref(ref)[1] in packages:
                    package_refs.append(ref)
        else:
            package_refs.extend(component_refs)
    return package_refs
|
get_packages
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, component=None, components=[], packages=None):\n if component:\n components = [component]\n package_refs = []\n for snapshot in self.publish_snapshots:\n if component and snapshot['Component'] not in components:\n continue\n component_refs = self._get_packages(self.client, 'snapshots',\n snapshot['Name'])\n if packages:\n for ref in component_refs:\n if self.parse_package_ref(ref)[1] in packages:\n package_refs.append(ref)\n else:\n package_refs.extend(component_refs)\n return package_refs\n",
"role": "user"
},
{
"content": "get_packages",
"role": "assistant"
}
] |
def x(self, ref):
    if not ref:
        return None
    parsed = re.match('(.*)\\ (.*)\\ (.*)\\ (.*)', ref)
    return parsed.groups()
|
parse_package_ref
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, ref):\n if not ref:\n return None\n parsed = re.match('(.*)\\\\ (.*)\\\\ (.*)\\\\ (.*)', ref)\n return parsed.groups()\n",
"role": "user"
},
{
"content": "parse_package_ref",
"role": "assistant"
}
] |
def x(self, snapshot, component='main'):
    try:
        self.components[component].append(snapshot)
    except KeyError:
        self.components[component] = [snapshot]
|
add
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, snapshot, component='main'):\n try:\n self.components[component].append(snapshot)\n except KeyError:\n self.components[component] = [snapshot]\n",
"role": "user"
},
{
"content": "add",
"role": "assistant"
}
] |
def x(self, name):
    remote_snapshots = self._get_snapshots(self.client)
    for remote in reversed(remote_snapshots):
        if remote['Name'] == name or re.match(name, remote['Name']):
            return remote
    return None
|
find_snapshot
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, name):\n remote_snapshots = self._get_snapshots(self.client)\n for remote in reversed(remote_snapshots):\n if remote['Name'] == name or re.match(name, remote['Name']):\n return remote\n return None\n",
"role": "user"
},
{
"content": "find_snapshot",
"role": "assistant"
}
] |
def x(self, snapshot, fallback_self=False):
    if not snapshot:
        return []
    source_snapshots = re.findall("'([\\w\\d\\.-]+)'", snapshot['Description'])
    if not source_snapshots and fallback_self:
        source_snapshots = [snapshot['Name']]
    source_snapshots.sort()
    return source_snapshots
|
get_source_snapshots
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, snapshot, fallback_self=False):\n if not snapshot:\n return []\n source_snapshots = re.findall(\"'([\\\\w\\\\d\\\\.-]+)'\", snapshot['Description'])\n if not source_snapshots and fallback_self:\n source_snapshots = [snapshot['Name']]\n source_snapshots.sort()\n return source_snapshots\n",
"role": "user"
},
{
"content": "get_source_snapshots",
"role": "assistant"
}
] |
def x(self):
    self.publish_snapshots = []
    for component, snapshots in self.components.items():
        if len(snapshots) <= 1:
            lg.debug('Component %s has only one snapshot %s, '
                     'not creating merge snapshot' % (component, snapshots))
            self.publish_snapshots.append({'Component': component,
                                           'Name': snapshots[0]})
            continue
        remote_snapshot = self._find_snapshot('^%s%s-%s-\\d+' % (
            self.merge_prefix,
            self.name.replace('./', '').replace('/', '-'), component))
        if remote_snapshot:
            source_snapshots = self._get_source_snapshots(remote_snapshot)
            snapshots_want = list(snapshots)
            snapshots_want.sort()
            lg.debug('Comparing snapshots: snapshot_name=%s, '
                     'snapshot_sources=%s, wanted_sources=%s' %
                     (remote_snapshot['Name'], source_snapshots,
                      snapshots_want))
            if snapshots_want == source_snapshots:
                lg.info('Remote merge snapshot already exists: %s (%s)' %
                        (remote_snapshot['Name'], source_snapshots))
                self.publish_snapshots.append(
                    {'Component': component,
                     'Name': remote_snapshot['Name']})
                continue
        snapshot_name = '%s%s-%s-%s' % (
            self.merge_prefix,
            self.name.replace('./', '').replace('/', '-'),
            component, self.timestamp)
        lg.info('Creating merge snapshot %s for component %s of snapshots %s'
                % (snapshot_name, component, snapshots))
        package_refs = []
        for snapshot in snapshots:
            packages = self._get_packages(self.client, 'snapshots', snapshot)
            package_refs.extend(packages)
        try:
            self.client.do_post('/snapshots', data={
                'Name': snapshot_name,
                'SourceSnapshots': snapshots,
                'Description': 'Merged from sources: %s' %
                               ', '.join("'%s'" % snap for snap in snapshots),
                'PackageRefs': package_refs})
        except AptlyException as e:
            if e.res.status_code == 400:
                lg.warning('Error creating snapshot %s, '
                           'assuming it already exists' % snapshot_name)
            else:
                raise
        self.publish_snapshots.append({'Component': component,
                                       'Name': snapshot_name})
|
merge_snapshots
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self.publish_snapshots = []\n for component, snapshots in self.components.items():\n if len(snapshots) <= 1:\n lg.debug(\n 'Component %s has only one snapshot %s, not creating merge snapshot'\n % (component, snapshots))\n self.publish_snapshots.append({'Component': component, 'Name':\n snapshots[0]})\n continue\n remote_snapshot = self._find_snapshot('^%s%s-%s-\\\\d+' % (self.\n merge_prefix, self.name.replace('./', '').replace('/', '-'),\n component))\n if remote_snapshot:\n source_snapshots = self._get_source_snapshots(remote_snapshot)\n snapshots_want = list(snapshots)\n snapshots_want.sort()\n lg.debug(\n 'Comparing snapshots: snapshot_name=%s, snapshot_sources=%s, wanted_sources=%s'\n % (remote_snapshot['Name'], source_snapshots, snapshots_want))\n if snapshots_want == source_snapshots:\n lg.info('Remote merge snapshot already exists: %s (%s)' % (\n remote_snapshot['Name'], source_snapshots))\n self.publish_snapshots.append({'Component': component,\n 'Name': remote_snapshot['Name']})\n continue\n snapshot_name = '%s%s-%s-%s' % (self.merge_prefix, self.name.\n replace('./', '').replace('/', '-'), component, self.timestamp)\n lg.info(\n 'Creating merge snapshot %s for component %s of snapshots %s' %\n (snapshot_name, component, snapshots))\n package_refs = []\n for snapshot in snapshots:\n packages = self._get_packages(self.client, 'snapshots', snapshot)\n package_refs.extend(packages)\n try:\n self.client.do_post('/snapshots', data={'Name': snapshot_name,\n 'SourceSnapshots': snapshots, 'Description': \n 'Merged from sources: %s' % ', '.join(\"'%s'\" % snap for\n snap in snapshots), 'PackageRefs': package_refs})\n except AptlyException as e:\n if e.res.status_code == 400:\n lg.warning(\n 'Error creating snapshot %s, assuming it already exists' %\n snapshot_name)\n else:\n raise\n self.publish_snapshots.append({'Component': component, 'Name':\n snapshot_name})\n",
"role": "user"
},
{
"content": "merge_snapshots",
"role": "assistant"
}
] |
def x(func):

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print_time = kwargs.pop('print_time', False)
        if not print_time:
            return func(*args, **kwargs)
        else:
            start_time = time.time()
            result = func(*args, **kwargs)
            end_time = time.time()
            print(func.__name__ + ' took %.3f seconds' %
                  (end_time - start_time))
            return result
    return wrapper
|
timing_decorator
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(func):\n\n @functools.wraps(func)\n def x(*args, **kwargs):\n print_time = kwargs.pop('print_time', False)\n if not print_time:\n return func(*args, **kwargs)\n else:\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n print(func.__name__ + ' took %.3f seconds' % (end_time -\n start_time))\n return result\n return wrapper\n",
"role": "user"
},
{
"content": "timing_decorator",
"role": "assistant"
}
] |
def x(func):

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        save_name = kwargs.pop('save_name', None)
        save = kwargs.pop('save', save_name is not None)
        load = kwargs.pop('load', save_name is not None)
        overwrite_existing = kwargs.pop('overwrite_existing', True)
        warn_if_error = kwargs.pop('warn_if_error', False)
        if load:
            if save_name is None:
                warnings.warn(
                    '{} has load=True but cannot load because save_name=None'
                    .format(func.__name__), UserWarning)
            else:
                try:
                    return pickle_load(save_name)
                except (OSError, IOError) as err:
                    if warn_if_error:
                        msg = '{} had {} loading file {}.'.format(
                            func.__name__, type(err).__name__, save_name)
                        msg += ' Continuing without loading.'
                        warnings.warn(msg, UserWarning)
        result = func(*args, **kwargs)
        if save:
            if save_name is None:
                warnings.warn(func.__name__ +
                              ' has save=True but cannot save because '
                              'save_name=None', UserWarning)
            else:
                pickle_save(result, save_name,
                            overwrite_existing=overwrite_existing)
        return result
    return wrapper
|
save_load_result
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(func):\n\n @functools.wraps(func)\n def x(*args, **kwargs):\n save_name = kwargs.pop('save_name', None)\n save = kwargs.pop('save', save_name is not None)\n load = kwargs.pop('load', save_name is not None)\n overwrite_existing = kwargs.pop('overwrite_existing', True)\n warn_if_error = kwargs.pop('warn_if_error', False)\n if load:\n if save_name is None:\n warnings.warn(\n '{} has load=True but cannot load because save_name=None'\n .format(func.__name__), UserWarning)\n else:\n try:\n return pickle_load(save_name)\n except (OSError, IOError) as err:\n if warn_if_error:\n msg = '{} had {} loading file {}.'.format(func.\n __name__, type(err).__name__, save_name)\n msg = ' Continuing without loading.'\n warnings.warn(msg, UserWarning)\n result = func(*args, **kwargs)\n if save:\n if save_name is None:\n warnings.warn(func.__name__ + ' has save=True but cannot ' +\n 'save because save_name=None', UserWarning)\n else:\n pickle_save(result, save_name, overwrite_existing=\n overwrite_existing)\n return result\n return wrapper\n",
"role": "user"
},
{
"content": "save_load_result",
"role": "assistant"
}
] |
def x(data, name, **kwargs):
    extension = kwargs.pop('extension', '.pkl')
    overwrite_existing = kwargs.pop('overwrite_existing', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    filename = name + extension
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname) and dirname != '':
        os.makedirs(dirname)
    if os.path.isfile(filename) and not overwrite_existing:
        print(filename + ' already exists! Saving with time appended')
        filename = name + '_' + time.asctime().replace(' ', '_')
        filename += extension
    try:
        PermissionError
    except NameError:
        PermissionError = IOError
    try:
        outfile = open(filename, 'wb')
        pickle.dump(data, outfile)
        outfile.close()
    except (MemoryError, PermissionError) as err:
        warnings.warn(type(err).__name__ +
                      ' in pickle_save: continue without saving.',
                      UserWarning)
|
pickle_save
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data, name, **kwargs):\n extension = kwargs.pop('extension', '.pkl')\n overwrite_existing = kwargs.pop('overwrite_existing', True)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n filename = name + extension\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname) and dirname != '':\n os.makedirs(dirname)\n if os.path.isfile(filename) and not overwrite_existing:\n print(filename + ' already exists! Saving with time appended')\n filename = name + '_' + time.asctime().replace(' ', '_')\n filename += extension\n try:\n PermissionError\n except NameError:\n PermissionError = IOError\n try:\n outfile = open(filename, 'wb')\n pickle.dump(data, outfile)\n outfile.close()\n except (MemoryError, PermissionError) as err:\n warnings.warn(type(err).__name__ +\n ' in pickle_save: continue without saving.', UserWarning)\n",
"role": "user"
},
{
"content": "pickle_save",
"role": "assistant"
}
] |
def x(name, extension='.pkl'):
    filename = name + extension
    infile = open(filename, 'rb')
    data = pickle.load(infile)
    infile.close()
    return data
|
pickle_load
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(name, extension='.pkl'):\n filename = name + extension\n infile = open(filename, 'rb')\n data = pickle.load(infile)\n infile.close()\n return data\n",
"role": "user"
},
{
"content": "pickle_load",
"role": "assistant"
}
] |
def x(ns_run, threads=None, ninit_sep=False, random_seed=False):
    if random_seed is not False:
        state = np.random.get_state()
        np.random.seed(random_seed)
    if threads is None:
        threads = nestcheck.ns_run_utils.get_run_threads(ns_run)
    n_threads = len(threads)
    if ninit_sep:
        try:
            ninit = ns_run['settings']['ninit']
            assert np.all(ns_run['thread_min_max'][:ninit, 0] == -np.inf), (
                'ninit_sep assumes the initial threads are labeled '
                '(0,...,ninit-1), so these should start by sampling the '
                'whole prior.')
            inds = np.random.randint(0, ninit, ninit)
            inds = np.append(inds, np.random.randint(ninit, n_threads,
                                                     n_threads - ninit))
        except KeyError:
            warnings.warn(
                'bootstrap_resample_run has kwarg ninit_sep=True but '
                'ns_run["settings"]["ninit"] does not exist. Doing '
                'bootstrap with ninit_sep=False', UserWarning)
            ninit_sep = False
    if not ninit_sep:
        inds = np.random.randint(0, n_threads, n_threads)
    threads_temp = [threads[i] for i in inds]
    resampled_run = nestcheck.ns_run_utils.combine_threads(threads_temp)
    try:
        resampled_run['settings'] = ns_run['settings']
    except KeyError:
        pass
    if random_seed is not False:
        np.random.set_state(state)
    return resampled_run
|
bootstrap_resample_run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, threads=None, ninit_sep=False, random_seed=False):\n if random_seed is not False:\n state = np.random.get_state()\n np.random.seed(random_seed)\n if threads is None:\n threads = nestcheck.ns_run_utils.get_run_threads(ns_run)\n n_threads = len(threads)\n if ninit_sep:\n try:\n ninit = ns_run['settings']['ninit']\n assert np.all(ns_run['thread_min_max'][:ninit, 0] == -np.inf\n ), 'ninit_sep assumes the initial threads are labeled (0,...,ninit-1), so these should start by sampling the whole prior.'\n inds = np.random.randint(0, ninit, ninit)\n inds = np.append(inds, np.random.randint(ninit, n_threads, \n n_threads - ninit))\n except KeyError:\n warnings.warn(\n 'bootstrap_resample_run has kwarg ninit_sep=True but ns_run[\"settings\"][\"ninit\"] does not exist. Doing bootstrap with ninit_sep=False'\n , UserWarning)\n ninit_sep = False\n if not ninit_sep:\n inds = np.random.randint(0, n_threads, n_threads)\n threads_temp = [threads[i] for i in inds]\n resampled_run = nestcheck.ns_run_utils.combine_threads(threads_temp)\n try:\n resampled_run['settings'] = ns_run['settings']\n except KeyError:\n pass\n if random_seed is not False:\n np.random.set_state(state)\n return resampled_run\n",
"role": "user"
},
{
"content": "bootstrap_resample_run",
"role": "assistant"
}
] |
def x(ns_run, estimator_list, **kwargs):
    bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)
    stds = np.zeros(bs_values.shape[0])
    for j, _ in enumerate(stds):
        stds[j] = np.std(bs_values[j, :], ddof=1)
    return stds
|
run_std_bootstrap
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, estimator_list, **kwargs):\n bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)\n stds = np.zeros(bs_values.shape[0])\n for j, _ in enumerate(stds):\n stds[j] = np.std(bs_values[j, :], ddof=1)\n return stds\n",
"role": "user"
},
{
"content": "run_std_bootstrap",
"role": "assistant"
}
] |
def x(ns_run, estimator_list, **kwargs):
    ninit_sep = kwargs.pop('ninit_sep', False)
    flip_skew = kwargs.pop('flip_skew', True)
    n_simulate = kwargs.pop('n_simulate')
    random_seeds = kwargs.pop('random_seeds', range(n_simulate))
    assert len(random_seeds) == n_simulate
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    threads = nestcheck.ns_run_utils.get_run_threads(ns_run)
    bs_values = np.zeros((len(estimator_list), n_simulate))
    for i, random_seed in enumerate(random_seeds):
        ns_run_temp = bootstrap_resample_run(
            ns_run, threads=threads, ninit_sep=ninit_sep,
            random_seed=random_seed)
        bs_values[:, i] = nestcheck.ns_run_utils.run_estimators(
            ns_run_temp, estimator_list)
        del ns_run_temp
    if flip_skew:
        estimator_means = np.mean(bs_values, axis=1)
        for i, mu in enumerate(estimator_means):
            bs_values[i, :] = 2 * mu - bs_values[i, :]
    return bs_values
|
run_bootstrap_values
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, estimator_list, **kwargs):\n ninit_sep = kwargs.pop('ninit_sep', False)\n flip_skew = kwargs.pop('flip_skew', True)\n n_simulate = kwargs.pop('n_simulate')\n random_seeds = kwargs.pop('random_seeds', range(n_simulate))\n assert len(random_seeds) == n_simulate\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n threads = nestcheck.ns_run_utils.get_run_threads(ns_run)\n bs_values = np.zeros((len(estimator_list), n_simulate))\n for i, random_seed in enumerate(random_seeds):\n ns_run_temp = bootstrap_resample_run(ns_run, threads=threads,\n ninit_sep=ninit_sep, random_seed=random_seed)\n bs_values[:, i] = nestcheck.ns_run_utils.run_estimators(ns_run_temp,\n estimator_list)\n del ns_run_temp\n if flip_skew:\n estimator_means = np.mean(bs_values, axis=1)\n for i, mu in enumerate(estimator_means):\n bs_values[i, :] = 2 * mu - bs_values[i, :]\n return bs_values\n",
"role": "user"
},
{
"content": "run_bootstrap_values",
"role": "assistant"
}
] |
def x(ns_run, estimator_list, **kwargs):
    cred_int = kwargs.pop('cred_int')
    bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)
    expected_estimators = nestcheck.ns_run_utils.run_estimators(
        ns_run, estimator_list)
    cdf = (np.asarray(range(bs_values.shape[1])) + 0.5) / bs_values.shape[1]
    ci_output = expected_estimators * 2
    for i, _ in enumerate(ci_output):
        ci_output[i] -= np.interp(1.0 - cred_int, cdf,
                                  np.sort(bs_values[i, :]))
    return ci_output
|
run_ci_bootstrap
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, estimator_list, **kwargs):\n cred_int = kwargs.pop('cred_int')\n bs_values = run_bootstrap_values(ns_run, estimator_list, **kwargs)\n expected_estimators = nestcheck.ns_run_utils.run_estimators(ns_run,\n estimator_list)\n cdf = (np.asarray(range(bs_values.shape[1])) + 0.5) / bs_values.shape[1]\n ci_output = expected_estimators * 2\n for i, _ in enumerate(ci_output):\n ci_output[i] -= np.interp(1.0 - cred_int, cdf, np.sort(bs_values[i, :])\n )\n return ci_output\n",
"role": "user"
},
{
"content": "run_ci_bootstrap",
"role": "assistant"
}
] |
def x(ns_run, estimator_list, n_simulate=None):
    assert n_simulate is not None, 'run_std_simulate: must specify n_simulate'
    all_values = np.zeros((len(estimator_list), n_simulate))
    for i in range(n_simulate):
        all_values[:, i] = nestcheck.ns_run_utils.run_estimators(
            ns_run, estimator_list, simulate=True)
    stds = np.zeros(all_values.shape[0])
    for i, _ in enumerate(stds):
        stds[i] = np.std(all_values[i, :], ddof=1)
    return stds
|
run_std_simulate
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, estimator_list, n_simulate=None):\n assert n_simulate is not None, 'run_std_simulate: must specify n_simulate'\n all_values = np.zeros((len(estimator_list), n_simulate))\n for i in range(n_simulate):\n all_values[:, i] = nestcheck.ns_run_utils.run_estimators(ns_run,\n estimator_list, simulate=True)\n stds = np.zeros(all_values.shape[0])\n for i, _ in enumerate(stds):\n stds[i] = np.std(all_values[i, :], ddof=1)\n return stds\n",
"role": "user"
},
{
"content": "run_std_simulate",
"role": "assistant"
}
] |
def x(vals_std, vals_std_u, bs_std, bs_std_u, **kwargs):
    nsim = kwargs.pop('nsim', 1000000)
    random_seed = kwargs.pop('random_seed', 0)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    imp_var = vals_std ** 2 - bs_std ** 2
    imp_std = np.sqrt(np.abs(imp_var)) * np.sign(imp_var)
    ind = np.where(imp_std <= 0)[0]
    imp_std[ind] = 0
    imp_std_u = np.zeros(imp_std.shape)
    imp_frac = imp_std / vals_std
    imp_frac_u = np.zeros(imp_frac.shape)
    for i, _ in enumerate(imp_std_u):
        state = np.random.get_state()
        np.random.seed(random_seed)
        sim_vals_std = np.random.normal(vals_std[i], vals_std_u[i], size=nsim)
        sim_bs_std = np.random.normal(bs_std[i], bs_std_u[i], size=nsim)
        sim_imp_var = sim_vals_std ** 2 - sim_bs_std ** 2
        sim_imp_std = np.sqrt(np.abs(sim_imp_var)) * np.sign(sim_imp_var)
        imp_std_u[i] = np.std(sim_imp_std, ddof=1)
        imp_frac_u[i] = np.std(sim_imp_std / sim_vals_std, ddof=1)
        np.random.set_state(state)
    return imp_std, imp_std_u, imp_frac, imp_frac_u
|
implementation_std
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(vals_std, vals_std_u, bs_std, bs_std_u, **kwargs):\n nsim = kwargs.pop('nsim', 1000000)\n random_seed = kwargs.pop('random_seed', 0)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n imp_var = vals_std ** 2 - bs_std ** 2\n imp_std = np.sqrt(np.abs(imp_var)) * np.sign(imp_var)\n ind = np.where(imp_std <= 0)[0]\n imp_std[ind] = 0\n imp_std_u = np.zeros(imp_std.shape)\n imp_frac = imp_std / vals_std\n imp_frac_u = np.zeros(imp_frac.shape)\n for i, _ in enumerate(imp_std_u):\n state = np.random.get_state()\n np.random.seed(random_seed)\n sim_vals_std = np.random.normal(vals_std[i], vals_std_u[i], size=nsim)\n sim_bs_std = np.random.normal(bs_std[i], bs_std_u[i], size=nsim)\n sim_imp_var = sim_vals_std ** 2 - sim_bs_std ** 2\n sim_imp_std = np.sqrt(np.abs(sim_imp_var)) * np.sign(sim_imp_var)\n imp_std_u[i] = np.std(sim_imp_std, ddof=1)\n imp_frac_u[i] = np.std(sim_imp_std / sim_vals_std, ddof=1)\n np.random.set_state(state)\n return imp_std, imp_std_u, imp_frac, imp_frac_u\n",
"role": "user"
},
{
"content": "implementation_std",
"role": "assistant"
}
] |
def x(run, estimator_list):
    threads = nestcheck.ns_run_utils.get_run_threads(run)
    vals_list = [nestcheck.ns_run_utils.run_estimators(th, estimator_list)
                 for th in threads]
    vals_array = np.stack(vals_list, axis=1)
    assert vals_array.shape == (len(estimator_list), len(threads))
    return vals_array
|
run_thread_values
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run, estimator_list):\n threads = nestcheck.ns_run_utils.get_run_threads(run)\n vals_list = [nestcheck.ns_run_utils.run_estimators(th, estimator_list) for\n th in threads]\n vals_array = np.stack(vals_list, axis=1)\n assert vals_array.shape == (len(estimator_list), len(threads))\n return vals_array\n",
"role": "user"
},
{
"content": "run_thread_values",
"role": "assistant"
}
] |
def x(dist_list, earth_mover_dist=True, energy_dist=True):
    out = []
    index = []
    for i, samp_i in enumerate(dist_list):
        for j, samp_j in enumerate(dist_list):
            if j < i:
                index.append(str((i, j)))
                out.append(statistical_distances(
                    samp_i, samp_j, earth_mover_dist=earth_mover_dist,
                    energy_dist=energy_dist))
    columns = ['ks pvalue', 'ks distance']
    if earth_mover_dist:
        columns.append('earth mover distance')
    if energy_dist:
        columns.append('energy distance')
    ser = pd.DataFrame(out, index=index, columns=columns).unstack()
    ser.index.names = ['calculation type', 'run']
    return ser
|
pairwise_distances
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(dist_list, earth_mover_dist=True, energy_dist=True):\n out = []\n index = []\n for i, samp_i in enumerate(dist_list):\n for j, samp_j in enumerate(dist_list):\n if j < i:\n index.append(str((i, j)))\n out.append(statistical_distances(samp_i, samp_j,\n earth_mover_dist=earth_mover_dist, energy_dist=energy_dist)\n )\n columns = ['ks pvalue', 'ks distance']\n if earth_mover_dist:\n columns.append('earth mover distance')\n if energy_dist:\n columns.append('energy distance')\n ser = pd.DataFrame(out, index=index, columns=columns).unstack()\n ser.index.names = ['calculation type', 'run']\n return ser\n",
"role": "user"
},
{
"content": "pairwise_distances",
"role": "assistant"
}
] |
def x(samples1, samples2, earth_mover_dist=True, energy_dist=True):
    out = []
    temp = scipy.stats.ks_2samp(samples1, samples2)
    out.append(temp.pvalue)
    out.append(temp.statistic)
    if earth_mover_dist:
        out.append(scipy.stats.wasserstein_distance(samples1, samples2))
    if energy_dist:
        out.append(scipy.stats.energy_distance(samples1, samples2))
    return np.asarray(out)
|
statistical_distances
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(samples1, samples2, earth_mover_dist=True, energy_dist=True):\n out = []\n temp = scipy.stats.ks_2samp(samples1, samples2)\n out.append(temp.pvalue)\n out.append(temp.statistic)\n if earth_mover_dist:\n out.append(scipy.stats.wasserstein_distance(samples1, samples2))\n if energy_dist:\n out.append(scipy.stats.energy_distance(samples1, samples2))\n return np.asarray(out)\n",
"role": "user"
},
{
"content": "statistical_distances",
"role": "assistant"
}
] |
def x(nsamples, **kwargs):
    seed = kwargs.pop('seed', False)
    ndim = kwargs.pop('ndim', 2)
    logl_start = kwargs.pop('logl_start', -np.inf)
    logl_range = kwargs.pop('logl_range', 1)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    if seed is not False:
        np.random.seed(seed)
    thread = {'logl': np.sort(np.random.random(nsamples)) * logl_range,
              'nlive_array': np.full(nsamples, 1.0),
              'theta': np.random.random((nsamples, ndim)),
              'thread_labels': np.zeros(nsamples).astype(int)}
    if logl_start != -np.inf:
        thread['logl'] += logl_start
    thread['thread_min_max'] = np.asarray([[logl_start, thread['logl'][-1]]])
    return thread
|
get_dummy_thread
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(nsamples, **kwargs):\n seed = kwargs.pop('seed', False)\n ndim = kwargs.pop('ndim', 2)\n logl_start = kwargs.pop('logl_start', -np.inf)\n logl_range = kwargs.pop('logl_range', 1)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n if seed is not False:\n np.random.seed(seed)\n thread = {'logl': np.sort(np.random.random(nsamples)) * logl_range,\n 'nlive_array': np.full(nsamples, 1.0), 'theta': np.random.random((\n nsamples, ndim)), 'thread_labels': np.zeros(nsamples).astype(int)}\n if logl_start != -np.inf:\n thread['logl'] += logl_start\n thread['thread_min_max'] = np.asarray([[logl_start, thread['logl'][-1]]])\n return thread\n",
"role": "user"
},
{
"content": "get_dummy_thread",
"role": "assistant"
}
] |
def x(nthread, nsamples, **kwargs):
    seed = kwargs.pop('seed', False)
    ndim = kwargs.pop('ndim', 2)
    logl_start = kwargs.pop('logl_start', -np.inf)
    logl_range = kwargs.pop('logl_range', 1)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    if seed is not False:
        np.random.seed(seed)
    threads = []
    for _ in range(nthread):
        threads.append(get_dummy_thread(nsamples, ndim=ndim, seed=False,
                                        logl_start=logl_start,
                                        logl_range=logl_range))
    threads = sorted(threads, key=lambda th: th['logl'][0])
    for i, _ in enumerate(threads):
        threads[i]['thread_labels'] = np.full(nsamples, i)
    return nestcheck.ns_run_utils.combine_threads(threads)
|
get_dummy_run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(nthread, nsamples, **kwargs):\n seed = kwargs.pop('seed', False)\n ndim = kwargs.pop('ndim', 2)\n logl_start = kwargs.pop('logl_start', -np.inf)\n logl_range = kwargs.pop('logl_range', 1)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n threads = []\n if seed is not False:\n np.random.seed(seed)\n threads = []\n for _ in range(nthread):\n threads.append(get_dummy_thread(nsamples, ndim=ndim, seed=False,\n logl_start=logl_start, logl_range=logl_range))\n threads = sorted(threads, key=lambda th: th['logl'][0])\n for i, _ in enumerate(threads):\n threads[i]['thread_labels'] = np.full(nsamples, i)\n return nestcheck.ns_run_utils.combine_threads(threads)\n",
"role": "user"
},
{
"content": "get_dummy_run",
"role": "assistant"
}
] |
def x(nsamples, **kwargs):
    seed = kwargs.pop('seed', False)
    ndim = kwargs.pop('ndim', 2)
    nthread_init = kwargs.pop('nthread_init', 2)
    nthread_dyn = kwargs.pop('nthread_dyn', 3)
    logl_range = kwargs.pop('logl_range', 1)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
                         logl_start=-np.inf, logl_range=logl_range)
    dyn_starts = list(np.random.choice(init['logl'], nthread_dyn,
                                       replace=True))
    threads = nestcheck.ns_run_utils.get_run_threads(init)
    threads += [get_dummy_thread(nsamples, ndim=ndim, seed=False,
                                 logl_start=start, logl_range=logl_range)
                for start in dyn_starts]
    for i, _ in enumerate(threads):
        threads[i]['thread_labels'] = np.full(nsamples, i)
    run = nestcheck.ns_run_utils.combine_threads(threads)
    samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
    return nestcheck.data_processing.process_samples_array(samples)
|
get_dummy_dynamic_run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(nsamples, **kwargs):\n seed = kwargs.pop('seed', False)\n ndim = kwargs.pop('ndim', 2)\n nthread_init = kwargs.pop('nthread_init', 2)\n nthread_dyn = kwargs.pop('nthread_dyn', 3)\n logl_range = kwargs.pop('logl_range', 1)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,\n logl_start=-np.inf, logl_range=logl_range)\n dyn_starts = list(np.random.choice(init['logl'], nthread_dyn, replace=True)\n )\n threads = nestcheck.ns_run_utils.get_run_threads(init)\n threads += [get_dummy_thread(nsamples, ndim=ndim, seed=False,\n logl_start=start, logl_range=logl_range) for start in dyn_starts]\n for i, _ in enumerate(threads):\n threads[i]['thread_labels'] = np.full(nsamples, i)\n run = nestcheck.ns_run_utils.combine_threads(threads)\n samples = nestcheck.write_polychord_output.run_dead_birth_array(run)\n return nestcheck.data_processing.process_samples_array(samples)\n",
"role": "user"
},
{
"content": "get_dummy_dynamic_run",
"role": "assistant"
}
] |
def x():
    pkg_dir = get_package_dir()
    with open(os.path.join(pkg_dir, '.pypi_long_desc.rst')) as readme_file:
        long_description = readme_file.read()
    return long_description
|
get_long_description
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n pkg_dir = get_package_dir()\n with open(os.path.join(pkg_dir, '.pypi_long_desc.rst')) as readme_file:\n long_description = readme_file.read()\n return long_description\n",
"role": "user"
},
{
"content": "get_long_description",
"role": "assistant"
}
] |
def x():
    pkg_dir = get_package_dir()
    with open(os.path.join(pkg_dir, 'nestcheck/_version.py')) as ver_file:
        string = ver_file.read()
    return string.strip().replace('__version__ = ', '').replace("'", '')
|
get_version
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n pkg_dir = get_package_dir()\n with open(os.path.join(pkg_dir, 'nestcheck/_version.py')) as ver_file:\n string = ver_file.read()\n return string.strip().replace('__version__ = ', '').replace(\"'\", '')\n",
"role": "user"
},
{
"content": "get_version",
"role": "assistant"
}
] |
def x(method_names, run_dict, **kwargs):
    logx_given_logl = kwargs.pop('logx_given_logl', None)
    logl_given_logx = kwargs.pop('logl_given_logx', None)
    logx_min = kwargs.pop('logx_min', None)
    ymax = kwargs.pop('ymax', None)
    npoints = kwargs.pop('npoints', 100)
    figsize = kwargs.pop('figsize', (6.4, 2))
    post_mass_norm = kwargs.pop('post_mass_norm', None)
    cum_post_mass_norm = kwargs.pop('cum_post_mass_norm', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    assert set(method_names) == set(run_dict.keys()), (
        'input method names=' + str(method_names) +
        ' do not match run_dict keys=' + str(run_dict.keys()))
    fig = plt.figure(figsize=figsize)
    ax = plt.gca()
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    linecolor_dict = {'standard': colors[2], 'dynamic $G=0$': colors[8],
                      'dynamic $G=1$': colors[9]}
    ax.set_prop_cycle('color', [colors[i] for i in [4, 1, 6, 0, 3, 5, 7]])
    integrals_dict = {}
    logx_min_list = []
    for method_name in method_names:
        integrals = np.zeros(len(run_dict[method_name]))
        for nr, run in enumerate(run_dict[method_name]):
            if 'logx' in run:
                logx = run['logx']
            elif logx_given_logl is not None:
                logx = logx_given_logl(run['logl'])
            else:
                logx = nestcheck.ns_run_utils.get_logx(run['nlive_array'],
                                                       simulate=False)
            logx_min_list.append(logx[-1])
            logx[0] = 0
            if nr == 0:
                try:
                    line, = ax.plot(logx, run['nlive_array'], linewidth=1,
                                    label=method_name,
                                    color=linecolor_dict[method_name])
                except KeyError:
                    line, = ax.plot(logx, run['nlive_array'], linewidth=1,
                                    label=method_name)
            else:
                ax.plot(logx, run['nlive_array'], linewidth=1,
                        color=line.get_color())
            integrals[nr] = -np.trapz(run['nlive_array'], x=logx)
        integrals_dict[method_name] = integrals[np.isfinite(integrals)]
    if logx_min is None:
        logx_min = np.asarray(logx_min_list).min()
    if logl_given_logx is not None:
        logx_plot = np.linspace(logx_min, 0, npoints)
        logl = logl_given_logx(logx_plot)
        logx_plot = logx_plot[np.where(~np.isnan(logl))[0]]
        logl = logl[np.where(~np.isnan(logl))[0]]
        w_an = rel_posterior_mass(logx_plot, logl)
        w_an *= average_by_key(integrals_dict, post_mass_norm)
        ax.plot(logx_plot, w_an, linewidth=2,
                label='relative posterior mass', linestyle=':', color='k')
        w_an_c = np.cumsum(w_an)
        w_an_c /= np.trapz(w_an_c, x=logx_plot)
        w_an_c *= average_by_key(integrals_dict, cum_post_mass_norm)
        ax.plot(logx_plot, w_an_c, linewidth=2, linestyle='--',
                dashes=(2, 3), label='posterior mass remaining',
                color='darkblue')
    ax.set_ylabel('number of live points')
    ax.set_xlabel('$\\log X $')
    if ymax is not None:
        ax.set_ylim([0, ymax])
    else:
        ax.set_ylim(bottom=0)
    ax.set_xlim([logx_min, 0])
    ax.legend()
    return fig
|
plot_run_nlive
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(method_names, run_dict, **kwargs):\n logx_given_logl = kwargs.pop('logx_given_logl', None)\n logl_given_logx = kwargs.pop('logl_given_logx', None)\n logx_min = kwargs.pop('logx_min', None)\n ymax = kwargs.pop('ymax', None)\n npoints = kwargs.pop('npoints', 100)\n figsize = kwargs.pop('figsize', (6.4, 2))\n post_mass_norm = kwargs.pop('post_mass_norm', None)\n cum_post_mass_norm = kwargs.pop('cum_post_mass_norm', None)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n assert set(method_names) == set(run_dict.keys()\n ), 'input method names=' + str(method_names\n ) + ' do not match run_dict keys=' + str(run_dict.keys())\n fig = plt.figure(figsize=figsize)\n ax = plt.gca()\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n linecolor_dict = {'standard': colors[2], 'dynamic $G=0$': colors[8],\n 'dynamic $G=1$': colors[9]}\n ax.set_prop_cycle('color', [colors[i] for i in [4, 1, 6, 0, 3, 5, 7]])\n integrals_dict = {}\n logx_min_list = []\n for method_name in method_names:\n integrals = np.zeros(len(run_dict[method_name]))\n for nr, run in enumerate(run_dict[method_name]):\n if 'logx' in run:\n logx = run['logx']\n elif logx_given_logl is not None:\n logx = logx_given_logl(run['logl'])\n else:\n logx = nestcheck.ns_run_utils.get_logx(run['nlive_array'],\n simulate=False)\n logx_min_list.append(logx[-1])\n logx[0] = 0\n if nr == 0:\n try:\n line, = ax.plot(logx, run['nlive_array'], linewidth=1,\n label=method_name, color=linecolor_dict[method_name])\n except KeyError:\n line, = ax.plot(logx, run['nlive_array'], linewidth=1,\n label=method_name)\n else:\n ax.plot(logx, run['nlive_array'], linewidth=1, color=line.\n get_color())\n integrals[nr] = -np.trapz(run['nlive_array'], x=logx)\n integrals_dict[method_name] = integrals[np.isfinite(integrals)]\n if logx_min is None:\n logx_min = np.asarray(logx_min_list).min()\n if logl_given_logx is not None:\n logx_plot = np.linspace(logx_min, 0, npoints)\n logl = logl_given_logx(logx_plot)\n logx_plot = logx_plot[np.where(~np.isnan(logl))[0]]\n logl = logl[np.where(~np.isnan(logl))[0]]\n w_an = rel_posterior_mass(logx_plot, logl)\n w_an *= average_by_key(integrals_dict, post_mass_norm)\n ax.plot(logx_plot, w_an, linewidth=2, label=\n 'relative posterior mass', linestyle=':', color='k')\n w_an_c = np.cumsum(w_an)\n w_an_c /= np.trapz(w_an_c, x=logx_plot)\n w_an_c *= average_by_key(integrals_dict, cum_post_mass_norm)\n ax.plot(logx_plot, w_an_c, linewidth=2, linestyle='--', dashes=(2, \n 3), label='posterior mass remaining', color='darkblue')\n ax.set_ylabel('number of live points')\n ax.set_xlabel('$\\\\log X $')\n if ymax is not None:\n ax.set_ylim([0, ymax])\n else:\n ax.set_ylim(bottom=0)\n ax.set_xlim([logx_min, 0])\n ax.legend()\n return fig\n",
"role": "user"
},
{
"content": "plot_run_nlive",
"role": "assistant"
}
] |
def x(df, xlims=None, **kwargs):
    assert xlims is None or isinstance(xlims, dict)
    figsize = kwargs.pop('figsize', (6.4, 1.5))
    num_xticks = kwargs.pop('num_xticks', None)
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows)))
    normalize = kwargs.pop('normalize', True)
    legend = kwargs.pop('legend', False)
    legend_kwargs = kwargs.pop('legend_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
    for nax, col in enumerate(df):
        if nrows == 1:
            ax = axes[nax]
        else:
            ax = axes[nax // ncols, nax % ncols]
        supmin = df[col].apply(np.min).min()
        supmax = df[col].apply(np.max).max()
        support = np.linspace(supmin - 0.1 * (supmax - supmin),
                              supmax + 0.1 * (supmax - supmin), 200)
        handles = []
        labels = []
        for name, samps in df[col].iteritems():
            pdf = scipy.stats.gaussian_kde(samps)(support)
            if not normalize:
                pdf /= pdf.max()
            handles.append(ax.plot(support, pdf, label=name)[0])
            labels.append(name)
        ax.set_ylim(bottom=0)
        ax.set_yticks([])
        if xlims is not None:
            try:
                ax.set_xlim(xlims[col])
            except KeyError:
                pass
        ax.set_xlabel(col)
        if num_xticks is not None:
            ax.xaxis.set_major_locator(
                matplotlib.ticker.MaxNLocator(nbins=num_xticks))
    if legend:
        fig.legend(handles, labels, **legend_kwargs)
    return fig
|
kde_plot_df
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(df, xlims=None, **kwargs):\n assert xlims is None or isinstance(xlims, dict)\n figsize = kwargs.pop('figsize', (6.4, 1.5))\n num_xticks = kwargs.pop('num_xticks', None)\n nrows = kwargs.pop('nrows', 1)\n ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows)))\n normalize = kwargs.pop('normalize', True)\n legend = kwargs.pop('legend', False)\n legend_kwargs = kwargs.pop('legend_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)\n for nax, col in enumerate(df):\n if nrows == 1:\n ax = axes[nax]\n else:\n ax = axes[nax // ncols, nax % ncols]\n supmin = df[col].apply(np.min).min()\n supmax = df[col].apply(np.max).max()\n support = np.linspace(supmin - 0.1 * (supmax - supmin), supmax + \n 0.1 * (supmax - supmin), 200)\n handles = []\n labels = []\n for name, samps in df[col].iteritems():\n pdf = scipy.stats.gaussian_kde(samps)(support)\n if not normalize:\n pdf /= pdf.max()\n handles.append(ax.plot(support, pdf, label=name)[0])\n labels.append(name)\n ax.set_ylim(bottom=0)\n ax.set_yticks([])\n if xlims is not None:\n try:\n ax.set_xlim(xlims[col])\n except KeyError:\n pass\n ax.set_xlabel(col)\n if num_xticks is not None:\n ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(nbins=\n num_xticks))\n if legend:\n fig.legend(handles, labels, **legend_kwargs)\n return fig\n",
"role": "user"
},
{
"content": "kde_plot_df",
"role": "assistant"
}
] |
def x(run_list, **kwargs):
    fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0],
                                     lambda theta: theta[:, 1]])
    labels = kwargs.pop('labels', [('$\\theta_' + str(i + 1) + '$')
                                   for i in range(len(fthetas))])
    ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
    n_simulate = kwargs.pop('n_simulate', 100)
    random_seed = kwargs.pop('random_seed', 0)
    figsize = kwargs.pop('figsize', (6.4, 2))
    nx = kwargs.pop('nx', 100)
    ny = kwargs.pop('ny', nx)
    cache_in = kwargs.pop('cache', None)
    parallel = kwargs.pop('parallel', True)
    rasterize_contours = kwargs.pop('rasterize_contours', True)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    state = np.random.get_state()
    np.random.seed(random_seed)
    if not isinstance(run_list, list):
        run_list = [run_list]
    assert len(labels) == len(fthetas), \
        'There should be the same number of axes and labels'
    width_ratios = [40] * len(fthetas) + [1] * len(run_list)
    fig, axes = plt.subplots(nrows=1, ncols=len(run_list) + len(fthetas),
                             gridspec_kw={'wspace': 0.1,
                                          'width_ratios': width_ratios},
                             figsize=figsize)
    colormaps = ['Reds_r', 'Blues_r', 'Greys_r', 'Greens_r', 'Oranges_r']
    mean_colors = ['darkred', 'darkblue', 'darkgrey', 'darkgreen',
                   'darkorange']
    for nrun, run in reversed(list(enumerate(run_list))):
        try:
            cache = cache_in + '_' + str(nrun)
        except TypeError:
            cache = None
        cbar = plot_bs_dists(run, fthetas, axes[:len(fthetas)],
                             parallel=parallel, ftheta_lims=ftheta_lims,
                             cache=cache, n_simulate=n_simulate, nx=nx,
                             ny=ny, rasterize_contours=rasterize_contours,
                             mean_color=mean_colors[nrun],
                             colormap=colormaps[nrun],
                             tqdm_kwargs=tqdm_kwargs)
        colorbar_plot = plt.colorbar(cbar, cax=axes[len(fthetas) + nrun],
                                     ticks=[1, 2, 3])
        colorbar_plot.solids.set_edgecolor('face')
        colorbar_plot.ax.set_yticklabels([])
        if nrun == len(run_list) - 1:
            colorbar_plot.ax.set_yticklabels(['$1\\sigma$', '$2\\sigma$',
                                              '$3\\sigma$'])
    for nax, ax in enumerate(axes[:len(fthetas)]):
        ax.set_yticks([])
        ax.set_xlabel(labels[nax])
        if ax.is_first_col():
            ax.set_ylabel('probability')
        prune = 'upper' if nax != len(fthetas) - 1 else None
        ax.xaxis.set_major_locator(
            matplotlib.ticker.MaxNLocator(nbins=5, prune=prune))
    np.random.set_state(state)
    return fig
|
bs_param_dists
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run_list, **kwargs):\n fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0], lambda\n theta: theta[:, 1]])\n labels = kwargs.pop('labels', [('$\\\\theta_' + str(i + 1) + '$') for i in\n range(len(fthetas))])\n ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))\n n_simulate = kwargs.pop('n_simulate', 100)\n random_seed = kwargs.pop('random_seed', 0)\n figsize = kwargs.pop('figsize', (6.4, 2))\n nx = kwargs.pop('nx', 100)\n ny = kwargs.pop('ny', nx)\n cache_in = kwargs.pop('cache', None)\n parallel = kwargs.pop('parallel', True)\n rasterize_contours = kwargs.pop('rasterize_contours', True)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n state = np.random.get_state()\n np.random.seed(random_seed)\n if not isinstance(run_list, list):\n run_list = [run_list]\n assert len(labels) == len(fthetas\n ), 'There should be the same number of axes and labels'\n width_ratios = [40] * len(fthetas) + [1] * len(run_list)\n fig, axes = plt.subplots(nrows=1, ncols=len(run_list) + len(fthetas),\n gridspec_kw={'wspace': 0.1, 'width_ratios': width_ratios}, figsize=\n figsize)\n colormaps = ['Reds_r', 'Blues_r', 'Greys_r', 'Greens_r', 'Oranges_r']\n mean_colors = ['darkred', 'darkblue', 'darkgrey', 'darkgreen', 'darkorange'\n ]\n for nrun, run in reversed(list(enumerate(run_list))):\n try:\n cache = cache_in + '_' + str(nrun)\n except TypeError:\n cache = None\n cbar = plot_bs_dists(run, fthetas, axes[:len(fthetas)], parallel=\n parallel, ftheta_lims=ftheta_lims, cache=cache, n_simulate=\n n_simulate, nx=nx, ny=ny, rasterize_contours=rasterize_contours,\n mean_color=mean_colors[nrun], colormap=colormaps[nrun],\n tqdm_kwargs=tqdm_kwargs)\n colorbar_plot = plt.colorbar(cbar, cax=axes[len(fthetas) + nrun],\n ticks=[1, 2, 3])\n colorbar_plot.solids.set_edgecolor('face')\n colorbar_plot.ax.set_yticklabels([])\n if nrun == len(run_list) - 1:\n colorbar_plot.ax.set_yticklabels(['$1\\\\sigma$', '$2\\\\sigma$',\n '$3\\\\sigma$'])\n for nax, ax in enumerate(axes[:len(fthetas)]):\n ax.set_yticks([])\n ax.set_xlabel(labels[nax])\n if ax.is_first_col():\n ax.set_ylabel('probability')\n prune = 'upper' if nax != len(fthetas) - 1 else None\n ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(nbins=5,\n prune=prune))\n np.random.set_state(state)\n return fig\n",
"role": "user"
},
{
"content": "bs_param_dists",
"role": "assistant"
}
] |
def x(run, fthetas, axes, **kwargs):
ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
n_simulate = kwargs.pop('n_simulate', 100)
colormap = kwargs.pop('colormap', plt.get_cmap('Reds_r'))
mean_color = kwargs.pop('mean_color', None)
nx = kwargs.pop('nx', 100)
ny = kwargs.pop('ny', nx)
cache_in = kwargs.pop('cache', None)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
smooth = kwargs.pop('smooth', False)
flip_axes = kwargs.pop('flip_axes', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'leave': False})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(fthetas) == len(axes
), 'There should be the same number of axes and functions to plot'
assert len(fthetas) == len(ftheta_lims
), 'There should be the same number of axes and functions to plot'
threads = nestcheck.ns_run_utils.get_run_threads(run)
bs_samps = []
for i in range(n_simulate):
run_temp = nestcheck.error_analysis.bootstrap_resample_run(run,
threads=threads)
w_temp = nestcheck.ns_run_utils.get_w_rel(run_temp, simulate=False)
bs_samps.append((run_temp['theta'], w_temp))
for nf, ftheta in enumerate(fthetas):
max_samps = 2 * max([bs_samp[0].shape[0] for bs_samp in bs_samps])
samples_array = np.full((n_simulate, max_samps), np.nan)
for i, (theta, weights) in enumerate(bs_samps):
nsamp = 2 * theta.shape[0]
samples_array[i, :nsamp:2] = ftheta(theta)
samples_array[i, 1:nsamp:2] = weights
ftheta_vals = np.linspace(ftheta_lims[nf][0], ftheta_lims[nf][1], nx)
try:
cache = cache_in + '_' + str(nf)
except TypeError:
cache = None
samp_kde = functools.partial(alternate_helper, func=
weighted_1d_gaussian_kde)
y, pmf = fgivenx.drivers.compute_pmf(samp_kde, ftheta_vals,
samples_array, ny=ny, cache=cache, parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
if flip_axes:
cbar = fgivenx.plot.plot(y, ftheta_vals, np.swapaxes(pmf, 0, 1),
axes[nf], colors=colormap, rasterize_contours=
rasterize_contours, smooth=smooth)
else:
cbar = fgivenx.plot.plot(ftheta_vals, y, pmf, axes[nf], colors=
colormap, rasterize_contours=rasterize_contours, smooth=smooth)
if mean_color is not None:
w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)
w_rel /= np.sum(w_rel)
means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]
for nf, mean in enumerate(means):
if flip_axes:
axes[nf].axhline(y=mean, lw=1, linestyle='--', color=mean_color
)
else:
axes[nf].axvline(x=mean, lw=1, linestyle='--', color=mean_color
)
return cbar
|
plot_bs_dists
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run, fthetas, axes, **kwargs):\n ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))\n n_simulate = kwargs.pop('n_simulate', 100)\n colormap = kwargs.pop('colormap', plt.get_cmap('Reds_r'))\n mean_color = kwargs.pop('mean_color', None)\n nx = kwargs.pop('nx', 100)\n ny = kwargs.pop('ny', nx)\n cache_in = kwargs.pop('cache', None)\n parallel = kwargs.pop('parallel', True)\n rasterize_contours = kwargs.pop('rasterize_contours', True)\n smooth = kwargs.pop('smooth', False)\n flip_axes = kwargs.pop('flip_axes', False)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'leave': False})\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n assert len(fthetas) == len(axes\n ), 'There should be the same number of axes and functions to plot'\n assert len(fthetas) == len(ftheta_lims\n ), 'There should be the same number of axes and functions to plot'\n threads = nestcheck.ns_run_utils.get_run_threads(run)\n bs_samps = []\n for i in range(n_simulate):\n run_temp = nestcheck.error_analysis.bootstrap_resample_run(run,\n threads=threads)\n w_temp = nestcheck.ns_run_utils.get_w_rel(run_temp, simulate=False)\n bs_samps.append((run_temp['theta'], w_temp))\n for nf, ftheta in enumerate(fthetas):\n max_samps = 2 * max([bs_samp[0].shape[0] for bs_samp in bs_samps])\n samples_array = np.full((n_simulate, max_samps), np.nan)\n for i, (theta, weights) in enumerate(bs_samps):\n nsamp = 2 * theta.shape[0]\n samples_array[i, :nsamp:2] = ftheta(theta)\n samples_array[i, 1:nsamp:2] = weights\n ftheta_vals = np.linspace(ftheta_lims[nf][0], ftheta_lims[nf][1], nx)\n try:\n cache = cache_in + '_' + str(nf)\n except TypeError:\n cache = None\n samp_kde = functools.partial(alternate_helper, func=\n weighted_1d_gaussian_kde)\n y, pmf = fgivenx.drivers.compute_pmf(samp_kde, ftheta_vals,\n samples_array, ny=ny, cache=cache, parallel=parallel,\n tqdm_kwargs=tqdm_kwargs)\n if flip_axes:\n cbar = fgivenx.plot.plot(y, ftheta_vals, np.swapaxes(pmf, 0, 1),\n axes[nf], colors=colormap, rasterize_contours=\n rasterize_contours, smooth=smooth)\n else:\n cbar = fgivenx.plot.plot(ftheta_vals, y, pmf, axes[nf], colors=\n colormap, rasterize_contours=rasterize_contours, smooth=smooth)\n if mean_color is not None:\n w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)\n w_rel /= np.sum(w_rel)\n means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]\n for nf, mean in enumerate(means):\n if flip_axes:\n axes[nf].axhline(y=mean, lw=1, linestyle='--', color=mean_color\n )\n else:\n axes[nf].axvline(x=mean, lw=1, linestyle='--', color=mean_color\n )\n return cbar\n",
"role": "user"
},
{
"content": "plot_bs_dists",
"role": "assistant"
}
] |
def x(x, alt_samps, func=None):
alt_samps = alt_samps[~np.isnan(alt_samps)]
arg1 = alt_samps[::2]
arg2 = alt_samps[1::2]
return func(x, arg1, arg2)
|
alternate_helper
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(x, alt_samps, func=None):\n alt_samps = alt_samps[~np.isnan(alt_samps)]\n arg1 = alt_samps[::2]\n arg2 = alt_samps[1::2]\n return func(x, arg1, arg2)\n",
"role": "user"
},
{
"content": "alternate_helper",
"role": "assistant"
}
] |
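The helper above exists because fgivenx hands each interpolation function a single flat row of the padded samples array, so values and weights are interleaved to survive as one row. A minimal sketch of that de-interleaving, using only numpy (the row layout matches the one built in plot_bs_dists above):

import numpy as np

# One row of the (n_simulate, max_samps) array: values at even indexes,
# weights at odd indexes, NaN-padded to a common length.
row = np.array([0.1, 0.5, 0.9, 0.5, np.nan, np.nan])
kept = row[~np.isnan(row)]               # strip the NaN padding
values, weights = kept[::2], kept[1::2]  # de-interleave
print(values)   # [0.1 0.9]
print(weights)  # [0.5 0.5]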
def x(x, samples, weights):
assert x.ndim == 1
assert samples.ndim == 1
assert samples.shape == weights.shape
weights /= np.sum(weights)
nz_weights = weights[np.nonzero(weights)]
nsamp_eff = np.exp(-1.0 * np.sum(nz_weights * np.log(nz_weights)))
mu = np.sum(weights * samples)
var = np.sum(weights * (samples - mu) ** 2)
var *= nsamp_eff / (nsamp_eff - 1)
scott_factor = np.power(nsamp_eff, -1.0 / 5)
sig = np.sqrt(var) * scott_factor
xx, ss = np.meshgrid(x, samples)
chisquared = ((xx - ss) / sig) ** 2
energy = np.exp(-0.5 * chisquared) / np.sqrt(2 * np.pi * sig ** 2)
result = np.sum(energy * weights[:, np.newaxis], axis=0)
return result
|
weighted_1d_gaussian_kde
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(x, samples, weights):\n assert x.ndim == 1\n assert samples.ndim == 1\n assert samples.shape == weights.shape\n weights /= np.sum(weights)\n nz_weights = weights[np.nonzero(weights)]\n nsamp_eff = np.exp(-1.0 * np.sum(nz_weights * np.log(nz_weights)))\n mu = np.sum(weights * samples)\n var = np.sum(weights * (samples - mu) ** 2)\n var *= nsamp_eff / (nsamp_eff - 1)\n scott_factor = np.power(nsamp_eff, -1.0 / 5)\n sig = np.sqrt(var) * scott_factor\n xx, ss = np.meshgrid(x, samples)\n chisquared = ((xx - ss) / sig) ** 2\n energy = np.exp(-0.5 * chisquared) / np.sqrt(2 * np.pi * sig ** 2)\n result = np.sum(energy * weights[:, np.newaxis], axis=0)\n return result\n",
"role": "user"
},
{
"content": "weighted_1d_gaussian_kde",
"role": "assistant"
}
] |
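Usage sketch for the weighted KDE above: it estimates a 1d density from weighted samples, choosing the bandwidth by Scott's rule with an effective sample size computed from the entropy of the weights. The import path assumes these rows come from the nestcheck package's plots module; that origin is an inference, not stated in this file. Note the function normalises the weights array in place.

import numpy as np
from nestcheck.plots import weighted_1d_gaussian_kde  # assumed origin of this row

rng = np.random.default_rng(0)
samples = rng.normal(size=500)
weights = rng.random(500)   # normalised in place by the function
x = np.linspace(-4, 4, 9)
print(weighted_1d_gaussian_kde(x, samples, weights))  # density at each x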
def x(logx, logl):
logw = logx + logl
w_rel = np.exp(logw - logw.max())
w_rel /= np.abs(np.trapz(w_rel, x=logx))
return w_rel
|
rel_posterior_mass
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(logx, logl):\n logw = logx + logl\n w_rel = np.exp(logw - logw.max())\n w_rel /= np.abs(np.trapz(w_rel, x=logx))\n return w_rel\n",
"role": "user"
},
{
"content": "rel_posterior_mass",
"role": "assistant"
}
] |
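A quick check of the relative posterior mass helper: it exponentiates logw = logx + logl, shifts by the maximum for numerical stability, and normalises so the curve has unit area under trapezoidal integration. Sketch assuming the same nestcheck.plots origin:

import numpy as np
from nestcheck.plots import rel_posterior_mass  # assumed origin

logx = np.linspace(0, -30, 300)   # shrinking prior volume
logl = -(logx + 15) ** 2 / 4      # toy likelihood peaked mid-run
w_rel = rel_posterior_mass(logx, logl)
print(np.abs(np.trapz(w_rel, x=logx)))  # 1.0: normalised to unit area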
def x(dict_in, key):
if key is None:
return np.mean(np.concatenate(list(dict_in.values())))
else:
try:
return np.mean(dict_in[key])
except KeyError:
print('method name "' + key + '" not found, so ' +
'normalise area under the analytic relative posterior ' +
'mass curve using the mean of all methods.')
return np.mean(np.concatenate(list(dict_in.values())))
|
average_by_key
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(dict_in, key):\n if key is None:\n return np.mean(np.concatenate(list(dict_in.values())))\n else:\n try:\n return np.mean(dict_in[key])\n except KeyError:\n print('method name \"' + key + '\" not found, so ' +\n 'normalise area under the analytic relative posterior ' +\n 'mass curve using the mean of all methods.')\n return np.mean(np.concatenate(list(dict_in.values())))\n",
"role": "user"
},
{
"content": "average_by_key",
"role": "assistant"
}
] |
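The dict-averaging helper falls back gracefully when the requested key is missing. A short sketch (nestcheck.plots origin assumed):

import numpy as np
from nestcheck.plots import average_by_key  # assumed origin

vals = {'a': np.array([1.0, 3.0]), 'b': np.array([5.0])}
print(average_by_key(vals, 'a'))   # 2.0: mean of method 'a' only
print(average_by_key(vals, None))  # 3.0: mean over all methods pooled
print(average_by_key(vals, 'c'))   # prints a notice, then falls back to 3.0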
def x(file_roots, **kwargs):
base_dir = kwargs.pop('base_dir', 'chains')
process_func = kwargs.pop('process_func', process_polychord_run)
func_kwargs = kwargs.pop('func_kwargs', {})
func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())
data = nestcheck.parallel_utils.parallel_apply(process_error_helper,
file_roots, func_args=(base_dir, process_func), func_kwargs=
func_kwargs, **kwargs)
data = sorted(data, key=lambda x: file_roots.index(x['output'][
'file_root']))
errors = {}
for i, run in enumerate(data):
if 'error' in run:
try:
errors[run['error']].append(i)
except KeyError:
errors[run['error']] = [i]
for error_name, index_list in errors.items():
message = error_name + ' processing ' + str(len(index_list)
) + ' / ' + str(len(file_roots)) + ' files'
if len(index_list) != len(file_roots):
message += '. Roots with errors have (zero based) indexes: ' + str(
index_list)
print(message)
return [run for run in data if 'error' not in run]
|
batch_process_data
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_roots, **kwargs):\n base_dir = kwargs.pop('base_dir', 'chains')\n process_func = kwargs.pop('process_func', process_polychord_run)\n func_kwargs = kwargs.pop('func_kwargs', {})\n func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())\n data = nestcheck.parallel_utils.parallel_apply(process_error_helper,\n file_roots, func_args=(base_dir, process_func), func_kwargs=\n func_kwargs, **kwargs)\n data = sorted(data, key=lambda x: file_roots.index(x['output'][\n 'file_root']))\n errors = {}\n for i, run in enumerate(data):\n if 'error' in run:\n try:\n errors[run['error']].append(i)\n except KeyError:\n errors[run['error']] = [i]\n for error_name, index_list in errors.items():\n message = error_name + ' processing ' + str(len(index_list)\n ) + ' / ' + str(len(file_roots)) + ' files'\n if len(index_list) != len(file_roots):\n message += '. Roots with errors have (zero based) indexes: ' + str(\n index_list)\n print(message)\n return [run for run in data if 'error' not in run]\n",
"role": "user"
},
{
"content": "batch_process_data",
"role": "assistant"
}
] |
def x(root, base_dir, process_func, errors_to_handle=(), **func_kwargs):
try:
return process_func(root, base_dir, **func_kwargs)
except errors_to_handle as err:
run = {'error': type(err).__name__, 'output': {'file_root': root}}
return run
|
process_error_helper
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(root, base_dir, process_func, errors_to_handle=(), **func_kwargs):\n try:\n return process_func(root, base_dir, **func_kwargs)\n except errors_to_handle as err:\n run = {'error': type(err).__name__, 'output': {'file_root': root}}\n return run\n",
"role": "user"
},
{
"content": "process_error_helper",
"role": "assistant"
}
] |
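The error helper wraps a processing function so one bad file cannot abort a whole batch: handled exceptions become a dict tagged with the error type, which batch_process_data then filters out. A runnable sketch with a deliberately failing loader (the loader name and the nestcheck.data_processing import are illustrative assumptions):

from nestcheck.data_processing import process_error_helper  # assumed origin

def broken_loader(root, base_dir):
    # hypothetical loader standing in for process_polychord_run etc.
    raise OSError('missing {0}/{1}'.format(base_dir, root))

res = process_error_helper('run_01', 'chains', broken_loader,
                           errors_to_handle=(OSError,))
print(res)  # {'error': 'OSError', 'output': {'file_root': 'run_01'}}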
def x(file_root, base_dir, process_stats_file=True, **kwargs):
samples = np.loadtxt(os.path.join(base_dir, file_root) + '_dead-birth.txt')
ns_run = process_samples_array(samples, **kwargs)
ns_run['output'] = {'base_dir': base_dir, 'file_root': file_root}
if process_stats_file:
try:
ns_run['output'] = process_polychord_stats(file_root, base_dir)
except (OSError, IOError, ValueError) as err:
warnings.warn(
'process_polychord_stats raised {} processing {}.stats file. Proceeding without stats.'
.format(type(err).__name__, os.path.join(base_dir,
file_root)), UserWarning)
return ns_run
|
process_polychord_run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_root, base_dir, process_stats_file=True, **kwargs):\n samples = np.loadtxt(os.path.join(base_dir, file_root) + '_dead-birth.txt')\n ns_run = process_samples_array(samples, **kwargs)\n ns_run['output'] = {'base_dir': base_dir, 'file_root': file_root}\n if process_stats_file:\n try:\n ns_run['output'] = process_polychord_stats(file_root, base_dir)\n except (OSError, IOError, ValueError) as err:\n warnings.warn(\n 'process_polychord_stats raised {} processing {}.stats file. Proceeding without stats.'\n .format(type(err).__name__, os.path.join(base_dir,\n file_root)), UserWarning)\n return ns_run\n",
"role": "user"
},
{
"content": "process_polychord_run",
"role": "assistant"
}
] |
def x(file_root, base_dir, **kwargs):
dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt')
live = np.loadtxt(os.path.join(base_dir, file_root) +
'-phys_live-birth.txt')
dead = dead[:, :-2]
live = live[:, :-1]
assert dead[:, -2].max() < live[:, -2].min(), (
'final live points should have greater logls than any dead point!',
dead, live)
ns_run = process_samples_array(np.vstack((dead, live)), **kwargs)
assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf
), 'As MultiNest does not currently perform dynamic nested sampling, all threads should start by sampling the whole prior.'
ns_run['output'] = {}
ns_run['output']['file_root'] = file_root
ns_run['output']['base_dir'] = base_dir
return ns_run
|
process_multinest_run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_root, base_dir, **kwargs):\n dead = np.loadtxt(os.path.join(base_dir, file_root) + '-dead-birth.txt')\n live = np.loadtxt(os.path.join(base_dir, file_root) +\n '-phys_live-birth.txt')\n dead = dead[:, :-2]\n live = live[:, :-1]\n assert dead[:, -2].max() < live[:, -2].min(), (\n 'final live points should have greater logls than any dead point!',\n dead, live)\n ns_run = process_samples_array(np.vstack((dead, live)), **kwargs)\n assert np.all(ns_run['thread_min_max'][:, 0] == -np.inf\n ), 'As MultiNest does not currently perform dynamic nested sampling, all threads should start by sampling the whole prior.'\n ns_run['output'] = {}\n ns_run['output']['file_root'] = file_root\n ns_run['output']['base_dir'] = base_dir\n return ns_run\n",
"role": "user"
},
{
"content": "process_multinest_run",
"role": "assistant"
}
] |
def x(results):
samples = np.zeros((results.samples.shape[0], results.samples.shape[1] + 3)
)
samples[:, 0] = results.logl
samples[:, 1] = results.samples_id
samples[:, 3:] = results.samples
unique_th, first_inds = np.unique(results.samples_id, return_index=True)
assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0])))
thread_min_max = np.full((unique_th.shape[0], 2), np.nan)
try:
assert unique_th.shape[0] == results.nlive
assert np.array_equal(np.unique(results.samples_id[-results.nlive:]
), np.asarray(range(results.nlive))
), 'perhaps the final live points are not included?'
thread_min_max[:, 0] = -np.inf
except AttributeError:
assert unique_th.shape[0] == sum(results.batch_nlive)
for th_lab, ind in zip(unique_th, first_inds):
thread_min_max[th_lab, 0] = results.batch_bounds[results.
samples_batch[ind], 0]
for th_lab in unique_th:
final_ind = np.where(results.samples_id == th_lab)[0][-1]
thread_min_max[th_lab, 1] = results.logl[final_ind]
samples[final_ind, 2] = -1
assert np.all(~np.isnan(thread_min_max))
run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)
nestcheck.ns_run_utils.check_ns_run(run)
return run
|
process_dynesty_run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(results):\n samples = np.zeros((results.samples.shape[0], results.samples.shape[1] + 3)\n )\n samples[:, 0] = results.logl\n samples[:, 1] = results.samples_id\n samples[:, 3:] = results.samples\n unique_th, first_inds = np.unique(results.samples_id, return_index=True)\n assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0])))\n thread_min_max = np.full((unique_th.shape[0], 2), np.nan)\n try:\n assert unique_th.shape[0] == results.nlive\n assert np.array_equal(np.unique(results.samples_id[-results.nlive:]\n ), np.asarray(range(results.nlive))\n ), 'perhaps the final live points are not included?'\n thread_min_max[:, 0] = -np.inf\n except AttributeError:\n assert unique_th.shape[0] == sum(results.batch_nlive)\n for th_lab, ind in zip(unique_th, first_inds):\n thread_min_max[th_lab, 0] = results.batch_bounds[results.\n samples_batch[ind], 0]\n for th_lab in unique_th:\n final_ind = np.where(results.samples_id == th_lab)[0][-1]\n thread_min_max[th_lab, 1] = results.logl[final_ind]\n samples[final_ind, 2] = -1\n assert np.all(~np.isnan(thread_min_max))\n run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)\n nestcheck.ns_run_utils.check_ns_run(run)\n return run\n",
"role": "user"
},
{
"content": "process_dynesty_run",
"role": "assistant"
}
] |
def x(file_root, base_dir):
filename = os.path.join(base_dir, file_root) + '.stats'
output = {'base_dir': base_dir, 'file_root': file_root}
with open(filename, 'r') as stats_file:
lines = stats_file.readlines()
output['logZ'] = float(lines[8].split()[2])
output['logZerr'] = float(lines[8].split()[4])
output['logZs'] = []
output['logZerrs'] = []
for line in lines[14:]:
if line[:5] != 'log(Z':
break
output['logZs'].append(float(re.findall('=(.*)', line)[0].split()[0]))
output['logZerrs'].append(float(re.findall('=(.*)', line)[0].split(
)[2]))
nclust = len(output['logZs'])
output['ncluster'] = nclust
output['nposterior'] = int(lines[20 + nclust].split()[1])
output['nequals'] = int(lines[21 + nclust].split()[1])
output['ndead'] = int(lines[22 + nclust].split()[1])
output['nlive'] = int(lines[23 + nclust].split()[1])
try:
output['nlike'] = int(lines[24 + nclust].split()[1])
except ValueError:
output['nlike'] = np.nan
output['avnlike'] = float(lines[25 + nclust].split()[1])
output['avnlikeslice'] = float(lines[25 + nclust].split()[3])
if len(lines) > 29 + nclust:
output['param_means'] = []
output['param_mean_errs'] = []
for line in lines[29 + nclust:]:
output['param_means'].append(float(line.split()[1]))
output['param_mean_errs'].append(float(line.split()[3]))
return output
|
process_polychord_stats
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_root, base_dir):\n filename = os.path.join(base_dir, file_root) + '.stats'\n output = {'base_dir': base_dir, 'file_root': file_root}\n with open(filename, 'r') as stats_file:\n lines = stats_file.readlines()\n output['logZ'] = float(lines[8].split()[2])\n output['logZerr'] = float(lines[8].split()[4])\n output['logZs'] = []\n output['logZerrs'] = []\n for line in lines[14:]:\n if line[:5] != 'log(Z':\n break\n output['logZs'].append(float(re.findall('=(.*)', line)[0].split()[0]))\n output['logZerrs'].append(float(re.findall('=(.*)', line)[0].split(\n )[2]))\n nclust = len(output['logZs'])\n output['ncluster'] = nclust\n output['nposterior'] = int(lines[20 + nclust].split()[1])\n output['nequals'] = int(lines[21 + nclust].split()[1])\n output['ndead'] = int(lines[22 + nclust].split()[1])\n output['nlive'] = int(lines[23 + nclust].split()[1])\n try:\n output['nlike'] = int(lines[24 + nclust].split()[1])\n except ValueError:\n output['nlike'] = np.nan\n output['avnlike'] = float(lines[25 + nclust].split()[1])\n output['avnlikeslice'] = float(lines[25 + nclust].split()[3])\n if len(lines) > 29 + nclust:\n output['param_means'] = []\n output['param_mean_errs'] = []\n for line in lines[29 + nclust:]:\n output['param_means'].append(float(line.split()[1]))\n output['param_mean_errs'].append(float(line.split()[3]))\n return output\n",
"role": "user"
},
{
"content": "process_polychord_stats",
"role": "assistant"
}
] |
def x(samples, **kwargs):
samples = samples[np.argsort(samples[:, -2])]
ns_run = {}
ns_run['logl'] = samples[:, -2]
ns_run['theta'] = samples[:, :-2]
birth_contours = samples[:, -1]
birth_inds = birth_inds_given_contours(birth_contours, ns_run['logl'],
**kwargs)
ns_run['thread_labels'] = threads_given_birth_inds(birth_inds)
unique_threads = np.unique(ns_run['thread_labels'])
assert np.array_equal(unique_threads, np.asarray(range(unique_threads.
shape[0])))
thread_min_max = np.zeros((unique_threads.shape[0], 2))
delta_nlive = np.zeros(samples.shape[0] + 1)
for label in unique_threads:
thread_inds = np.where(ns_run['thread_labels'] == label)[0]
thread_min_max[label, 1] = ns_run['logl'][thread_inds[-1]]
thread_start_birth_ind = birth_inds[thread_inds[0]]
delta_nlive[thread_inds[-1] + 1] -= 1
if thread_start_birth_ind == birth_inds[0]:
thread_min_max[label, 0] = -np.inf
delta_nlive[0] += 1
else:
assert thread_start_birth_ind >= 0
thread_min_max[label, 0] = ns_run['logl'][thread_start_birth_ind]
delta_nlive[thread_start_birth_ind + 1] += 1
ns_run['thread_min_max'] = thread_min_max
ns_run['nlive_array'] = np.cumsum(delta_nlive)[:-1]
return ns_run
|
process_samples_array
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(samples, **kwargs):\n samples = samples[np.argsort(samples[:, -2])]\n ns_run = {}\n ns_run['logl'] = samples[:, -2]\n ns_run['theta'] = samples[:, :-2]\n birth_contours = samples[:, -1]\n birth_inds = birth_inds_given_contours(birth_contours, ns_run['logl'],\n **kwargs)\n ns_run['thread_labels'] = threads_given_birth_inds(birth_inds)\n unique_threads = np.unique(ns_run['thread_labels'])\n assert np.array_equal(unique_threads, np.asarray(range(unique_threads.\n shape[0])))\n thread_min_max = np.zeros((unique_threads.shape[0], 2))\n delta_nlive = np.zeros(samples.shape[0] + 1)\n for label in unique_threads:\n thread_inds = np.where(ns_run['thread_labels'] == label)[0]\n thread_min_max[label, 1] = ns_run['logl'][thread_inds[-1]]\n thread_start_birth_ind = birth_inds[thread_inds[0]]\n delta_nlive[thread_inds[-1] + 1] -= 1\n if thread_start_birth_ind == birth_inds[0]:\n thread_min_max[label, 0] = -np.inf\n delta_nlive[0] += 1\n else:\n assert thread_start_birth_ind >= 0\n thread_min_max[label, 0] = ns_run['logl'][thread_start_birth_ind]\n delta_nlive[thread_start_birth_ind + 1] += 1\n ns_run['thread_min_max'] = thread_min_max\n ns_run['nlive_array'] = np.cumsum(delta_nlive)[:-1]\n return ns_run\n",
"role": "user"
},
{
"content": "process_samples_array",
"role": "assistant"
}
] |
def x(birth_logl_arr, logl_arr, **kwargs):
dup_assert = kwargs.pop('dup_assert', False)
dup_warn = kwargs.pop('dup_warn', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert logl_arr.ndim == 1, logl_arr.ndim
assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
nestcheck.ns_run_utils.check_ns_run_logls({'logl': logl_arr},
dup_assert=dup_assert, dup_warn=dup_warn)
state = np.random.get_state()
np.random.seed(0)
init_birth = birth_logl_arr[0]
assert np.all(birth_logl_arr <= logl_arr), logl_arr[birth_logl_arr >
logl_arr]
birth_inds = np.full(birth_logl_arr.shape, np.nan)
birth_inds[birth_logl_arr == init_birth] = -1
for i, birth_logl in enumerate(birth_logl_arr):
if not np.isnan(birth_inds[i]):
continue
dup_deaths = np.where(logl_arr == birth_logl)[0]
if dup_deaths.shape == (1,):
birth_inds[i] = dup_deaths[0]
continue
dup_births = np.where(birth_logl_arr == birth_logl)[0]
assert dup_deaths.shape[0] > 1, dup_deaths
if np.all(birth_logl_arr[dup_deaths] != birth_logl):
np.random.shuffle(dup_deaths)
inds_to_use = dup_deaths
else:
try:
inds_to_use = sample_less_than_condition(dup_deaths, dup_births
)
except ValueError:
raise ValueError(
'There is no way to allocate indexes dup_deaths={} such that each is less than dup_births={}.'
.format(dup_deaths, dup_births))
try:
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
except ValueError:
warnings.warn(
'for logl={}, the number of points born (indexes={}) is bigger than the number of points dying (indexes={}). This indicates a problem with your nested sampling software - it may be caused by a bug in PolyChord which was fixed in PolyChord v1.14, so try upgrading. I will try to give an approximate allocation of threads but this may fail.'
.format(birth_logl, dup_births, inds_to_use), UserWarning)
extra_inds = np.random.choice(inds_to_use, size=dup_births.
shape[0] - inds_to_use.shape[0])
inds_to_use = np.concatenate((inds_to_use, extra_inds))
np.random.shuffle(inds_to_use)
birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
np.random.set_state(state)
return birth_inds.astype(int)
|
birth_inds_given_contours
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(birth_logl_arr, logl_arr, **kwargs):\n dup_assert = kwargs.pop('dup_assert', False)\n dup_warn = kwargs.pop('dup_warn', False)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n assert logl_arr.ndim == 1, logl_arr.ndim\n assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim\n nestcheck.ns_run_utils.check_ns_run_logls({'logl': logl_arr},\n dup_assert=dup_assert, dup_warn=dup_warn)\n state = np.random.get_state()\n np.random.seed(0)\n init_birth = birth_logl_arr[0]\n assert np.all(birth_logl_arr <= logl_arr), logl_arr[birth_logl_arr >\n logl_arr]\n birth_inds = np.full(birth_logl_arr.shape, np.nan)\n birth_inds[birth_logl_arr == init_birth] = -1\n for i, birth_logl in enumerate(birth_logl_arr):\n if not np.isnan(birth_inds[i]):\n continue\n dup_deaths = np.where(logl_arr == birth_logl)[0]\n if dup_deaths.shape == (1,):\n birth_inds[i] = dup_deaths[0]\n continue\n dup_births = np.where(birth_logl_arr == birth_logl)[0]\n assert dup_deaths.shape[0] > 1, dup_deaths\n if np.all(birth_logl_arr[dup_deaths] != birth_logl):\n np.random.shuffle(dup_deaths)\n inds_to_use = dup_deaths\n else:\n try:\n inds_to_use = sample_less_than_condition(dup_deaths, dup_births\n )\n except ValueError:\n raise ValueError(\n 'There is no way to allocate indexes dup_deaths={} such that each is less than dup_births={}.'\n .format(dup_deaths, dup_births))\n try:\n birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]\n except ValueError:\n warnings.warn(\n 'for logl={}, the number of points born (indexes={}) is bigger than the number of points dying (indexes={}). This indicates a problem with your nested sampling software - it may be caused by a bug in PolyChord which was fixed in PolyChord v1.14, so try upgrading. I will try to give an approximate allocation of threads but this may fail.'\n .format(birth_logl, dup_births, inds_to_use), UserWarning)\n extra_inds = np.random.choice(inds_to_use, size=dup_births.\n shape[0] - inds_to_use.shape[0])\n inds_to_use = np.concatenate((inds_to_use, extra_inds))\n np.random.shuffle(inds_to_use)\n birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]\n assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()\n np.random.set_state(state)\n return birth_inds.astype(int)\n",
"role": "user"
},
{
"content": "birth_inds_given_contours",
"role": "assistant"
}
] |
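A tiny worked case for the birth-index reconstruction above: each point's birth contour is matched to the index of the dead point with that log-likelihood, and points born at the initial contour get index -1. Sketch assuming a nestcheck.data_processing origin:

import numpy as np
from nestcheck.data_processing import birth_inds_given_contours  # assumed origin

logl = np.array([1.0, 2.0, 3.0, 4.0])    # sorted death contours
births = np.array([0.0, 0.0, 1.0, 2.0])  # contour each point was born on
print(birth_inds_given_contours(births, logl))
# [-1 -1  0  1]: -1 marks points born at the initial contour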
def x(choices_in, condition):
output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
choices = copy.deepcopy(choices_in)
for i, _ in enumerate(output):
avail_inds = np.where(choices < condition[i])[0]
selected_ind = np.random.choice(avail_inds)
output[i] = choices[selected_ind]
choices = np.delete(choices, selected_ind)
return output
|
sample_less_than_condition
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(choices_in, condition):\n output = np.zeros(min(condition.shape[0], choices_in.shape[0]))\n choices = copy.deepcopy(choices_in)\n for i, _ in enumerate(output):\n avail_inds = np.where(choices < condition[i])[0]\n selected_ind = np.random.choice(avail_inds)\n output[i] = choices[selected_ind]\n choices = np.delete(choices, selected_ind)\n return output\n",
"role": "user"
},
{
"content": "sample_less_than_condition",
"role": "assistant"
}
] |
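The condition-constrained sampler draws, without replacement, one value below each successive threshold. A sketch (seeded only so repeated runs agree; nestcheck.data_processing origin assumed):

import numpy as np
from nestcheck.data_processing import sample_less_than_condition  # assumed origin

np.random.seed(0)
out = sample_less_than_condition(np.array([0, 1, 2, 3]), np.array([2, 4]))
print(out)  # e.g. [1. 3.]: out[0] < 2 and out[1] < 4, drawn without replacement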
def x(birth_inds):
unique, counts = np.unique(birth_inds, return_counts=True)
thread_start_inds = np.concatenate((unique[:1], unique[1:][counts[1:] > 1])
)
thread_start_counts = np.concatenate((counts[:1], counts[1:][counts[1:] >
1] - 1))
thread_labels = np.full(birth_inds.shape, np.nan)
thread_num = 0
for nmulti, multi in enumerate(thread_start_inds):
for i, start_ind in enumerate(np.where(birth_inds == multi)[0]):
if i != 0 or nmulti == 0:
assert np.isnan(thread_labels[start_ind])
thread_labels[start_ind] = thread_num
next_ind = np.where(birth_inds == start_ind)[0]
while next_ind.shape != (0,):
assert np.isnan(thread_labels[next_ind[0]])
thread_labels[next_ind[0]] = thread_num
next_ind = np.where(birth_inds == next_ind[0])[0]
thread_num += 1
if not np.all(~np.isnan(thread_labels)):
warnings.warn(
"""{} points (out of a total of {}) were not given a thread label! This is likely due to small numerical errors in your nested sampling software while running the calculation or writing the input files. I will try to give an approximate answer by randomly assigning these points to threads.
Indexes without labels are {}
Indexes on which threads start are {} with {} threads starting on each."""
.format(np.isnan(thread_labels).sum(), birth_inds.shape[0], np.
where(np.isnan(thread_labels))[0], thread_start_inds,
thread_start_counts))
inds = np.where(np.isnan(thread_labels))[0]
state = np.random.get_state()
np.random.seed(0)
for ind in inds:
labels_to_choose = np.intersect1d(thread_labels[:ind],
thread_labels[ind + 1:])
if labels_to_choose.shape[0] == 0:
labels_to_choose = np.unique(thread_labels[~np.isnan(
thread_labels)])
thread_labels[ind] = np.random.choice(labels_to_choose)
np.random.set_state(state)
assert np.all(~np.isnan(thread_labels)
), '{} points still do not have thread labels'.format(np.isnan(
thread_labels).sum())
assert np.array_equal(thread_labels, thread_labels.astype(int)
), 'Thread labels should all be ints!'
thread_labels = thread_labels.astype(int)
assert np.array_equal(np.unique(thread_labels), np.asarray(range(sum(
thread_start_counts)))), str(np.unique(thread_labels)
) + ' is not equal to range(' + str(sum(thread_start_counts)) + ')'
return thread_labels
|
threads_given_birth_inds
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(birth_inds):\n unique, counts = np.unique(birth_inds, return_counts=True)\n thread_start_inds = np.concatenate((unique[:1], unique[1:][counts[1:] > 1])\n )\n thread_start_counts = np.concatenate((counts[:1], counts[1:][counts[1:] >\n 1] - 1))\n thread_labels = np.full(birth_inds.shape, np.nan)\n thread_num = 0\n for nmulti, multi in enumerate(thread_start_inds):\n for i, start_ind in enumerate(np.where(birth_inds == multi)[0]):\n if i != 0 or nmulti == 0:\n assert np.isnan(thread_labels[start_ind])\n thread_labels[start_ind] = thread_num\n next_ind = np.where(birth_inds == start_ind)[0]\n while next_ind.shape != (0,):\n assert np.isnan(thread_labels[next_ind[0]])\n thread_labels[next_ind[0]] = thread_num\n next_ind = np.where(birth_inds == next_ind[0])[0]\n thread_num += 1\n if not np.all(~np.isnan(thread_labels)):\n warnings.warn(\n \"\"\"{} points (out of a total of {}) were not given a thread label! This is likely due to small numerical errors in your nested sampling software while running the calculation or writing the input files. I will try to give an approximate answer by randomly assigning these points to threads.\nIndexes without labels are {}\nIndexes on which threads start are {} with {} threads starting on each.\"\"\"\n .format(np.isnan(thread_labels).sum(), birth_inds.shape[0], np.\n where(np.isnan(thread_labels))[0], thread_start_inds,\n thread_start_counts))\n inds = np.where(np.isnan(thread_labels))[0]\n state = np.random.get_state()\n np.random.seed(0)\n for ind in inds:\n labels_to_choose = np.intersect1d(thread_labels[:ind],\n thread_labels[ind + 1:])\n if labels_to_choose.shape[0] == 0:\n labels_to_choose = np.unique(thread_labels[~np.isnan(\n thread_labels)])\n thread_labels[ind] = np.random.choice(labels_to_choose)\n np.random.set_state(state)\n assert np.all(~np.isnan(thread_labels)\n ), '{} points still do not have thread labels'.format(np.isnan(\n thread_labels).sum())\n assert np.array_equal(thread_labels, thread_labels.astype(int)\n ), 'Thread labels should all be ints!'\n thread_labels = thread_labels.astype(int)\n assert np.array_equal(np.unique(thread_labels), np.asarray(range(sum(\n thread_start_counts)))), str(np.unique(thread_labels)\n ) + ' is not equal to range(' + str(sum(thread_start_counts)) + ')'\n return thread_labels\n",
"role": "user"
},
{
"content": "threads_given_birth_inds",
"role": "assistant"
}
] |
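Continuing the worked case: the birth indexes above chain into threads by following each start point through the samples it spawned. Sketch with the same assumed origin:

import numpy as np
from nestcheck.data_processing import threads_given_birth_inds  # assumed origin

birth_inds = np.array([-1, -1, 0, 1])  # output of the previous sketch
print(threads_given_birth_inds(birth_inds))
# [0 1 0 1]: two threads, each starting at the initial contour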
def x(func, *arg_iterable, **kwargs):
chunksize = kwargs.pop('chunksize', 1)
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)
if parallel:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))
else:
if parallel_warning:
warnings.warn(
'parallel_map has parallel=False - turn on parallelisation for faster processing'
, UserWarning)
return list(map(func_to_map, *arg_iterable))
|
parallel_map
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(func, *arg_iterable, **kwargs):\n chunksize = kwargs.pop('chunksize', 1)\n func_pre_args = kwargs.pop('func_pre_args', ())\n func_kwargs = kwargs.pop('func_kwargs', {})\n max_workers = kwargs.pop('max_workers', None)\n parallel = kwargs.pop('parallel', True)\n parallel_warning = kwargs.pop('parallel_warning', True)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)\n if parallel:\n pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)\n return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))\n else:\n if parallel_warning:\n warnings.warn(\n 'parallel_map has parallel=False - turn on parallelisation for faster processing'\n , UserWarning)\n return list(map(func_to_map, *arg_iterable))\n",
"role": "user"
},
{
"content": "parallel_map",
"role": "assistant"
}
] |
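Usage sketch for the order-preserving parallel map above. The mapped function must be picklable (defined at module level), and on spawn-based platforms the pool must be created under a __main__ guard. The import path (nestcheck.parallel_utils) is an assumption:

import math
from nestcheck.parallel_utils import parallel_map  # assumed origin

if __name__ == '__main__':  # required: the process pool re-imports __main__
    print(parallel_map(math.sqrt, [1, 4, 9]))  # [1.0, 2.0, 3.0], order preserved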
def x(func, arg_iterable, **kwargs):
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
func_args = kwargs.pop('func_args', ())
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if 'leave' not in tqdm_kwargs:
tqdm_kwargs['leave'] = False
assert isinstance(func_args, tuple), str(func_args) + ' is type ' + str(
type(func_args))
assert isinstance(func_pre_args, tuple), str(func_pre_args
) + ' is type ' + str(type(func_pre_args))
progress = select_tqdm()
if not parallel:
if parallel_warning:
warnings.warn(
            'parallel_apply has parallel=False - turn on parallelisation for faster processing'
, UserWarning)
return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for
x in progress(arg_iterable, **tqdm_kwargs)]
else:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
futures = []
for element in arg_iterable:
futures.append(pool.submit(func, *(func_pre_args + (element,) +
func_args), **func_kwargs))
results = []
for fut in progress(concurrent.futures.as_completed(futures), total
=len(arg_iterable), **tqdm_kwargs):
results.append(fut.result())
return results
|
parallel_apply
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(func, arg_iterable, **kwargs):\n max_workers = kwargs.pop('max_workers', None)\n parallel = kwargs.pop('parallel', True)\n parallel_warning = kwargs.pop('parallel_warning', True)\n func_args = kwargs.pop('func_args', ())\n func_pre_args = kwargs.pop('func_pre_args', ())\n func_kwargs = kwargs.pop('func_kwargs', {})\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n if 'leave' not in tqdm_kwargs:\n tqdm_kwargs['leave'] = False\n assert isinstance(func_args, tuple), str(func_args) + ' is type ' + str(\n type(func_args))\n assert isinstance(func_pre_args, tuple), str(func_pre_args\n ) + ' is type ' + str(type(func_pre_args))\n progress = select_tqdm()\n if not parallel:\n if parallel_warning:\n warnings.warn(\n 'parallel_map has parallel=False - turn on parallelisation for faster processing'\n , UserWarning)\n return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for\n x in progress(arg_iterable, **tqdm_kwargs)]\n else:\n pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)\n futures = []\n for element in arg_iterable:\n futures.append(pool.submit(func, *(func_pre_args + (element,) +\n func_args), **func_kwargs))\n results = []\n for fut in progress(concurrent.futures.as_completed(futures), total\n =len(arg_iterable), **tqdm_kwargs):\n results.append(fut.result())\n return results\n",
"role": "user"
},
{
"content": "parallel_apply",
"role": "assistant"
}
] |
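Unlike the map variant, this version collects futures as_completed, so result order is not guaranteed; extra positional arguments are threaded through func_args. Sketch with the same assumed origin:

from nestcheck.parallel_utils import parallel_apply  # assumed origin

def scale(value, factor):
    return value * factor

if __name__ == '__main__':
    out = parallel_apply(scale, [1, 2, 3], func_args=(10,))
    print(sorted(out))  # [10, 20, 30]; sorted because completion order varies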
def x():
try:
progress = tqdm.tqdm_notebook
assert get_ipython().has_trait('kernel')
except (NameError, AssertionError):
progress = tqdm.tqdm
return progress
|
select_tqdm
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n try:\n progress = tqdm.tqdm_notebook\n assert get_ipython().has_trait('kernel')\n except (NameError, AssertionError):\n progress = tqdm.tqdm\n return progress\n",
"role": "user"
},
{
"content": "select_tqdm",
"role": "assistant"
}
] |
def x(results_array, names, axis=0, **kwargs):
assert axis == 0 or axis == 1
df = pd.DataFrame(results_array)
if axis == 1:
df = df.T
df.columns = names
return summary_df(df, **kwargs)
|
summary_df_from_array
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(results_array, names, axis=0, **kwargs):\n assert axis == 0 or axis == 1\n df = pd.DataFrame(results_array)\n if axis == 1:\n df = df.T\n df.columns = names\n return summary_df(df, **kwargs)\n",
"role": "user"
},
{
"content": "summary_df_from_array",
"role": "assistant"
}
] |
def x(results_list, names, **kwargs):
for arr in results_list:
assert arr.shape == (len(names),)
df = pd.DataFrame(np.stack(results_list, axis=0))
df.columns = names
return summary_df(df, **kwargs)
|
summary_df_from_list
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(results_list, names, **kwargs):\n for arr in results_list:\n assert arr.shape == (len(names),)\n df = pd.DataFrame(np.stack(results_list, axis=0))\n df.columns = names\n return summary_df(df, **kwargs)\n",
"role": "user"
},
{
"content": "summary_df_from_list",
"role": "assistant"
}
] |
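A sketch of the list-based summary: each array is one repeat of the same calculation, and the result is a multi-indexed frame of means and standard deviations with their uncertainties. The import path (nestcheck.pandas_functions) is assumed:

import numpy as np
from nestcheck.pandas_functions import summary_df_from_list  # assumed origin

repeats = [np.array([1.0, 2.0]), np.array([1.2, 1.8]), np.array([0.8, 2.2])]
df = summary_df_from_list(repeats, ['logZ', 'mean_theta1'])
print(df)  # (mean|std) x (value|uncertainty) rows, one column per estimator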
def x(multi_in, inds_to_keep=None, **kwargs):
include_true_values = kwargs.pop('include_true_values', False)
true_values = kwargs.get('true_values', None)
if inds_to_keep is None:
inds_to_keep = list(multi_in.index.names)[:-1]
if 'calculation type' not in inds_to_keep:
df = multi_in.groupby(inds_to_keep).apply(summary_df,
include_true_values=False, **kwargs)
else:
inds_to_keep = [(lev if lev != 'calculation type' else
'calculation type temp') for lev in inds_to_keep]
multi_temp = copy.deepcopy(multi_in)
multi_temp.index.set_names([(lev if lev != 'calculation type' else
'calculation type temp') for lev in list(multi_temp.index.names
)], inplace=True)
df = multi_temp.groupby(inds_to_keep).apply(summary_df,
include_true_values=False, **kwargs)
ind = df.index.get_level_values('calculation type temp'
) + ' ' + df.index.get_level_values('calculation type')
order = list(df.index.names)
order.remove('calculation type temp')
df.index = df.index.droplevel(['calculation type',
'calculation type temp'])
df['calculation type'] = list(ind)
df.set_index('calculation type', append=True, inplace=True)
df = df.reorder_levels(order)
if include_true_values:
assert true_values is not None
tv_ind = [('true values' if name == 'calculation type' else '') for
name in df.index.names[:-1]] + ['value']
df.loc[tuple(tv_ind), :] = true_values
return df
|
summary_df_from_multi
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(multi_in, inds_to_keep=None, **kwargs):\n include_true_values = kwargs.pop('include_true_values', False)\n true_values = kwargs.get('true_values', None)\n if inds_to_keep is None:\n inds_to_keep = list(multi_in.index.names)[:-1]\n if 'calculation type' not in inds_to_keep:\n df = multi_in.groupby(inds_to_keep).apply(summary_df,\n include_true_values=False, **kwargs)\n else:\n inds_to_keep = [(lev if lev != 'calculation type' else\n 'calculation type temp') for lev in inds_to_keep]\n multi_temp = copy.deepcopy(multi_in)\n multi_temp.index.set_names([(lev if lev != 'calculation type' else\n 'calculation type temp') for lev in list(multi_temp.index.names\n )], inplace=True)\n df = multi_temp.groupby(inds_to_keep).apply(summary_df,\n include_true_values=False, **kwargs)\n ind = df.index.get_level_values('calculation type temp'\n ) + ' ' + df.index.get_level_values('calculation type')\n order = list(df.index.names)\n order.remove('calculation type temp')\n df.index = df.index.droplevel(['calculation type',\n 'calculation type temp'])\n df['calculation type'] = list(ind)\n df.set_index('calculation type', append=True, inplace=True)\n df = df.reorder_levels(order)\n if include_true_values:\n assert true_values is not None\n tv_ind = [('true values' if name == 'calculation type' else '') for\n name in df.index.names[:-1]] + ['value']\n df.loc[tuple(tv_ind), :] = true_values\n return df\n",
"role": "user"
},
{
"content": "summary_df_from_multi",
"role": "assistant"
}
] |
def x(df_in, **kwargs):
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if true_values is not None:
assert true_values.shape[0] == df_in.shape[1
], 'There should be one true value for every column! true_values.shape=' + str(
true_values.shape) + ', df_in.shape=' + str(df_in.shape)
df = pd.DataFrame([df_in.mean(axis=0), df_in.std(axis=0, ddof=1)],
index=['mean', 'std'])
if include_true_values:
assert true_values is not None
df.loc['true values'] = true_values
df.index = pd.CategoricalIndex(df.index.values, ordered=True,
categories=['true values', 'mean', 'std', 'rmse'], name=
'calculation type')
num_cals = df_in.shape[0]
mean_unc = df.loc['std'] / np.sqrt(num_cals)
std_unc = df.loc['std'] * np.sqrt(1 / (2 * (num_cals - 1)))
df['result type'] = pd.Categorical(['value'] * df.shape[0], ordered=
True, categories=['value', 'uncertainty'])
df.set_index(['result type'], drop=True, append=True, inplace=True)
df.loc[('mean', 'uncertainty'), :] = mean_unc.values
df.loc[('std', 'uncertainty'), :] = std_unc.values
if include_rmse:
assert true_values is not None, 'Need to input true values for RMSE!'
rmse, rmse_unc = rmse_and_unc(df_in.values, true_values)
df.loc[('rmse', 'value'), :] = rmse
df.loc[('rmse', 'uncertainty'), :] = rmse_unc
df.sort_index(inplace=True)
df.set_index([df.index.get_level_values('calculation type').astype(str),
df.index.get_level_values('result type')], inplace=True)
return df
|
summary_df
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(df_in, **kwargs):\n true_values = kwargs.pop('true_values', None)\n include_true_values = kwargs.pop('include_true_values', False)\n include_rmse = kwargs.pop('include_rmse', False)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n if true_values is not None:\n assert true_values.shape[0] == df_in.shape[1\n ], 'There should be one true value for every column! true_values.shape=' + str(\n true_values.shape) + ', df_in.shape=' + str(df_in.shape)\n df = pd.DataFrame([df_in.mean(axis=0), df_in.std(axis=0, ddof=1)],\n index=['mean', 'std'])\n if include_true_values:\n assert true_values is not None\n df.loc['true values'] = true_values\n df.index = pd.CategoricalIndex(df.index.values, ordered=True,\n categories=['true values', 'mean', 'std', 'rmse'], name=\n 'calculation type')\n num_cals = df_in.shape[0]\n mean_unc = df.loc['std'] / np.sqrt(num_cals)\n std_unc = df.loc['std'] * np.sqrt(1 / (2 * (num_cals - 1)))\n df['result type'] = pd.Categorical(['value'] * df.shape[0], ordered=\n True, categories=['value', 'uncertainty'])\n df.set_index(['result type'], drop=True, append=True, inplace=True)\n df.loc[('mean', 'uncertainty'), :] = mean_unc.values\n df.loc[('std', 'uncertainty'), :] = std_unc.values\n if include_rmse:\n assert true_values is not None, 'Need to input true values for RMSE!'\n rmse, rmse_unc = rmse_and_unc(df_in.values, true_values)\n df.loc[('rmse', 'value'), :] = rmse\n df.loc[('rmse', 'uncertainty'), :] = rmse_unc\n df.sort_index(inplace=True)\n df.set_index([df.index.get_level_values('calculation type').astype(str),\n df.index.get_level_values('result type')], inplace=True)\n return df\n",
"role": "user"
},
{
"content": "summary_df",
"role": "assistant"
}
] |
def x(method_names, method_values, est_names, **kwargs):
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
adjust_nsamp = kwargs.pop('adjust_nsamp', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if adjust_nsamp is not None:
assert adjust_nsamp.shape == (len(method_names),)
assert len(method_names) == len(method_values)
df_dict = {}
for i, method_name in enumerate(method_names):
df = summary_df_from_list(method_values[i], est_names, true_values=
true_values, include_true_values=False, include_rmse=include_rmse)
if i != 0:
stats = ['std']
if include_rmse:
stats.append('rmse')
if adjust_nsamp is not None:
adjust = adjust_nsamp[0] / adjust_nsamp[i]
else:
adjust = 1
for stat in stats:
gain, gain_unc = get_eff_gain(df_dict[method_names[0]].loc[
stat, 'value'], df_dict[method_names[0]].loc[stat,
'uncertainty'], df.loc[stat, 'value'], df.loc[stat,
'uncertainty'], adjust=adjust)
key = stat + ' efficiency gain'
df.loc[(key, 'value'), :] = gain
df.loc[(key, 'uncertainty'), :] = gain_unc
df_dict[method_name] = df
results = pd.concat(df_dict)
results.index.rename('dynamic settings', level=0, inplace=True)
new_ind = []
new_ind.append(pd.CategoricalIndex(results.index.get_level_values(
'calculation type'), ordered=True, categories=['true values',
'mean', 'std', 'rmse', 'std efficiency gain', 'rmse efficiency gain']))
new_ind.append(pd.CategoricalIndex(results.index.get_level_values(
'dynamic settings'), ordered=True, categories=[''] + method_names))
new_ind.append(results.index.get_level_values('result type'))
results.set_index(new_ind, inplace=True)
if include_true_values:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'indexing past lexsort depth may impact performance.')
results.loc[('true values', '', 'value'), :] = true_values
results.sort_index(inplace=True)
return results
|
efficiency_gain_df
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(method_names, method_values, est_names, **kwargs):\n true_values = kwargs.pop('true_values', None)\n include_true_values = kwargs.pop('include_true_values', False)\n include_rmse = kwargs.pop('include_rmse', False)\n adjust_nsamp = kwargs.pop('adjust_nsamp', None)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n if adjust_nsamp is not None:\n assert adjust_nsamp.shape == (len(method_names),)\n assert len(method_names) == len(method_values)\n df_dict = {}\n for i, method_name in enumerate(method_names):\n df = summary_df_from_list(method_values[i], est_names, true_values=\n true_values, include_true_values=False, include_rmse=include_rmse)\n if i != 0:\n stats = ['std']\n if include_rmse:\n stats.append('rmse')\n if adjust_nsamp is not None:\n adjust = adjust_nsamp[0] / adjust_nsamp[i]\n else:\n adjust = 1\n for stat in stats:\n gain, gain_unc = get_eff_gain(df_dict[method_names[0]].loc[\n stat, 'value'], df_dict[method_names[0]].loc[stat,\n 'uncertainty'], df.loc[stat, 'value'], df.loc[stat,\n 'uncertainty'], adjust=adjust)\n key = stat + ' efficiency gain'\n df.loc[(key, 'value'), :] = gain\n df.loc[(key, 'uncertainty'), :] = gain_unc\n df_dict[method_name] = df\n results = pd.concat(df_dict)\n results.index.rename('dynamic settings', level=0, inplace=True)\n new_ind = []\n new_ind.append(pd.CategoricalIndex(results.index.get_level_values(\n 'calculation type'), ordered=True, categories=['true values',\n 'mean', 'std', 'rmse', 'std efficiency gain', 'rmse efficiency gain']))\n new_ind.append(pd.CategoricalIndex(results.index.get_level_values(\n 'dynamic settings'), ordered=True, categories=[''] + method_names))\n new_ind.append(results.index.get_level_values('result type'))\n results.set_index(new_ind, inplace=True)\n if include_true_values:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', message=\n 'indexing past lexsort depth may impact performance.')\n results.loc[('true values', '', 'value'), :] = true_values\n results.sort_index(inplace=True)\n return results\n",
"role": "user"
},
{
"content": "efficiency_gain_df",
"role": "assistant"
}
] |
def x(eff_gain_df):
idxs = pd.IndexSlice[['std', 'std efficiency gain'], :, :]
paper_df = copy.deepcopy(eff_gain_df.loc[idxs, :])
means = eff_gain_df.xs('mean', level='calculation type').xs('value',
level='result type')
for col in ['samples', 'likelihood calls']:
try:
col_vals = []
for val in means[col].values:
col_vals += [int(np.rint(val)), np.nan]
col_vals += [np.nan] * (paper_df.shape[0] - len(col_vals))
paper_df[col] = col_vals
except KeyError:
pass
row_name_map = {'std efficiency gain': 'Efficiency gain',
'St.Dev. efficiency gain': 'Efficiency gain', 'dynamic ': '', 'std':
'St.Dev.'}
row_names = paper_df.index.get_level_values(0).astype(str
) + ' ' + paper_df.index.get_level_values(1).astype(str)
for key, value in row_name_map.items():
row_names = row_names.str.replace(key, value)
paper_df.index = [row_names, paper_df.index.get_level_values(2)]
return paper_df
|
paper_format_efficiency_gain_df
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(eff_gain_df):\n idxs = pd.IndexSlice[['std', 'std efficiency gain'], :, :]\n paper_df = copy.deepcopy(eff_gain_df.loc[idxs, :])\n means = eff_gain_df.xs('mean', level='calculation type').xs('value',\n level='result type')\n for col in ['samples', 'likelihood calls']:\n try:\n col_vals = []\n for val in means[col].values:\n col_vals += [int(np.rint(val)), np.nan]\n col_vals += [np.nan] * (paper_df.shape[0] - len(col_vals))\n paper_df[col] = col_vals\n except KeyError:\n pass\n row_name_map = {'std efficiency gain': 'Efficiency gain',\n 'St.Dev. efficiency gain': 'Efficiency gain', 'dynamic ': '', 'std':\n 'St.Dev.'}\n row_names = paper_df.index.get_level_values(0).astype(str\n ) + ' ' + paper_df.index.get_level_values(1).astype(str)\n for key, value in row_name_map.items():\n row_names = row_names.str.replace(key, value)\n paper_df.index = [row_names, paper_df.index.get_level_values(2)]\n return paper_df\n",
"role": "user"
},
{
"content": "paper_format_efficiency_gain_df",
"role": "assistant"
}
] |
def x(base_std, base_std_unc, meth_std, meth_std_unc, adjust=1):
ratio = base_std / meth_std
ratio_unc = array_ratio_std(base_std, base_std_unc, meth_std, meth_std_unc)
gain = ratio ** 2
gain_unc = 2 * ratio * ratio_unc
gain *= adjust
gain_unc *= adjust
return gain, gain_unc
|
get_eff_gain
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(base_std, base_std_unc, meth_std, meth_std_unc, adjust=1):\n ratio = base_std / meth_std\n ratio_unc = array_ratio_std(base_std, base_std_unc, meth_std, meth_std_unc)\n gain = ratio ** 2\n gain_unc = 2 * ratio * ratio_unc\n gain *= adjust\n gain_unc *= adjust\n return gain, gain_unc\n",
"role": "user"
},
{
"content": "get_eff_gain",
"role": "assistant"
}
] |
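The gain is the squared ratio of baseline to method standard deviation (optionally rescaled by relative sample cost via adjust), with its uncertainty from first-order error propagation. Worked numbers, same assumed origin:

from nestcheck.pandas_functions import get_eff_gain  # assumed origin

# Method std is half the baseline std, so the variance ratio gives a 4x gain.
gain, gain_unc = get_eff_gain(0.2, 0.01, 0.1, 0.01)
print(gain)      # 4.0
print(gain_unc)  # ~0.89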
def x(values_array, true_values):
assert true_values.shape == (values_array.shape[1],)
errors = values_array - true_values[np.newaxis, :]
sq_errors = errors ** 2
sq_errors_mean = np.mean(sq_errors, axis=0)
sq_errors_mean_unc = np.std(sq_errors, axis=0, ddof=1) / np.sqrt(sq_errors
.shape[0])
rmse = np.sqrt(sq_errors_mean)
rmse_unc = 0.5 * (1 / rmse) * sq_errors_mean_unc
return rmse, rmse_unc
|
rmse_and_unc
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(values_array, true_values):\n assert true_values.shape == (values_array.shape[1],)\n errors = values_array - true_values[np.newaxis, :]\n sq_errors = errors ** 2\n sq_errors_mean = np.mean(sq_errors, axis=0)\n sq_errors_mean_unc = np.std(sq_errors, axis=0, ddof=1) / np.sqrt(sq_errors\n .shape[0])\n rmse = np.sqrt(sq_errors_mean)\n rmse_unc = 0.5 * (1 / rmse) * sq_errors_mean_unc\n return rmse, rmse_unc\n",
"role": "user"
},
{
"content": "rmse_and_unc",
"role": "assistant"
}
] |
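RMSE sketch: errors are taken column-wise against the true values, and the uncertainty follows from the standard error of the mean squared error. Same assumed origin:

import numpy as np
from nestcheck.pandas_functions import rmse_and_unc  # assumed origin

values = np.array([[1.0, 2.0],
                   [3.0, 4.0],
                   [2.0, 3.0]])
true_values = np.array([2.0, 3.0])
rmse, rmse_unc = rmse_and_unc(values, true_values)
print(rmse)  # [0.8165 0.8165]: per-column errors are [-1, 1, 0]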
def x(values_n, sigmas_n, values_d, sigmas_d):
std = np.sqrt((sigmas_n / values_n) ** 2 + (sigmas_d / values_d) ** 2)
std *= values_n / values_d
return std
|
array_ratio_std
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(values_n, sigmas_n, values_d, sigmas_d):\n std = np.sqrt((sigmas_n / values_n) ** 2 + (sigmas_d / values_d) ** 2)\n std *= values_n / values_d\n return std\n",
"role": "user"
},
{
"content": "array_ratio_std",
"role": "assistant"
}
] |
def x(ns_run, estimator_list, simulate=False):
logw = get_logw(ns_run, simulate=simulate)
output = np.zeros(len(estimator_list))
for i, est in enumerate(estimator_list):
output[i] = est(ns_run, logw=logw)
return output
|
run_estimators
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, estimator_list, simulate=False):\n logw = get_logw(ns_run, simulate=simulate)\n output = np.zeros(len(estimator_list))\n for i, est in enumerate(estimator_list):\n output[i] = est(ns_run, logw=logw)\n return output\n",
"role": "user"
},
{
"content": "run_estimators",
"role": "assistant"
}
] |
def x(ns_run):
samples = np.zeros((ns_run['logl'].shape[0], 3 + ns_run['theta'].shape[1]))
samples[:, 0] = ns_run['logl']
samples[:, 1] = ns_run['thread_labels']
samples[:-1, 2] = np.diff(ns_run['nlive_array'])
samples[-1, 2] = -1
samples[:, 3:] = ns_run['theta']
return samples
|
array_given_run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run):\n samples = np.zeros((ns_run['logl'].shape[0], 3 + ns_run['theta'].shape[1]))\n samples[:, 0] = ns_run['logl']\n samples[:, 1] = ns_run['thread_labels']\n samples[:-1, 2] = np.diff(ns_run['nlive_array'])\n samples[-1, 2] = -1\n samples[:, 3:] = ns_run['theta']\n return samples\n",
"role": "user"
},
{
"content": "array_given_run",
"role": "assistant"
}
] |
def x(samples, thread_min_max):
ns_run = {'logl': samples[:, 0], 'thread_labels': samples[:, 1],
'thread_min_max': thread_min_max, 'theta': samples[:, 3:]}
if np.all(~np.isnan(ns_run['thread_labels'])):
ns_run['thread_labels'] = ns_run['thread_labels'].astype(int)
assert np.array_equal(samples[:, 1], ns_run['thread_labels']
), """Casting thread labels from samples array to int has changed their values!
samples[:, 1]={}
thread_labels={}""".format(
samples[:, 1], ns_run['thread_labels'])
nlive_0 = (thread_min_max[:, 0] <= ns_run['logl'].min()).sum()
assert nlive_0 > 0, 'nlive_0={}'.format(nlive_0)
nlive_array = np.zeros(samples.shape[0]) + nlive_0
nlive_array[1:] += np.cumsum(samples[:-1, 2])
dup_th_starts = (thread_min_max[:, 0] == ns_run['logl'].min()).sum()
if dup_th_starts > 1:
nlive_array += 1 - nlive_array[-1]
n_logl_min = (ns_run['logl'] == ns_run['logl'].min()).sum()
nlive_array[:n_logl_min] = nlive_0
warnings.warn(
'duplicate starting logls: {} threads start at logl.min()={}, and {} points have logl=logl.min(). nlive_array may only be approximately correct.'
.format(dup_th_starts, ns_run['logl'].min(), n_logl_min),
UserWarning)
assert nlive_array.min(
) > 0, """nlive contains 0s or negative values. nlive_0={}
nlive_array = {}
thread_min_max={}""".format(
nlive_0, nlive_array, thread_min_max)
assert nlive_array[-1
] == 1, 'final point in nlive_array != 1.\nnlive_array = ' + str(
nlive_array)
ns_run['nlive_array'] = nlive_array
return ns_run
|
dict_given_run_array
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(samples, thread_min_max):\n ns_run = {'logl': samples[:, 0], 'thread_labels': samples[:, 1],\n 'thread_min_max': thread_min_max, 'theta': samples[:, 3:]}\n if np.all(~np.isnan(ns_run['thread_labels'])):\n ns_run['thread_labels'] = ns_run['thread_labels'].astype(int)\n assert np.array_equal(samples[:, 1], ns_run['thread_labels']\n ), \"\"\"Casting thread labels from samples array to int has changed their values!\nsamples[:, 1]={}\nthread_labels={}\"\"\".format(\n samples[:, 1], ns_run['thread_labels'])\n nlive_0 = (thread_min_max[:, 0] <= ns_run['logl'].min()).sum()\n assert nlive_0 > 0, 'nlive_0={}'.format(nlive_0)\n nlive_array = np.zeros(samples.shape[0]) + nlive_0\n nlive_array[1:] += np.cumsum(samples[:-1, 2])\n dup_th_starts = (thread_min_max[:, 0] == ns_run['logl'].min()).sum()\n if dup_th_starts > 1:\n nlive_array += 1 - nlive_array[-1]\n n_logl_min = (ns_run['logl'] == ns_run['logl'].min()).sum()\n nlive_array[:n_logl_min] = nlive_0\n warnings.warn(\n 'duplicate starting logls: {} threads start at logl.min()={}, and {} points have logl=logl.min(). nlive_array may only be approximately correct.'\n .format(dup_th_starts, ns_run['logl'].min(), n_logl_min),\n UserWarning)\n assert nlive_array.min(\n ) > 0, \"\"\"nlive contains 0s or negative values. nlive_0={}\nnlive_array = {}\nthread_min_max={}\"\"\".format(\n nlive_0, nlive_array, thread_min_max)\n assert nlive_array[-1\n ] == 1, 'final point in nlive_array != 1.\\nnlive_array = ' + str(\n nlive_array)\n ns_run['nlive_array'] = nlive_array\n return ns_run\n",
"role": "user"
},
{
"content": "dict_given_run_array",
"role": "assistant"
}
] |
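Note: array_given_run and dict_given_run_array are inverses. The array stores (logl, thread label, nlive change, theta...) per sample; the per-thread (start logl, end logl) bounds are not stored, so they must be passed back in to rebuild the dict. A hedged round-trip on an invented two-thread run.

import numpy as np

# Two threads, both started from the whole prior (-inf), interleaved so
# the samples are already sorted by logl (made-up values).
ns_run = {
    'logl': np.array([-3.0, -2.0, -1.0, 0.0]),
    'thread_labels': np.array([0, 1, 0, 1]),
    'thread_min_max': np.array([[-np.inf, -1.0], [-np.inf, 0.0]]),
    'nlive_array': np.array([2.0, 2.0, 2.0, 1.0]),
    'theta': np.zeros((4, 1)),
}
samples = array_given_run(ns_run)
rebuilt = dict_given_run_array(samples, ns_run['thread_min_max'])
assert np.array_equal(rebuilt['nlive_array'], ns_run['nlive_array'])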
def x(ns_run):
samples = array_given_run(ns_run)
unique_threads = np.unique(ns_run['thread_labels'])
assert ns_run['thread_min_max'].shape[0] == unique_threads.shape[0
], 'some threads have no points! {0} != {1}'.format(unique_threads.
shape[0], ns_run['thread_min_max'].shape[0])
threads = []
for i, th_lab in enumerate(unique_threads):
thread_array = samples[np.where(samples[:, 1] == th_lab)]
thread_array[:, 2] = 0
thread_array[-1, 2] = -1
min_max = np.reshape(ns_run['thread_min_max'][i, :], (1, 2))
assert min_max[0, 1] == thread_array[-1, 0
], 'thread max logl should equal logl of its final point!'
threads.append(dict_given_run_array(thread_array, min_max))
return threads
|
get_run_threads
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run):\n samples = array_given_run(ns_run)\n unique_threads = np.unique(ns_run['thread_labels'])\n assert ns_run['thread_min_max'].shape[0] == unique_threads.shape[0\n ], 'some threads have no points! {0} != {1}'.format(unique_threads.\n shape[0], ns_run['thread_min_max'].shape[0])\n threads = []\n for i, th_lab in enumerate(unique_threads):\n thread_array = samples[np.where(samples[:, 1] == th_lab)]\n thread_array[:, 2] = 0\n thread_array[-1, 2] = -1\n min_max = np.reshape(ns_run['thread_min_max'][i, :], (1, 2))\n assert min_max[0, 1] == thread_array[-1, 0\n ], 'thread max logl should equal logl of its final point!'\n threads.append(dict_given_run_array(thread_array, min_max))\n return threads\n",
"role": "user"
},
{
"content": "get_run_threads",
"role": "assistant"
}
] |
def x(run_list_in, **kwargs):
run_list = copy.deepcopy(run_list_in)
if len(run_list) == 1:
run = run_list[0]
else:
nthread_tot = 0
for i, _ in enumerate(run_list):
check_ns_run(run_list[i], **kwargs)
run_list[i]['thread_labels'] += nthread_tot
nthread_tot += run_list[i]['thread_min_max'].shape[0]
thread_min_max = np.vstack([run['thread_min_max'] for run in run_list])
samples_temp = np.vstack([array_given_run(run) for run in run_list])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
run = dict_given_run_array(samples_temp, thread_min_max)
run['output'] = {}
for key in ['nlike', 'ndead']:
try:
run['output'][key] = sum([temp['output'][key] for temp in
run_list_in])
except KeyError:
pass
check_ns_run(run, **kwargs)
return run
|
combine_ns_runs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run_list_in, **kwargs):\n run_list = copy.deepcopy(run_list_in)\n if len(run_list) == 1:\n run = run_list[0]\n else:\n nthread_tot = 0\n for i, _ in enumerate(run_list):\n check_ns_run(run_list[i], **kwargs)\n run_list[i]['thread_labels'] += nthread_tot\n nthread_tot += run_list[i]['thread_min_max'].shape[0]\n thread_min_max = np.vstack([run['thread_min_max'] for run in run_list])\n samples_temp = np.vstack([array_given_run(run) for run in run_list])\n samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]\n run = dict_given_run_array(samples_temp, thread_min_max)\n run['output'] = {}\n for key in ['nlike', 'ndead']:\n try:\n run['output'][key] = sum([temp['output'][key] for temp in\n run_list_in])\n except KeyError:\n pass\n check_ns_run(run, **kwargs)\n return run\n",
"role": "user"
},
{
"content": "combine_ns_runs",
"role": "assistant"
}
] |
def x(threads, assert_birth_point=False):
thread_min_max = np.vstack([td['thread_min_max'] for td in threads])
assert len(threads) == thread_min_max.shape[0]
samples_temp = np.vstack([array_given_run(thread) for thread in threads])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
logl_starts = thread_min_max[:, 0]
state = np.random.get_state()
np.random.seed(0)
for logl_start in logl_starts[logl_starts != -np.inf]:
ind = np.where(samples_temp[:, 0] == logl_start)[0]
if assert_birth_point:
assert ind.shape == (1,), 'No unique birth point! ' + str(ind.shape
)
if ind.shape == (1,):
samples_temp[ind[0], 2] += 1
elif ind.shape == (0,):
ind_closest = np.argmin(np.abs(samples_temp[:, 0] - logl_start))
samples_temp[ind_closest, 2] += 1
else:
samples_temp[np.random.choice(ind), 2] += 1
np.random.set_state(state)
ns_run = dict_given_run_array(samples_temp, thread_min_max)
try:
check_ns_run_threads(ns_run)
except AssertionError:
ns_run['thread_labels'] = None
ns_run['thread_min_max'] = None
return ns_run
|
combine_threads
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(threads, assert_birth_point=False):\n thread_min_max = np.vstack([td['thread_min_max'] for td in threads])\n assert len(threads) == thread_min_max.shape[0]\n samples_temp = np.vstack([array_given_run(thread) for thread in threads])\n samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]\n logl_starts = thread_min_max[:, 0]\n state = np.random.get_state()\n np.random.seed(0)\n for logl_start in logl_starts[logl_starts != -np.inf]:\n ind = np.where(samples_temp[:, 0] == logl_start)[0]\n if assert_birth_point:\n assert ind.shape == (1,), 'No unique birth point! ' + str(ind.shape\n )\n if ind.shape == (1,):\n samples_temp[ind[0], 2] += 1\n elif ind.shape == (0,):\n ind_closest = np.argmin(np.abs(samples_temp[:, 0] - logl_start))\n samples_temp[ind_closest, 2] += 1\n else:\n samples_temp[np.random.choice(ind), 2] += 1\n np.random.set_state(state)\n ns_run = dict_given_run_array(samples_temp, thread_min_max)\n try:\n check_ns_run_threads(ns_run)\n except AssertionError:\n ns_run['thread_labels'] = None\n ns_run['thread_min_max'] = None\n return ns_run\n",
"role": "user"
},
{
"content": "combine_threads",
"role": "assistant"
}
] |
def x(ns_run, simulate=False):
try:
logx = get_logx(ns_run['nlive_array'], simulate=simulate)
logw = np.zeros(ns_run['logl'].shape[0])
logw[1:-1] = log_subtract(logx[:-2], logx[2:]) - np.log(2)
logw[0] = log_subtract(0, scipy.special.logsumexp([logx[0], logx[1]
]) - np.log(2))
logw[-1] = scipy.special.logsumexp([logx[-2], logx[-1]]) - np.log(2)
logw += ns_run['logl']
return logw
except IndexError:
if ns_run['logl'].shape[0] == 1:
return copy.deepcopy(ns_run['logl'])
else:
raise
|
get_logw
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, simulate=False):\n try:\n logx = get_logx(ns_run['nlive_array'], simulate=simulate)\n logw = np.zeros(ns_run['logl'].shape[0])\n logw[1:-1] = log_subtract(logx[:-2], logx[2:]) - np.log(2)\n logw[0] = log_subtract(0, scipy.special.logsumexp([logx[0], logx[1]\n ]) - np.log(2))\n logw[-1] = scipy.special.logsumexp([logx[-2], logx[-1]]) - np.log(2)\n logw += ns_run['logl']\n return logw\n except IndexError:\n if ns_run['logl'].shape[0] == 1:\n return copy.deepcopy(ns_run['logl'])\n else:\n raise\n",
"role": "user"
},
{
"content": "get_logw",
"role": "assistant"
}
] |
def x(ns_run, simulate=False):
logw = get_logw(ns_run, simulate=simulate)
return np.exp(logw - logw.max())
|
get_w_rel
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, simulate=False):\n logw = get_logw(ns_run, simulate=simulate)\n return np.exp(logw - logw.max())\n",
"role": "user"
},
{
"content": "get_w_rel",
"role": "assistant"
}
] |
def x(nlive, simulate=False):
assert nlive.min(
) > 0, 'nlive contains zeros or negative values! nlive = ' + str(nlive)
if simulate:
logx_steps = np.log(np.random.random(nlive.shape)) / nlive
else:
logx_steps = -1 * nlive.astype(float) ** -1
return np.cumsum(logx_steps)
|
get_logx
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(nlive, simulate=False):\n assert nlive.min(\n ) > 0, 'nlive contains zeros or negative values! nlive = ' + str(nlive)\n if simulate:\n logx_steps = np.log(np.random.random(nlive.shape)) / nlive\n else:\n logx_steps = -1 * nlive.astype(float) ** -1\n return np.cumsum(logx_steps)\n",
"role": "user"
},
{
"content": "get_logx",
"role": "assistant"
}
] |
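Note: get_logx implements the usual nested sampling shrinkage: each iteration with n live points shrinks log X by 1/n in expectation, or by the log of a simulated uniform order statistic when simulate=True. In the deterministic case with constant nlive the output is just -i/nlive.

import numpy as np

nlive = np.full(5, 100)
logx = get_logx(nlive, simulate=False)
assert np.allclose(logx, -(np.arange(5) + 1) / 100)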
def x(loga, logb):
return loga + np.log(1 - np.exp(logb - loga))
|
log_subtract
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(loga, logb):\n return loga + np.log(1 - np.exp(logb - loga))\n",
"role": "user"
},
{
"content": "log_subtract",
"role": "assistant"
}
] |
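Note: this computes log(a - b) from log a and log b without leaving log space, which avoids overflow when a and b are huge. It requires loga > logb, otherwise the inner log receives a non-positive argument.

import numpy as np

# log(5 - 2) recovered from log(5) and log(2).
assert np.isclose(log_subtract(np.log(5.0), np.log(2.0)), np.log(3.0))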
def x(run, dup_assert=False, dup_warn=False):
assert isinstance(run, dict)
check_ns_run_members(run)
check_ns_run_logls(run, dup_assert=dup_assert, dup_warn=dup_warn)
check_ns_run_threads(run)
|
check_ns_run
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run, dup_assert=False, dup_warn=False):\n assert isinstance(run, dict)\n check_ns_run_members(run)\n check_ns_run_logls(run, dup_assert=dup_assert, dup_warn=dup_warn)\n check_ns_run_threads(run)\n",
"role": "user"
},
{
"content": "check_ns_run",
"role": "assistant"
}
] |
def x(run):
run_keys = list(run.keys())
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert key in run_keys
run_keys.remove(key)
for key in ['output']:
try:
run_keys.remove(key)
except ValueError:
pass
assert not run_keys, 'Unexpected keys in ns_run: ' + str(run_keys)
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert isinstance(run[key], np.ndarray), key + ' is type ' + type(run
[key]).__name__
assert run['logl'].ndim == 1
assert run['logl'].shape == run['nlive_array'].shape
assert run['logl'].shape == run['thread_labels'].shape
assert run['theta'].ndim == 2
assert run['logl'].shape[0] == run['theta'].shape[0]
|
check_ns_run_members
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run):\n run_keys = list(run.keys())\n for key in ['logl', 'nlive_array', 'theta', 'thread_labels',\n 'thread_min_max']:\n assert key in run_keys\n run_keys.remove(key)\n for key in ['output']:\n try:\n run_keys.remove(key)\n except ValueError:\n pass\n assert not run_keys, 'Unexpected keys in ns_run: ' + str(run_keys)\n for key in ['logl', 'nlive_array', 'theta', 'thread_labels',\n 'thread_min_max']:\n assert isinstance(run[key], np.ndarray), key + ' is type ' + type(run\n [key]).__name__\n assert run['logl'].ndim == 1\n assert run['logl'].shape == run['nlive_array'].shape\n assert run['logl'].shape == run['thread_labels'].shape\n assert run['theta'].ndim == 2\n assert run['logl'].shape[0] == run['theta'].shape[0]\n",
"role": "user"
},
{
"content": "check_ns_run_members",
"role": "assistant"
}
] |
def x(run, dup_assert=False, dup_warn=False):
assert np.array_equal(run['logl'], run['logl'][np.argsort(run['logl'])])
if dup_assert or dup_warn:
unique_logls, counts = np.unique(run['logl'], return_counts=True)
repeat_logls = run['logl'].shape[0] - unique_logls.shape[0]
msg = (
"""{} duplicate logl values (out of a total of {}). This may be caused by limited numerical precision in the output files.
repeated logls = {}
counts = {}
positions in list of {} unique logls = {}"""
.format(repeat_logls, run['logl'].shape[0], unique_logls[counts !=
1], counts[counts != 1], unique_logls.shape[0], np.where(counts !=
1)[0]))
if dup_assert:
assert repeat_logls == 0, msg
elif dup_warn:
if repeat_logls != 0:
warnings.warn(msg, UserWarning)
|
check_ns_run_logls
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run, dup_assert=False, dup_warn=False):\n assert np.array_equal(run['logl'], run['logl'][np.argsort(run['logl'])])\n if dup_assert or dup_warn:\n unique_logls, counts = np.unique(run['logl'], return_counts=True)\n repeat_logls = run['logl'].shape[0] - unique_logls.shape[0]\n msg = (\n \"\"\"{} duplicate logl values (out of a total of {}). This may be caused by limited numerical precision in the output files.\nrepeated logls = {}\ncounts = {}\npositions in list of {} unique logls = {}\"\"\"\n .format(repeat_logls, run['logl'].shape[0], unique_logls[counts !=\n 1], counts[counts != 1], unique_logls.shape[0], np.where(counts !=\n 1)[0]))\n if dup_assert:\n assert repeat_logls == 0, msg\n elif dup_warn:\n if repeat_logls != 0:\n warnings.warn(msg, UserWarning)\n",
"role": "user"
},
{
"content": "check_ns_run_logls",
"role": "assistant"
}
] |
def x(run):
assert run['thread_labels'].dtype == int
uniq_th = np.unique(run['thread_labels'])
assert np.array_equal(np.asarray(range(run['thread_min_max'].shape[0])),
uniq_th), str(uniq_th)
assert np.any(run['thread_min_max'][:, 0] == -np.inf
), 'Run should have at least one thread which starts by sampling the ' + 'whole prior'
for th_lab in uniq_th:
inds = np.where(run['thread_labels'] == th_lab)[0]
th_info = 'thread label={}, first_logl={}, thread_min_max={}'.format(
th_lab, run['logl'][inds[0]], run['thread_min_max'][th_lab, :])
assert run['thread_min_max'][th_lab, 0] <= run['logl'][inds[0]
], 'First point in thread has logl less than thread min logl! ' + th_info + ', difference={}'.format(
run['logl'][inds[0]] - run['thread_min_max'][th_lab, 0])
assert run['thread_min_max'][th_lab, 1] == run['logl'][inds[-1]
], 'Last point in thread logl != thread end logl! ' + th_info
|
check_ns_run_threads
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run):\n assert run['thread_labels'].dtype == int\n uniq_th = np.unique(run['thread_labels'])\n assert np.array_equal(np.asarray(range(run['thread_min_max'].shape[0])),\n uniq_th), str(uniq_th)\n assert np.any(run['thread_min_max'][:, 0] == -np.inf\n ), 'Run should have at least one thread which starts by sampling the ' + 'whole prior'\n for th_lab in uniq_th:\n inds = np.where(run['thread_labels'] == th_lab)[0]\n th_info = 'thread label={}, first_logl={}, thread_min_max={}'.format(\n th_lab, run['logl'][inds[0]], run['thread_min_max'][th_lab, :])\n assert run['thread_min_max'][th_lab, 0] <= run['logl'][inds[0]\n ], 'First point in thread has logl less than thread min logl! ' + th_info + ', difference={}'.format(\n run['logl'][inds[0]] - run['thread_min_max'][th_lab, 0])\n assert run['thread_min_max'][th_lab, 1] == run['logl'][inds[-1]\n ], 'Last point in thread logl != thread end logl! ' + th_info\n",
"role": "user"
},
{
"content": "check_ns_run_threads",
"role": "assistant"
}
] |
def x(ns_run, **kwargs):
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
|
count_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, **kwargs):\n kwargs.pop('logw', None)\n kwargs.pop('simulate', None)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n return ns_run['logl'].shape[0]\n",
"role": "user"
},
{
"content": "count_samples",
"role": "assistant"
}
] |
def x(ns_run, logw=None, simulate=False):
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
|
logz
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, logw=None, simulate=False):\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n return scipy.special.logsumexp(logw)\n",
"role": "user"
},
{
"content": "logz",
"role": "assistant"
}
] |
def x(ns_run, logw=None, simulate=False):
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
|
evidence
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, logw=None, simulate=False):\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n return np.exp(scipy.special.logsumexp(logw))\n",
"role": "user"
},
{
"content": "evidence",
"role": "assistant"
}
] |
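Note: the two estimators above differ only by exponentiation: evidence is exp(logz) for the same log weights. A hedged check reusing the get_logw record from this dataset on a minimal invented run dict.

import numpy as np

ns_run = {
    'logl': np.array([-2.0, -1.0, 0.0]),
    'nlive_array': np.array([3.0, 2.0, 1.0]),
    'theta': np.zeros((3, 1)),
}
logw = get_logw(ns_run)  # deterministic since simulate defaults to False
assert np.isclose(evidence(ns_run, logw=logw), np.exp(logz(ns_run, logw=logw)))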
def x(ns_run, logw=None, simulate=False, param_ind=0, handle_indexerror=False):
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return np.sum(w_relative * ns_run['theta'][:, param_ind]) / np.sum(
w_relative)
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
|
param_mean
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, logw=None, simulate=False, param_ind=0, handle_indexerror=False):\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max())\n try:\n return np.sum(w_relative * ns_run['theta'][:, param_ind]) / np.sum(\n w_relative)\n except IndexError:\n if handle_indexerror:\n return np.nan\n else:\n raise\n",
"role": "user"
},
{
"content": "param_mean",
"role": "assistant"
}
] |
def x(ns_run, logw=None, simulate=False, probability=0.5, param_ind=0):
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
|
param_cred
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, logw=None, simulate=False, probability=0.5, param_ind=0):\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max())\n return weighted_quantile(probability, ns_run['theta'][:, param_ind],\n w_relative)\n",
"role": "user"
},
{
"content": "param_cred",
"role": "assistant"
}
] |
def x(ns_run, logw=None, simulate=False, param_ind=0):
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
w_relative /= np.sum(w_relative)
return np.sum(w_relative * ns_run['theta'][:, param_ind] ** 2)
|
param_squared_mean
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, logw=None, simulate=False, param_ind=0):\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max())\n w_relative /= np.sum(w_relative)\n return np.sum(w_relative * ns_run['theta'][:, param_ind] ** 2)\n",
"role": "user"
},
{
"content": "param_squared_mean",
"role": "assistant"
}
] |
def x(ns_run, logw=None, simulate=False):
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
|
r_mean
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, logw=None, simulate=False):\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max())\n r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))\n return np.sum(w_relative * r) / np.sum(w_relative)\n",
"role": "user"
},
{
"content": "r_mean",
"role": "assistant"
}
] |
def x(ns_run, logw=None, simulate=False, probability=0.5):
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
|
r_cred
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ns_run, logw=None, simulate=False, probability=0.5):\n if logw is None:\n logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)\n w_relative = np.exp(logw - logw.max())\n r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))\n return weighted_quantile(probability, r, w_relative)\n",
"role": "user"
},
{
"content": "r_cred",
"role": "assistant"
}
] |
def x(func_in, **kwargs):
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs
), 'kwargs={0} and func_in.keywords={1} contain repeated keys'.format(
kwargs, func_in.keywords)
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = '{\\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {'count_samples': 'samples', 'logz':
'$\\mathrm{log} \\mathcal{Z}$', 'evidence': '$\\mathcal{Z}$',
'r_mean': '$\\overline{|\\theta|}$', 'param_mean':
'$\\overline{\\theta_' + ind_str + '}$', 'param_squared_mean':
'$\\overline{\\theta^2_' + ind_str + '}$'}
if probability == 0.5:
cred_str = '$\\mathrm{median}('
else:
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = '$\\mathrm{C.I.}_{' + percent_str + '\\%}('
latex_name_dict['param_cred'] = cred_str + '\\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + '|\\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' + func.
__name__,)
raise
|
get_latex_name
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(func_in, **kwargs):\n if isinstance(func_in, functools.partial):\n func = func_in.func\n assert not set(func_in.keywords) & set(kwargs\n ), 'kwargs={0} and func_in.keywords={1} contain repeated keys'.format(\n kwargs, func_in.keywords)\n kwargs.update(func_in.keywords)\n else:\n func = func_in\n param_ind = kwargs.pop('param_ind', 0)\n probability = kwargs.pop('probability', 0.5)\n kwargs.pop('handle_indexerror', None)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n ind_str = '{\\\\hat{' + str(param_ind + 1) + '}}'\n latex_name_dict = {'count_samples': 'samples', 'logz':\n '$\\\\mathrm{log} \\\\mathcal{Z}$', 'evidence': '$\\\\mathcal{Z}$',\n 'r_mean': '$\\\\overline{|\\\\theta|}$', 'param_mean': \n '$\\\\overline{\\\\theta_' + ind_str + '}$', 'param_squared_mean': \n '$\\\\overline{\\\\theta^2_' + ind_str + '}$'}\n if probability == 0.5:\n cred_str = '$\\\\mathrm{median}('\n else:\n percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')\n cred_str = '$\\\\mathrm{C.I.}_{' + percent_str + '\\\\%}('\n latex_name_dict['param_cred'] = cred_str + '\\\\theta_' + ind_str + ')$'\n latex_name_dict['r_cred'] = cred_str + '|\\\\theta|)$'\n try:\n return latex_name_dict[func.__name__]\n except KeyError as err:\n err.args = err.args + ('get_latex_name not yet set up for ' + func.\n __name__,)\n raise\n",
"role": "user"
},
{
"content": "get_latex_name",
"role": "assistant"
}
] |
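Note: get_latex_name maps an estimator function (optionally wrapped in functools.partial) to a LaTeX column label via its __name__. A hedged example using the param_mean record above; param_ind is zero-based while the printed label is one-based.

import functools

est = functools.partial(param_mean, param_ind=1)
print(get_latex_name(est))
# -> $\overline{\theta_{\hat{2}}}$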
def x(probability, values, weights):
assert 1 > probability > 0, 'credible interval prob= ' + str(probability
) + ' not in (0, 1)'
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - 0.5 * weights[sorted_inds]
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
weighted_quantile
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(probability, values, weights):\n assert 1 > probability > 0, 'credible interval prob= ' + str(probability\n ) + ' not in (0, 1)'\n assert values.shape == weights.shape\n assert values.ndim == 1\n assert weights.ndim == 1\n sorted_inds = np.argsort(values)\n quantiles = np.cumsum(weights[sorted_inds]) - 0.5 * weights[sorted_inds]\n quantiles /= np.sum(weights)\n return np.interp(probability, quantiles, values[sorted_inds])\n",
"role": "user"
},
{
"content": "weighted_quantile",
"role": "assistant"
}
] |
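Note: weighted_quantile inverts a weighted empirical CDF with the standard half-weight correction at each sample, then linearly interpolates. A small check with invented values.

import numpy as np

values = np.array([1.0, 2.0, 3.0])
weights = np.array([1.0, 1.0, 2.0])
# Weighted median: the 0.5 quantile falls between 2 and 3 here.
print(weighted_quantile(0.5, values, weights))  # -> 2.333...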
def x(run, **kwargs):
write_dead = kwargs.pop('write_dead', True)
write_stats = kwargs.pop('write_stats', True)
posteriors = kwargs.pop('posteriors', False)
equals = kwargs.pop('equals', False)
stats_means_errs = kwargs.pop('stats_means_errs', True)
fmt = kwargs.pop('fmt', '% .14E')
n_simulate = kwargs.pop('n_simulate', 100)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run['output'], key + ' not in run["output"]'
root = os.path.join(run['output']['base_dir'], run['output']['file_root'])
if write_dead:
samples = run_dead_birth_array(run)
np.savetxt(root + '_dead-birth.txt', samples, fmt=fmt)
np.savetxt(root + '_dead.txt', samples[:, :-1], fmt=fmt)
if equals or posteriors:
w_rel = nestcheck.ns_run_utils.get_w_rel(run)
post_arr = np.zeros((run['theta'].shape[0], run['theta'].shape[1] + 2))
post_arr[:, 0] = w_rel
post_arr[:, 1] = -2 * run['logl']
post_arr[:, 2:] = run['theta']
if posteriors:
np.savetxt(root + '.txt', post_arr, fmt=fmt)
run['output']['nposterior'] = post_arr.shape[0]
else:
run['output']['nposterior'] = 0
if equals:
inds = np.where(w_rel > np.random.random(w_rel.shape[0]))[0]
np.savetxt(root + '_equal_weights.txt', post_arr[inds, 1:], fmt=fmt)
run['output']['nequals'] = inds.shape[0]
else:
run['output']['nequals'] = 0
if write_stats:
run['output']['ndead'] = run['logl'].shape[0]
if stats_means_errs:
estimators = [e.logz]
for i in range(run['theta'].shape[1]):
estimators.append(functools.partial(e.param_mean, param_ind=i))
values = nestcheck.ns_run_utils.run_estimators(run, estimators)
stds = nestcheck.error_analysis.run_std_bootstrap(run,
estimators, n_simulate=n_simulate)
run['output']['logZ'] = values[0]
run['output']['logZerr'] = stds[0]
run['output']['param_means'] = list(values[1:])
run['output']['param_mean_errs'] = list(stds[1:])
write_stats_file(run['output'])
|
write_run_output
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run, **kwargs):\n write_dead = kwargs.pop('write_dead', True)\n write_stats = kwargs.pop('write_stats', True)\n posteriors = kwargs.pop('posteriors', False)\n equals = kwargs.pop('equals', False)\n stats_means_errs = kwargs.pop('stats_means_errs', True)\n fmt = kwargs.pop('fmt', '% .14E')\n n_simulate = kwargs.pop('n_simulate', 100)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n mandatory_keys = ['file_root', 'base_dir']\n for key in mandatory_keys:\n assert key in run['output'], key + ' not in run[\"output\"]'\n root = os.path.join(run['output']['base_dir'], run['output']['file_root'])\n if write_dead:\n samples = run_dead_birth_array(run)\n np.savetxt(root + '_dead-birth.txt', samples, fmt=fmt)\n np.savetxt(root + '_dead.txt', samples[:, :-1], fmt=fmt)\n if equals or posteriors:\n w_rel = nestcheck.ns_run_utils.get_w_rel(run)\n post_arr = np.zeros((run['theta'].shape[0], run['theta'].shape[1] + 2))\n post_arr[:, 0] = w_rel\n post_arr[:, 1] = -2 * run['logl']\n post_arr[:, 2:] = run['theta']\n if posteriors:\n np.savetxt(root + '.txt', post_arr, fmt=fmt)\n run['output']['nposterior'] = post_arr.shape[0]\n else:\n run['output']['nposterior'] = 0\n if equals:\n inds = np.where(w_rel > np.random.random(w_rel.shape[0]))[0]\n np.savetxt(root + '_equal_weights.txt', post_arr[inds, 1:], fmt=fmt)\n run['output']['nequals'] = inds.shape[0]\n else:\n run['output']['nequals'] = 0\n if write_stats:\n run['output']['ndead'] = run['logl'].shape[0]\n if stats_means_errs:\n estimators = [e.logz]\n for i in range(run['theta'].shape[1]):\n estimators.append(functools.partial(e.param_mean, param_ind=i))\n values = nestcheck.ns_run_utils.run_estimators(run, estimators)\n stds = nestcheck.error_analysis.run_std_bootstrap(run,\n estimators, n_simulate=n_simulate)\n run['output']['logZ'] = values[0]\n run['output']['logZerr'] = stds[0]\n run['output']['param_means'] = list(values[1:])\n run['output']['param_mean_errs'] = list(stds[1:])\n write_stats_file(run['output'])\n",
"role": "user"
},
{
"content": "write_run_output",
"role": "assistant"
}
] |
def x(run, **kwargs):
nestcheck.ns_run_utils.check_ns_run(run, **kwargs)
threads = nestcheck.ns_run_utils.get_run_threads(run)
samp_arrays = []
ndim = run['theta'].shape[1]
for th in threads:
samp_arr = np.zeros((th['theta'].shape[0], ndim + 2))
samp_arr[:, :ndim] = th['theta']
samp_arr[:, ndim] = th['logl']
samp_arr[1:, ndim + 1] = th['logl'][:-1]
if th['thread_min_max'][0, 0] == -np.inf:
samp_arr[0, ndim + 1] = -1e+30
else:
samp_arr[0, ndim + 1] = th['thread_min_max'][0, 0]
samp_arrays.append(samp_arr)
samples = np.vstack(samp_arrays)
samples = samples[np.argsort(samples[:, ndim]), :]
return samples
|
run_dead_birth_array
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run, **kwargs):\n nestcheck.ns_run_utils.check_ns_run(run, **kwargs)\n threads = nestcheck.ns_run_utils.get_run_threads(run)\n samp_arrays = []\n ndim = run['theta'].shape[1]\n for th in threads:\n samp_arr = np.zeros((th['theta'].shape[0], ndim + 2))\n samp_arr[:, :ndim] = th['theta']\n samp_arr[:, ndim] = th['logl']\n samp_arr[1:, ndim + 1] = th['logl'][:-1]\n if th['thread_min_max'][0, 0] == -np.inf:\n samp_arr[0, ndim + 1] = -1e+30\n else:\n samp_arr[0, ndim + 1] = th['thread_min_max'][0, 0]\n samp_arrays.append(samp_arr)\n samples = np.vstack(samp_arrays)\n samples = samples[np.argsort(samples[:, ndim]), :]\n return samples\n",
"role": "user"
},
{
"content": "run_dead_birth_array",
"role": "assistant"
}
] |
def x(run_output_dict):
mandatory_keys = ['file_root', 'base_dir']
for key in mandatory_keys:
assert key in run_output_dict, key + ' not in run_output_dict'
default_output = {'logZ': 0.0, 'logZerr': 0.0, 'logZs': [0.0],
'logZerrs': [0.0], 'ncluster': 1, 'nposterior': 0, 'nequals': 0,
'ndead': 0, 'nlike': 0, 'nlive': 0, 'avnlike': 0.0, 'avnlikeslice':
0.0, 'param_means': [0.0, 0.0, 0.0], 'param_mean_errs': [0.0, 0.0, 0.0]
}
allowed_keys = set(mandatory_keys) | set(default_output.keys())
assert set(run_output_dict.keys()).issubset(allowed_keys
), 'Input dict contains unexpected keys: {}'.format(set(
run_output_dict.keys()) - allowed_keys)
output = copy.deepcopy(run_output_dict)
for key, value in default_output.items():
if key not in output:
output[key] = value
file_lines = ['Evidence estimates:', '===================',
' - The evidence Z is a log-normally distributed, with location and scale parameters mu and sigma.'
, ' - We denote this as log(Z) = mu +/- sigma.', '',
'Global evidence:', '----------------', '',
'log(Z) = {0} +/- {1}'.format(output['logZ'], output[
'logZerr']), '', '', 'Local evidences:', '----------------', '']
for i, (lz, lzerr) in enumerate(zip(output['logZs'], output['logZerrs'])):
file_lines.append('log(Z_ {0}) = {1} +/- {2}'.format(str(i + 1)
.rjust(2), lz, lzerr))
file_lines += ['', '', 'Run-time information:', '---------------------',
'', ' ncluster: 0 / 1', ' nposterior: {0}'.
format(output['nposterior']), ' nequals: {0}'.format(
output['nequals']), ' ndead: {0}'.format(output['ndead']),
' nlive: {0}'.format(output['nlive']),
' nlike: {0}'.format(output['nlike']),
' <nlike>: {0} ( {1} per slice )'.format(output[
'avnlike'], output['avnlikeslice']), '', '',
'Dim No. Mean Sigma']
for i, (mean, meanerr) in enumerate(zip(output['param_means'], output[
'param_mean_errs'])):
file_lines.append('{0} {1} +/- {2}'.format(str(i + 1).ljust(3),
mean, meanerr))
file_path = os.path.join(output['base_dir'], output['file_root'] + '.stats'
)
with open(file_path, 'w') as stats_file:
stats_file.writelines('{}\n'.format(line) for line in file_lines)
return output
|
write_stats_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run_output_dict):\n mandatory_keys = ['file_root', 'base_dir']\n for key in mandatory_keys:\n assert key in run_output_dict, key + ' not in run_output_dict'\n default_output = {'logZ': 0.0, 'logZerr': 0.0, 'logZs': [0.0],\n 'logZerrs': [0.0], 'ncluster': 1, 'nposterior': 0, 'nequals': 0,\n 'ndead': 0, 'nlike': 0, 'nlive': 0, 'avnlike': 0.0, 'avnlikeslice':\n 0.0, 'param_means': [0.0, 0.0, 0.0], 'param_mean_errs': [0.0, 0.0, 0.0]\n }\n allowed_keys = set(mandatory_keys) | set(default_output.keys())\n assert set(run_output_dict.keys()).issubset(allowed_keys\n ), 'Input dict contains unexpected keys: {}'.format(set(\n run_output_dict.keys()) - allowed_keys)\n output = copy.deepcopy(run_output_dict)\n for key, value in default_output.items():\n if key not in output:\n output[key] = value\n file_lines = ['Evidence estimates:', '===================',\n ' - The evidence Z is a log-normally distributed, with location and scale parameters mu and sigma.'\n , ' - We denote this as log(Z) = mu +/- sigma.', '',\n 'Global evidence:', '----------------', '',\n 'log(Z) = {0} +/- {1}'.format(output['logZ'], output[\n 'logZerr']), '', '', 'Local evidences:', '----------------', '']\n for i, (lz, lzerr) in enumerate(zip(output['logZs'], output['logZerrs'])):\n file_lines.append('log(Z_ {0}) = {1} +/- {2}'.format(str(i + 1)\n .rjust(2), lz, lzerr))\n file_lines += ['', '', 'Run-time information:', '---------------------',\n '', ' ncluster: 0 / 1', ' nposterior: {0}'.\n format(output['nposterior']), ' nequals: {0}'.format(\n output['nequals']), ' ndead: {0}'.format(output['ndead']),\n ' nlive: {0}'.format(output['nlive']),\n ' nlike: {0}'.format(output['nlike']),\n ' <nlike>: {0} ( {1} per slice )'.format(output[\n 'avnlike'], output['avnlikeslice']), '', '',\n 'Dim No. Mean Sigma']\n for i, (mean, meanerr) in enumerate(zip(output['param_means'], output[\n 'param_mean_errs'])):\n file_lines.append('{0} {1} +/- {2}'.format(str(i + 1).ljust(3),\n mean, meanerr))\n file_path = os.path.join(output['base_dir'], output['file_root'] + '.stats'\n )\n with open(file_path, 'w') as stats_file:\n stats_file.writelines('{}\\n'.format(line) for line in file_lines)\n return output\n",
"role": "user"
},
{
"content": "write_stats_file",
"role": "assistant"
}
] |
def x(run_list, estimator_list, estimator_names, n_simulate=100, **kwargs):
thread_pvalue = kwargs.pop('thread_pvalue', False)
bs_stat_dist = kwargs.pop('bs_stat_dist', False)
parallel = kwargs.pop('parallel', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(estimator_list) == len(estimator_names
), 'len(estimator_list) = {0} != len(estimator_names = {1}'.format(len
(estimator_list), len(estimator_names))
df = estimator_values_df(run_list, estimator_list, parallel=parallel,
estimator_names=estimator_names)
df.index = df.index.map(str)
df['calculation type'] = 'values'
df.set_index('calculation type', drop=True, append=True, inplace=True)
df = df.reorder_levels(['calculation type', 'run'])
bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,
n_simulate, parallel=parallel)
bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=1))
bs_std_df.index.name = 'run'
bs_std_df['calculation type'] = 'bootstrap std'
bs_std_df.set_index('calculation type', drop=True, append=True, inplace
=True)
bs_std_df = bs_std_df.reorder_levels(['calculation type', 'run'])
df = pd.concat([df, bs_std_df])
if thread_pvalue:
t_vals_df = thread_values_df(run_list, estimator_list,
estimator_names, parallel=parallel)
t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,
energy_dist=False)
t_d_df = t_d_df.xs('ks pvalue', level='calculation type',
drop_level=False)
t_d_df.index.set_levels(['thread ks pvalue'], level=
'calculation type', inplace=True)
df = pd.concat([df, t_d_df])
if bs_stat_dist:
b_d_df = pairwise_dists_on_cols(bs_vals_df)
dists = ['ks distance', 'earth mover distance', 'energy distance']
b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]
new_ind = ['bootstrap ' + b_d_df.index.get_level_values(
'calculation type'), b_d_df.index.get_level_values('run')]
b_d_df.set_index(new_ind, inplace=True)
df = pd.concat([df, b_d_df])
return df
|
run_list_error_values
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run_list, estimator_list, estimator_names, n_simulate=100, **kwargs):\n thread_pvalue = kwargs.pop('thread_pvalue', False)\n bs_stat_dist = kwargs.pop('bs_stat_dist', False)\n parallel = kwargs.pop('parallel', True)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n assert len(estimator_list) == len(estimator_names\n ), 'len(estimator_list) = {0} != len(estimator_names = {1}'.format(len\n (estimator_list), len(estimator_names))\n df = estimator_values_df(run_list, estimator_list, parallel=parallel,\n estimator_names=estimator_names)\n df.index = df.index.map(str)\n df['calculation type'] = 'values'\n df.set_index('calculation type', drop=True, append=True, inplace=True)\n df = df.reorder_levels(['calculation type', 'run'])\n bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,\n n_simulate, parallel=parallel)\n bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=1))\n bs_std_df.index.name = 'run'\n bs_std_df['calculation type'] = 'bootstrap std'\n bs_std_df.set_index('calculation type', drop=True, append=True, inplace\n =True)\n bs_std_df = bs_std_df.reorder_levels(['calculation type', 'run'])\n df = pd.concat([df, bs_std_df])\n if thread_pvalue:\n t_vals_df = thread_values_df(run_list, estimator_list,\n estimator_names, parallel=parallel)\n t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,\n energy_dist=False)\n t_d_df = t_d_df.xs('ks pvalue', level='calculation type',\n drop_level=False)\n t_d_df.index.set_levels(['thread ks pvalue'], level=\n 'calculation type', inplace=True)\n df = pd.concat([df, t_d_df])\n if bs_stat_dist:\n b_d_df = pairwise_dists_on_cols(bs_vals_df)\n dists = ['ks distance', 'earth mover distance', 'energy distance']\n b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]\n new_ind = ['bootstrap ' + b_d_df.index.get_level_values(\n 'calculation type'), b_d_df.index.get_level_values('run')]\n b_d_df.set_index(new_ind, inplace=True)\n df = pd.concat([df, b_d_df])\n return df\n",
"role": "user"
},
{
"content": "run_list_error_values",
"role": "assistant"
}
] |
def x(run_list, estimator_list, **kwargs):
estimator_names = kwargs.pop('estimator_names', [('est_' + str(i)) for
i in range(len(estimator_list))])
parallel = kwargs.pop('parallel', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
values_list = pu.parallel_apply(nestcheck.ns_run_utils.run_estimators,
run_list, func_args=(estimator_list,), parallel=parallel)
df = pd.DataFrame(np.stack(values_list, axis=0))
df.columns = estimator_names
df.index.name = 'run'
return df
|
estimator_values_df
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run_list, estimator_list, **kwargs):\n estimator_names = kwargs.pop('estimator_names', [('est_' + str(i)) for\n i in range(len(estimator_list))])\n parallel = kwargs.pop('parallel', True)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n values_list = pu.parallel_apply(nestcheck.ns_run_utils.run_estimators,\n run_list, func_args=(estimator_list,), parallel=parallel)\n df = pd.DataFrame(np.stack(values_list, axis=0))\n df.columns = estimator_names\n df.index.name = 'run'\n return df\n",
"role": "user"
},
{
"content": "estimator_values_df",
"role": "assistant"
}
] |
def x(error_values, **summary_df_kwargs):
df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)
imp_std, imp_std_unc, imp_frac, imp_frac_unc = (nestcheck.
error_analysis.implementation_std(df.loc['values std', 'value'], df
.loc['values std', 'uncertainty'], df.loc['bootstrap std mean',
'value'], df.loc['bootstrap std mean', 'uncertainty']))
df.loc[('implementation std', 'value'), df.columns] = imp_std
df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc
df.loc[('implementation std frac', 'value'), :] = imp_frac
df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc
if 'values rmse' in set(df.index.get_level_values('calculation type')):
imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = (nestcheck.
error_analysis.implementation_std(df.loc['values rmse', 'value'
], df.loc['values rmse', 'uncertainty'], df.loc[
'bootstrap std mean', 'value'], df.loc['bootstrap std mean',
'uncertainty']))
df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse
df.loc[('implementation rmse', 'uncertainty'), df.columns
] = imp_rmse_unc
df.loc[('implementation rmse frac', 'value'), :] = imp_frac
df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc
calcs_to_keep = ['true values', 'values mean', 'values std',
'values rmse', 'bootstrap std mean', 'implementation std',
'implementation std frac', 'implementation rmse',
'implementation rmse frac', 'thread ks pvalue mean',
'bootstrap ks distance mean', 'bootstrap energy distance mean',
'bootstrap earth mover distance mean']
df = pd.concat([df.xs(calc, level='calculation type', drop_level=False) for
calc in calcs_to_keep if calc in df.index.get_level_values(
'calculation type')])
return df
|
error_values_summary
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(error_values, **summary_df_kwargs):\n df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)\n imp_std, imp_std_unc, imp_frac, imp_frac_unc = (nestcheck.\n error_analysis.implementation_std(df.loc['values std', 'value'], df\n .loc['values std', 'uncertainty'], df.loc['bootstrap std mean',\n 'value'], df.loc['bootstrap std mean', 'uncertainty']))\n df.loc[('implementation std', 'value'), df.columns] = imp_std\n df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc\n df.loc[('implementation std frac', 'value'), :] = imp_frac\n df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc\n if 'values rmse' in set(df.index.get_level_values('calculation type')):\n imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = (nestcheck.\n error_analysis.implementation_std(df.loc['values rmse', 'value'\n ], df.loc['values rmse', 'uncertainty'], df.loc[\n 'bootstrap std mean', 'value'], df.loc['bootstrap std mean',\n 'uncertainty']))\n df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse\n df.loc[('implementation rmse', 'uncertainty'), df.columns\n ] = imp_rmse_unc\n df.loc[('implementation rmse frac', 'value'), :] = imp_frac\n df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc\n calcs_to_keep = ['true values', 'values mean', 'values std',\n 'values rmse', 'bootstrap std mean', 'implementation std',\n 'implementation std frac', 'implementation rmse',\n 'implementation rmse frac', 'thread ks pvalue mean',\n 'bootstrap ks distance mean', 'bootstrap energy distance mean',\n 'bootstrap earth mover distance mean']\n df = pd.concat([df.xs(calc, level='calculation type', drop_level=False) for\n calc in calcs_to_keep if calc in df.index.get_level_values(\n 'calculation type')])\n return df\n",
"role": "user"
},
{
"content": "error_values_summary",
"role": "assistant"
}
] |
def x(run_list, estimator_list, estimator_names, n_simulate, **kwargs):
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
error_values = run_list_error_values(run_list, estimator_list,
estimator_names, n_simulate, **kwargs)
return error_values_summary(error_values, true_values=true_values,
include_true_values=include_true_values, include_rmse=include_rmse)
|
run_list_error_summary
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run_list, estimator_list, estimator_names, n_simulate, **kwargs):\n true_values = kwargs.pop('true_values', None)\n include_true_values = kwargs.pop('include_true_values', False)\n include_rmse = kwargs.pop('include_rmse', False)\n error_values = run_list_error_values(run_list, estimator_list,\n estimator_names, n_simulate, **kwargs)\n return error_values_summary(error_values, true_values=true_values,\n include_true_values=include_true_values, include_rmse=include_rmse)\n",
"role": "user"
},
{
"content": "run_list_error_summary",
"role": "assistant"
}
] |
def x(run_list, estimator_list, estimator_names, n_simulate, **kwargs):
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
assert len(estimator_list) == len(estimator_names
), 'len(estimator_list) = {0} != len(estimator_names = {1}'.format(len
(estimator_list), len(estimator_names))
bs_values_list = pu.parallel_apply(nestcheck.error_analysis.
run_bootstrap_values, run_list, func_args=(estimator_list,),
func_kwargs={'n_simulate': n_simulate}, tqdm_kwargs=tqdm_kwargs, **
kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
df[name] = [arr[i, :] for arr in bs_values_list]
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
assert vals_shape == (n_simulate,), 'Should be n_simulate=' + str(
n_simulate
) + ' values in ' + 'each cell. The cell contains array with shape ' + str(
vals_shape)
return df
|
bs_values_df
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run_list, estimator_list, estimator_names, n_simulate, **kwargs):\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})\n assert len(estimator_list) == len(estimator_names\n ), 'len(estimator_list) = {0} != len(estimator_names = {1}'.format(len\n (estimator_list), len(estimator_names))\n bs_values_list = pu.parallel_apply(nestcheck.error_analysis.\n run_bootstrap_values, run_list, func_args=(estimator_list,),\n func_kwargs={'n_simulate': n_simulate}, tqdm_kwargs=tqdm_kwargs, **\n kwargs)\n df = pd.DataFrame()\n for i, name in enumerate(estimator_names):\n df[name] = [arr[i, :] for arr in bs_values_list]\n for vals_shape in df.loc[0].apply(lambda x: x.shape).values:\n assert vals_shape == (n_simulate,), 'Should be n_simulate=' + str(\n n_simulate\n ) + ' values in ' + 'each cell. The cell contains array with shape ' + str(\n vals_shape)\n return df\n",
"role": "user"
},
{
"content": "bs_values_df",
"role": "assistant"
}
] |
def x(run_list, estimator_list, estimator_names, **kwargs):
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'thread values'})
assert len(estimator_list) == len(estimator_names
), 'len(estimator_list) = {0} != len(estimator_names = {1}'.format(len
(estimator_list), len(estimator_names))
thread_vals_arrays = pu.parallel_apply(nestcheck.error_analysis.
run_thread_values, run_list, func_args=(estimator_list,),
tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
df[name] = [arr[i, :] for arr in thread_vals_arrays]
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
assert vals_shape == (run_list[0]['thread_min_max'].shape[0],
), 'Should be nlive=' + str(run_list[0]['thread_min_max'].shape[0]
) + ' values in each cell. The cell contains array with shape ' + str(
vals_shape)
return df
|
thread_values_df
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(run_list, estimator_list, estimator_names, **kwargs):\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'thread values'})\n assert len(estimator_list) == len(estimator_names\n ), 'len(estimator_list) = {0} != len(estimator_names = {1}'.format(len\n (estimator_list), len(estimator_names))\n thread_vals_arrays = pu.parallel_apply(nestcheck.error_analysis.\n run_thread_values, run_list, func_args=(estimator_list,),\n tqdm_kwargs=tqdm_kwargs, **kwargs)\n df = pd.DataFrame()\n for i, name in enumerate(estimator_names):\n df[name] = [arr[i, :] for arr in thread_vals_arrays]\n for vals_shape in df.loc[0].apply(lambda x: x.shape).values:\n assert vals_shape == (run_list[0]['thread_min_max'].shape[0],\n ), 'Should be nlive=' + str(run_list[0]['thread_min_max'].shape[0]\n ) + ' values in each cell. The cell contains array with shape ' + str(\n vals_shape)\n return df\n",
"role": "user"
},
{
"content": "thread_values_df",
"role": "assistant"
}
] |
def x(df_in, earth_mover_dist=True, energy_dist=True):
df = pd.DataFrame()
for col in df_in.columns:
df[col] = nestcheck.error_analysis.pairwise_distances(df_in[col].
values, earth_mover_dist=earth_mover_dist, energy_dist=energy_dist)
return df
|
pairwise_dists_on_cols
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(df_in, earth_mover_dist=True, energy_dist=True):\n df = pd.DataFrame()\n for col in df_in.columns:\n df[col] = nestcheck.error_analysis.pairwise_distances(df_in[col].\n values, earth_mover_dist=earth_mover_dist, energy_dist=energy_dist)\n return df\n",
"role": "user"
},
{
"content": "pairwise_dists_on_cols",
"role": "assistant"
}
] |
def x(cols):
def x(s):
b = '' if s == '*' or not s else '`'
return [_ for _ in [b + (s or '') + b] if _]
formatted = []
for c in cols:
if c[0] == '#':
formatted.append(c[1:])
elif c.startswith('(') and c.endswith(')'):
formatted.append(c)
else:
formatted.append('.'.join(bt(c.split('.')[0]) + bt('.'.join(c.
split('.')[1:]))))
return ', '.join(formatted)
|
backtick_columns
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(cols):\n\n def x(s):\n b = '' if s == '*' or not s else '`'\n return [_ for _ in [b + (s or '') + b] if _]\n formatted = []\n for c in cols:\n if c[0] == '#':\n formatted.append(c[1:])\n elif c.startswith('(') and c.endswith(')'):\n formatted.append(c)\n else:\n formatted.append('.'.join(bt(c.split('.')[0]) + bt('.'.join(c.\n split('.')[1:]))))\n return ', '.join(formatted)\n",
"role": "user"
},
{
"content": "backtick_columns",
"role": "assistant"
}
] |
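Note: the formatter above leaves '#'-prefixed entries and parenthesised subqueries untouched and backticks everything else, splitting table.column names on the first dot. The call below assumes the de-anonymised original, where the inner helper is named bt as the call sites inside the function suggest.

cols = ['id', 'users.name', '#COUNT(*)', '(SELECT 1)']
print(backtick_columns(cols))
# -> `id`, `users`.`name`, COUNT(*), (SELECT 1)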
def x(self, value, columnname=False, placeholder='%s'):
if not isinstance(value, dict):
raise TypeError('Input value should be a dictionary')
q = []
a = []
for k, v in value.items():
if k[0] == '#':
q.append(' = '.join([self._backtick(k[1:]), str(v)]) if
columnname else v)
else:
q.append(' = '.join([self._backtick(k), placeholder]) if
columnname else placeholder)
a.append(v)
return ', '.join(q), tuple(a)
|
value_parser
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, value, columnname=False, placeholder='%s'):\n if not isinstance(value, dict):\n raise TypeError('Input value should be a dictionary')\n q = []\n a = []\n for k, v in value.items():\n if k[0] == '#':\n q.append(' = '.join([self._backtick(k[1:]), str(v)]) if\n columnname else v)\n else:\n q.append(' = '.join([self._backtick(k), placeholder]) if\n columnname else placeholder)\n a.append(v)\n return ', '.join(q), tuple(a)\n",
"role": "user"
},
{
"content": "value_parser",
"role": "assistant"
}
] |
def x(self, columns):
return columns if self.isstr(columns) else self._backtick_columns(columns)
|
by_columns
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, columns):\n return columns if self.isstr(columns) else self._backtick_columns(columns)\n",
"role": "user"
},
{
"content": "by_columns",
"role": "assistant"
}
] |
def x(self, table, columns=None, join=None, where=None, group=None, having=
None, order=None, limit=None, iterator=False, fetch=True):
if not columns:
columns = ['*']
where_q, _args = self._where_parser(where)
_sql = ''.join(['SELECT ', self._backtick_columns(columns), ' FROM ',
self._tablename_parser(table)['formatted_tablename'], self.
_join_parser(join), where_q, ' GROUP BY ' + self._by_columns(group) if
group else '', ' HAVING ' + having if having else '', ' ORDER BY ' +
self._by_columns(order) if order else '', self._limit_parser(limit),
';'])
if self.debug:
return self.cur.mogrify(_sql, _args)
execute_result = self.cur.execute(_sql, _args)
if not fetch:
return execute_result
if self.cursorclass in (pymysql.cursors.SSCursor, pymysql.cursors.
SSDictCursor):
return self.cur
if iterator:
return self._yield_result()
return self.cur.fetchall()
|
select
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, table, columns=None, join=None, where=None, group=None, having=\n None, order=None, limit=None, iterator=False, fetch=True):\n if not columns:\n columns = ['*']\n where_q, _args = self._where_parser(where)\n _sql = ''.join(['SELECT ', self._backtick_columns(columns), ' FROM ',\n self._tablename_parser(table)['formatted_tablename'], self.\n _join_parser(join), where_q, ' GROUP BY ' + self._by_columns(group) if\n group else '', ' HAVING ' + having if having else '', ' ORDER BY ' +\n self._by_columns(order) if order else '', self._limit_parser(limit),\n ';'])\n if self.debug:\n return self.cur.mogrify(_sql, _args)\n execute_result = self.cur.execute(_sql, _args)\n if not fetch:\n return execute_result\n if self.cursorclass in (pymysql.cursors.SSCursor, pymysql.cursors.\n SSDictCursor):\n return self.cur\n if iterator:\n return self._yield_result()\n return self.cur.fetchall()\n",
"role": "user"
},
{
"content": "select",
"role": "assistant"
}
] |
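Note: a hedged usage sketch for the select method above; db is assumed to be an instance of this pymysql wrapper with an open connection, and the table and keys are invented. When the instance was built with debug=True the method returns the interpolated SQL via cur.mogrify instead of executing, which helps inspect the generated query.

rows = db.select(
    'users',
    columns=['id', 'users.name'],
    where={'status': 'active'},
    order=['id'],
    limit=10,
)
# Builds roughly: SELECT `id`, `users`.`name` FROM `users` WHERE ... ORDER BY `id` ... ;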
def x(self, limit, offset=0, **kwargs):
start = offset
while True:
result = self.select(limit=[start, limit], **kwargs)
start += limit
if result:
yield result
else:
break
if self.debug:
break
|
select_page
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, limit, offset=0, **kwargs):\n start = offset\n while True:\n result = self.select(limit=[start, limit], **kwargs)\n start += limit\n if result:\n yield result\n else:\n break\n if self.debug:\n break\n",
"role": "user"
},
{
"content": "select_page",
"role": "assistant"
}
] |
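Note: select_page wraps select in a generator that advances a LIMIT/OFFSET window until an empty page comes back. A hedged sketch with the same hypothetical db instance as above.

# Stream a large table in pages of 500 rows (table name invented).
for page in db.select_page(500, table='users', columns=['id', 'name']):
    for row in page:
        handle_row(row)  # placeholder for real per-row work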