repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/twitter_button/models.py | from cms.models import CMSPlugin
from django.db import models
from django.utils.translation import ugettext_lazy as _
class TwitterButtonPluginModel(CMSPlugin):
    """Per-instance settings for a Twitter 'Tweet' share button."""
    # default='' (not None): these columns are NOT NULL (null=True is not set),
    # so a default of None would fail with an IntegrityError when the field is
    # left blank and the instance is saved.
    tweet_text = models.CharField(_("Tweet text"), default='', blank=True, max_length=60, help_text=_("Leave blank to use page title"))
    hash_tag = models.CharField(_("Hash tag"), default='', blank=True, max_length=60, help_text=_("Leave blank for none"))
    large_button = models.BooleanField(_("Large button"), default=False)

    def __unicode__(self):
        # Fixed admin label; the rendered button text comes from the template.
        return "'Tweet' button"
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/google_font/models.py | from cms.models import CMSPlugin
from cms.models.pagemodel import Page
from django.db import models
from django.utils.translation import ugettext_lazy as _
class GoogleFontPluginModel(CMSPlugin):
    """Settings for loading a Google Web Fonts stylesheet on the page."""
    # Family string in Google-Fonts URL form: plus signs instead of spaces,
    # optional :b/:i style suffixes, multiple families separated by '|'.
    font_family_pluses = models.CharField(_("font family name"), default='Eagle+Lake', max_length=100, help_text=_("You must use plus signs instead of spaces here. Add :b or :i for bold or italics styles. Separate multiple font families with a | symbol. See https://developers.google.com/webfonts/docs/getting_started for more."))

    def __unicode__(self):
        """Admin label: the configured font family string."""
        return self.font_family_pluses
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/text_minimal_markup/models.py | # Plugin models
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from os.path import basename
from django.db import models
class TextMinimalMarkupPluginModel(CMSPlugin):
    """A text block with an optional heading and minimal-markup body text."""
    title = models.CharField(max_length=120, blank=True, help_text=_("Optional heading"))
    # Plain text; the template/filter layer presumably linkifies URLs and
    # email addresses as the help text describes — rendering happens elsewhere.
    promo_text = models.TextField(_("Text"), blank=True, help_text=_("Websites and email addresses will become linkable. HTML symbol codes are allowed, e.g. &amp;copy; for &copy;"))

    def __unicode__(self):
        """Admin label: the (possibly empty) heading."""
        return self.title
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/hbar/cms_plugins.py | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from models import *
# Admin grouping labels shared by the plugins in this package.
bootstrap_module_name = _("Widgets")
layout_module_name = _("Layout elements")
generic_module_name = _("Generic")
meta_module_name = _("Meta elements")


class HBarPlugin(CMSPluginBase):
    """Renders a plain horizontal-rule layout element."""
    model = CMSPlugin  # stateless: no per-instance settings required
    name = _("Horizontal line")
    module = layout_module_name
    render_template = "hbar_plugin.html"

    def render(self, context, instance, placeholder):
        # Expose the plugin instance to the template.
        context.update({'instance': instance})
        return context


plugin_pool.register_plugin(HBarPlugin)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/google_analytics/cms_plugins.py | <reponame>RacingTadpole/cmsplugin-rt<filename>cmsplugin_rt/google_analytics/cms_plugins.py
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from models import *
# Admin grouping labels shared by the plugins in this package.
bootstrap_module_name = _("Widgets")
layout_module_name = _("Layout elements")
generic_module_name = _("Generic")
meta_module_name = _("Meta elements")


class GoogleAnalyticsPlugin(CMSPluginBase):
    """Emits the Google Analytics tracking snippet for the configured account."""
    model = GoogleAnalyticsPluginModel
    name = "Google analytics"
    render_template = 'google_analytics_plugin.html'
    module = meta_module_name

    def render(self, context, instance, placeholder):
        # Make the settings instance and display name available to the template.
        context['instance'] = instance
        context['name'] = self.name
        return context


plugin_pool.register_plugin(GoogleAnalyticsPlugin)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/resizeable_picture/models.py | # Plugin models
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from os.path import basename
from django.db import models
class ResizeablePicturePluginModel(CMSPlugin):
    """
    A Resizeable Picture with or without a link.

    Width/height fields are free-form CSS-ish strings (pixels or percentages)
    interpreted by the template, hence CharField rather than IntegerField.
    """
    CENTER = "center"
    LEFT = "left"
    RIGHT = "right"
    FLOAT_CHOICES = ((CENTER, _("center")),
                     (LEFT, _("left")),
                     (RIGHT, _("right")),
                     )
    image = models.ImageField(_("image"), upload_to=CMSPlugin.get_media_path)
    url = models.CharField(_("link"), max_length=255, blank=True, null=True, help_text=_("if present image will be clickable"))
    page_link = models.ForeignKey(Page, verbose_name=_("page"), null=True, blank=True, help_text=_("if present image will be clickable"))
    alt = models.CharField(_("alternate text"), max_length=255, blank=True, null=True, help_text=_("textual description of the image"))
    longdesc = models.CharField(_("long description"), max_length=255, blank=True, null=True, help_text=_("additional description of the image"))
    float = models.CharField(_("side"), max_length=10, blank=True, null=True, choices=FLOAT_CHOICES)
    img_width = models.CharField(_("width"), max_length=10, blank=True, help_text=_("if present, image will be scaled to this width (in pixels), e.g. 100. Alternatively enter a percentage of the view, e.g. 100%; the picture will rescale if the window's size changes"))
    img_max_width = models.CharField(_("max width"), max_length=10, blank=True, help_text=_("if present, image will not exceed this width (in pixels), e.g. 100. More commonly, enter a percentage of the view, e.g. 100% would prevent the picture from being wider than the view."))
    img_height = models.CharField(_("height"), max_length=10, blank=True, help_text=_("if present, image will be scaled to this height (in pixels), e.g. 80."))
    img_max_height = models.CharField(_("max height"), max_length=10, blank=True, help_text=_("if present, image will not exceed this height (in pixels), e.g. 80. Or enter a percentage of the view's height."))

    def __unicode__(self):
        """Admin label: alt text, else the image filename, else a placeholder."""
        if self.alt:
            return self.alt[:40]
        elif self.image:
            # Accessing .path raises when no file is associated with the field
            # (AttributeError/ValueError) — fall through to the placeholder
            # instead of crashing the admin list. Narrowed from a bare except.
            try:
                return u"%s" % basename(self.image.path)
            except (AttributeError, ValueError):
                pass
        return "<empty>"
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/facebook_button/cms_plugins.py | <filename>cmsplugin_rt/facebook_button/cms_plugins.py
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from models import *
class BasePlugin(CMSPluginBase):
    """Shared base class for social-networking button plugins."""
    name = None  # concrete subclasses supply their display name
    module = _("Social Networking")

    def render(self, context, instance, placeholder):
        # Expose the instance and the plugin's display name to the template.
        context['instance'] = instance
        context['name'] = self.name
        return context


class FacebookButtonPlugin(BasePlugin):
    """Renders a Facebook 'like' button."""
    model = FacebookButtonPluginModel
    name = "Facebook 'like' button"
    render_template = 'facebook_button_plugin.html'


plugin_pool.register_plugin(FacebookButtonPlugin)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/rt_carousel/migrations/0001_initial.py | <filename>cmsplugin_rt/rt_carousel/migrations/0001_initial.py
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the RTCarouselPluginModel table."""

    def forwards(self, orm):
        # Adding model 'RTCarouselPluginModel'
        db.create_table(u'cmsplugin_rtcarouselpluginmodel', (
            (u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('height', self.gf('django.db.models.fields.IntegerField')(default=480)),
            ('margin', self.gf('django.db.models.fields.CharField')(default='40px 0 40px 0', max_length=50, blank=True)),
            ('display_as', self.gf('django.db.models.fields.CharField')(default='vanilla carousel', max_length=30)),
            ('mini', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('animated', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal(u'rt_carousel', ['RTCarouselPluginModel'])

    def backwards(self, orm):
        # Deleting model 'RTCarouselPluginModel'
        db.delete_table(u'cmsplugin_rtcarouselpluginmodel')

    # Frozen ORM snapshot used by South when this migration runs.
    # Auto-generated; do not edit by hand.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'rt_carousel.rtcarouselpluginmodel': {
            'Meta': {'object_name': 'RTCarouselPluginModel', 'db_table': "u'cmsplugin_rtcarouselpluginmodel'", '_ormbases': ['cms.CMSPlugin']},
            'animated': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'display_as': ('django.db.models.fields.CharField', [], {'default': "'vanilla carousel'", 'max_length': '30'}),
            'height': ('django.db.models.fields.IntegerField', [], {'default': '480'}),
            'margin': ('django.db.models.fields.CharField', [], {'default': "'40px 0 40px 0'", 'max_length': '50', 'blank': 'True'}),
            'mini': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
        }
    }

    complete_apps = ['rt_carousel']
RacingTadpole/cmsplugin-rt | cmsplugin_rt/navbar/models.py | # Plugin models
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.utils.translation import ugettext_lazy as _
from django.db import models
class NavbarPluginModel(CMSPlugin):
    """Settings for a Twitter Bootstrap navigation bar."""
    # Bootstrap positioning classes for the navbar element.
    DISPLAY_CHOICES =(("", _("default")),
                      ("navbar-fixed-top", _("fixed to top")),
                      ("navbar-fixed-bottom", _("fixed to bottom")),
                      ("navbar-static-top", _("static top")),
                      )
    # Icon colour variants for the user-actions icon.
    USER_ICON_CHOICES = (("",_("grey")),
                         ("icon-white",_("white")),
                         )
    navbar_type = models.CharField(_("navbar type"), max_length=64, blank=True, default="navbar-fixed-top", choices=DISPLAY_CHOICES)
    inverted = models.BooleanField(default=False)
    brand = models.CharField(max_length=80, default='', blank=True)
    link_to_children = models.BooleanField(default=True,
        help_text=_("Show links to all navigable children of the home page. NOTE: You must set the home page's id to 'home' under the advanced settings. Will not work if there is a 'softroot' in your CMS."))
    icon_type = models.CharField(_("user actions icon type"), max_length=24, blank=True, choices=USER_ICON_CHOICES)

    def __unicode__(self):
        """Admin label: the brand text, or empty when no brand is set."""
        if self.brand:
            return self.brand
        else:
            return ""
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/button/templatetags/allow_special.py | from django import template
register = template.Library()
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
import re
@register.filter(needs_autoescape=True)
def allow_special(text, autoescape=None):
    """This filter turns any http(s)://www.* or www.* into a link,
    and any mailto:<EMAIL> into a clickable email field <EMAIL>.
    It also allows any &xxxxx; code through to be rendered in html
    """
    # Escape normal text only when autoescaping is active.
    esc = conditional_escape if autoescape else (lambda x: x)
    # Character classes for URL-ish and email-ish tokens.
    addr = r'[A-Za-z0-9\/\.\-]+\.[A-Za-z0-9\/\.\-]+[A-Za-z0-9]'
    mailaddr = r'[A-Za-z0-9\._]+@[A-Za-z0-9\._]+[A-Za-z0-9]'
    re_http = re.compile(r'(?P<links>https?://'+addr+')')
    re_www = re.compile(r'(?P<links>www\.'+addr+')')
    #pmail = re.compile(r'mailto:(?P<mail>[A-Za-z0-9\._@]+[A-Za-z0-9])')
    re_mail = re.compile(r'(?P<mail>'+mailaddr+')')
    special = r'&#?[0-9a-z]{0,8};'
    re_special = re.compile(special)
    # The capturing group makes re.split keep the matched tokens in the list.
    #p = re.compile(r'(https?://www\.'+addr+'|www\.'+addr+'|mailto:[A-Za-z0-9\._@]+[A-Za-z0-9]|'+special+')')
    splitter = re.compile(r'(https?://'+addr+'|www\.'+addr+'|'+mailaddr+'|'+special+')')
    pieces = []
    for chunk in splitter.split(text):
        # Branch order matters: full URLs before bare www, before email.
        if re_http.match(chunk):
            pieces.append("<a href='%s' target='_blank'>%s</a>" % (chunk, chunk))
        elif re_www.match(chunk):
            pieces.append("<a href='http://%s' target='_blank'>%s</a>" % (chunk, chunk))
        elif re_mail.match(chunk):
            pieces.append("<a href='mailto:%s'>%s</a>" % (chunk, chunk))
        elif re_special.match(chunk):
            # Pass HTML entity codes through unescaped.
            pieces.append(chunk)
        else:
            pieces.append(esc(chunk))
    return mark_safe("".join(pieces))
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/style_modifier/models.py | <filename>cmsplugin_rt/style_modifier/models.py
# Plugin models
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from os.path import basename
from django.db import models
from django.conf import settings
class StyleModifierPluginModel(CMSPlugin):
    """
    Adds a style element to change the css styling on the fly.

    CLASS_CHOICES is assembled once at import time: generic selectors plus a
    framework-specific set chosen via the RT_FRONT_END_FRAMEWORK setting
    ('BOOTSTRAP' is the default), extended by RT_MORE_STYLE_CLASSES.
    """
    # (CSS selector, human-readable label) pairs valid for any front end.
    GENERIC_CLASSES = (("body", _("body (ie. everything)")),
                       ("p", _("paragraphs")),
                       ("hr", _("horizontal lines")),
                       (".plugin_picture", _("picture plugins")),
                       (".footer", _("footer")),
                       (".footer a, .footer a:link, .footer a:visited, .footer a:hover, .footer a:active", _("footer links")),
                       )
    # Selectors specific to Twitter Bootstrap.
    BOOTSTRAP_CLASSES = (
                       (".navbar,#navbar", _("navigation bar")),
                       ("#navbar li > a", _("navigation bar links")),
                       ("#navbar .active a", _("navigation bar active links")),
                       ("#navbar a:hover, #navbar a:focus", _("navigation bar hover state")),
                       (".dropdown-menu", _("dropdown menus")),
                       ("#navbar .dropdown-menu li > a", _("navbar dropdown links")),
                       (".hero-unit", _("hero")),
                       (".container, .container-fluid", _("containers")),
                       (".btn", _("buttons")),
                       (".btn:hover, .btn:active, .btn.focus, .btn.disabled, .btn[disabled]", _("active buttons")),
                       (".btn.btn-primary", _("primary buttons")),
                       (".btn.btn-primary:hover, .btn.btn-primary:active, .btn.btn-primary.focus", _("active primary buttons")),
                       (".btn-link, .btn-link:hover, .btn-link:active, .btn-link:focus", _("links in forms")),
                       (".well", _("wells")),
                       )
    # Selectors specific to jQuery Mobile.
    JQUERY_MOBILE_CLASSES = (
                       (".ui-body-b,.ui-dialog.ui-overlay-b", _("background body")),
                       (".ui-btn-up-b", _("buttons and bars")),
                       (".ui-btn-hover-b", _("buttons and bars, hover state")),
                       )
    # NOTE: evaluated once when the module is imported; changing the settings
    # requires a restart to take effect.
    front_end = getattr(settings,'RT_FRONT_END_FRAMEWORK','BOOTSTRAP').upper()
    extra_classes = getattr(settings,'RT_MORE_STYLE_CLASSES',())
    if (front_end=="BOOTSTRAP"):
        CLASS_CHOICES = GENERIC_CLASSES + (("",_("---------")),) + BOOTSTRAP_CLASSES + (("",_("---------")),) + extra_classes
    elif (front_end=="JQUERY-MOBILE"):
        CLASS_CHOICES = GENERIC_CLASSES + (("",_("---------")),) + JQUERY_MOBILE_CLASSES + (("",_("---------")),) + extra_classes
    else:
        CLASS_CHOICES = GENERIC_CLASSES + (("",_("---------")),) + extra_classes
    ALIGN_CHOICES = (("left", _("left")),
                     ("center", _("centre")),
                     ("right", _("right"))
                     )
    mod_class = models.CharField(_("class to modify"), max_length=120, choices=CLASS_CHOICES)
    background_image = models.ImageField(_("background image"), upload_to=CMSPlugin.get_media_path, blank=True, help_text=_("Leave blank for none."))
    background_color = models.CharField(max_length=32, blank=True, help_text=_("Use a simple name, e.g. green or darkgreen (no spaces!), or an RGB code like #f2f2f0. Leave blank for default."))
    top_gradient_color = models.CharField(max_length=32, blank=True, help_text=_("Use a simple name or RGB code. Leave blank for default."))
    bottom_gradient_color = models.CharField(max_length=32, blank=True, help_text=_("Use a simple name or RGB code. Leave blank for default."))
    font_family = models.CharField(max_length=64, blank=True, help_text=_("Leave blank for default."))
    text_color = models.CharField(max_length=32, blank=True, help_text=_("Use a simple name or RGB code. Leave blank for default."))
    text_shadow = models.CharField(max_length=32, blank=True, help_text=_("Horizontal and vertical shadow distance (and optional fuzziness) followed by a color, e.g. 2px 2px black. Leave blank for default."))
    text_align = models.CharField(max_length=32, blank=True, choices=ALIGN_CHOICES, help_text=_("Leave blank for default."))
    freeform = models.CharField(max_length=96, blank=True, help_text=_("Enter your own css if desired, e.g. padding: 5px;"))

    def __unicode__(self):
        """Admin label: the human-readable label for the selected class."""
        for (x,y) in self.CLASS_CHOICES:
            if self.mod_class==x:
                # NOTE(review): y is a lazy translation proxy; .decode()
                # presumably coerces it to unicode under Python 2 — confirm
                # this does not raise for non-ASCII labels.
                return y.decode()
        return self.mod_class
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/google_analytics/models.py | <filename>cmsplugin_rt/google_analytics/models.py<gh_stars>1-10
from cms.models import CMSPlugin
from cms.models.pagemodel import Page
from django.db import models
from django.utils.translation import ugettext_lazy as _
class GoogleAnalyticsPluginModel(CMSPlugin):
    """Google Analytics tracking settings (classic UA-style account code)."""
    account_code = models.CharField(_("Account code"), default='UA-xxxxxxxx-x', max_length=50)
    domain_name = models.CharField(_("Domain name"), default='example.com', max_length=60)
    subdomains = models.BooleanField(_("Track subdomains"), default=False)

    def __unicode__(self):
        """Admin label: the configured account code."""
        return self.account_code
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/rt_carousel/cms_plugins.py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from models import RTCarouselPluginModel
# Admin grouping labels shared by the plugins in this package.
bootstrap_module_name = _("Widgets")
layout_module_name = _("Layout elements")
generic_module_name = _("Generic")
meta_module_name = _("Meta elements")


class RTCarouselPlugin(CMSPluginBase):
    """Renders a carousel of items drawn from a generic 'content group'."""
    model = RTCarouselPluginModel
    name = _("Carousel")
    module = bootstrap_module_name
    render_template = "rt_carousel_plugin.html"
    admin_preview = False

    def render(self, context, instance, placeholder):
        import uuid
        context['uuid'] = uuid.uuid4().hex[:4] # allows for multiple onscreen
        max_width = 320 # an approximate width for the image - depends on the screensize of course
        max_width_mini_carousel = 480
        context['instance'] = instance
        context['display_as'] = instance.display_as
        # NOTE(review): instance.content_group is not visible in this file —
        # presumably a generic relation defined on the model; confirm. The
        # items come from the group's first many-to-many field.
        if instance.content_group and instance.content_group._meta.many_to_many:
            m2m_fieldname = instance.content_group._meta.many_to_many[0].name
            context['item_list'] = getattr(instance.content_group, m2m_fieldname).all()
            # Vertically centre each flagship image in the carousel frame.
            # NOTE(review): max_width/item.flagship_width is integer division
            # under Python 2, so the ratio truncates to 0 or 1 — confirm this
            # truncation is intended.
            context['flagship_margin_top_list'] = [(item.flagship_height and int(max(instance.height - item.flagship_height * min(1,max_width/item.flagship_width), 0)/2) or 0) for item in context['item_list'] ]
            context['mini_flagship_margin_top_list'] = [(item.flagship_height and int(max(instance.height - item.flagship_height * min(1,max_width_mini_carousel/item.flagship_width), 0)/2) or 0) for item in context['item_list'] ]
            # Attach the computed margins directly to the items for the template.
            for i in range(len(context['flagship_margin_top_list'])):
                context['item_list'][i].flagship_margin_top = context['flagship_margin_top_list'][i]
                context['item_list'][i].mini_flagship_margin_top = context['mini_flagship_margin_top_list'][i]
        return context


plugin_pool.register_plugin(RTCarouselPlugin)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/button_appstore/models.py | <gh_stars>1-10
# Plugin models
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.conf import settings
class ButtonAppstorePluginModel(CMSPlugin):
    """Settings for an App Store download button linking to an iTunes page."""
    button_link = models.URLField(_("Direct link"), help_text=_("e.g. https://itunes.apple.com/us/app/aquizzical/id627435810?mt=8&uo=4"))

    def __unicode__(self):
        # Fixed admin label; the button always advertises an iTunes download.
        return u"Download from iTunes"
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/open_graph/models.py | <gh_stars>1-10
# Plugin models
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from os.path import basename
from django.db import models
class OpenGraphPluginModel(CMSPlugin):
    """Open Graph <meta> tag settings (og:* properties plus Facebook app ID)."""
    og_title = models.CharField(_("title"), max_length=255, help_text=_("Page title"))
    og_type = models.CharField(_("type"), max_length=60, default="website", help_text=_("Only certain types, e.g. 'website', are allowed. See http://developers.facebook.com/docs/opengraphprotocol/#types for more info."))
    og_url = models.CharField(_("permanent URL"), default="http://", max_length=255, help_text=_("Include the http:// or https://."))
    og_image = models.ImageField(_("icon"), upload_to=CMSPlugin.get_media_path, help_text=_("A square icon (over 200x200 pixels is recommended)."))
    fb_app_id = models.CharField(_("Facebook app ID"), max_length=80, blank=True, help_text=_("Required for Facebook."))
    og_site_name = models.CharField(_("site name"), max_length=255, blank=True, help_text=_("Optional name for your site."))
    og_description = models.CharField(_("description"), max_length=255, blank=True, help_text=_("Optional one to two sentence description of your page."))

    def __unicode__(self):
        """Admin label: the page's Open Graph title."""
        return self.og_title
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/google_font/cms_plugins.py | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from models import *
# Admin grouping labels shared by the plugins in this package.
bootstrap_module_name = _("Widgets")
layout_module_name = _("Layout elements")
generic_module_name = _("Generic")
meta_module_name = _("Meta elements")


class GoogleFontPlugin(CMSPluginBase):
    """Emits the <link> tag that loads the configured Google Web Font."""
    model = GoogleFontPluginModel
    name = "Google font"
    render_template = 'google_font_plugin.html'
    module = meta_module_name

    def render(self, context, instance, placeholder):
        # Make the settings instance and display name available to the template.
        context['instance'] = instance
        context['name'] = self.name
        return context


plugin_pool.register_plugin(GoogleFontPlugin)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/mailchimp_form/models.py | from cms.models import CMSPlugin
from django.db import models
from django.utils.translation import ugettext_lazy as _
class MailChimpPluginModel(CMSPlugin):
    """Settings for a MailChimp subscribe form styled with Bootstrap buttons."""
    # Bootstrap button colour/intent classes.
    CLASS_CHOICES =(("", _("default")),
                    ("btn-primary", _("primary")),
                    ("btn-info", _("info")),
                    ("btn-success", _("success")),
                    ("btn-warning", _("warning")),
                    ("btn-danger", _("danger")),
                    ("btn-inverse", _("inverse")),
                    ("btn-link", _("link")),
                    )
    # Bootstrap button size classes.
    SIZE_CHOICES = (("", _("default")),
                    ("btn-large", _("large")),
                    ("btn-small", _("small")),
                    ("btn-mini", _("mini")),
                    )
    title = models.CharField(max_length=120, blank=True, default="Subscribe to our mailing list")
    # The POST target copied from MailChimp's embedded-form code.
    form_action = models.URLField(help_text=_("Please paste this in from the 'action=' part of the embedded code provided by mailchimp.com, e.g. http://yourname.us6.list-manage.com/subscribe/post?u=c73da2d768ef08412d44553ac&amp;id=65df681376"))
    subscribe_text = models.CharField(max_length=60, default="Subscribe")
    button_type = models.CharField(_("button type"), max_length=16, blank=True, choices=CLASS_CHOICES)
    button_size = models.CharField(_("button size"), max_length=16, blank=True, choices=SIZE_CHOICES)

    def __unicode__(self):
        """Admin label: the form's title text."""
        return self.title
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/facebook_button/migrations/0001_initial.py | <reponame>RacingTadpole/cmsplugin-rt<gh_stars>1-10
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the FacebookButtonPluginModel table."""

    def forwards(self, orm):
        # Adding model 'FacebookButtonPluginModel'
        db.create_table('cmsplugin_facebookbuttonpluginmodel', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('layout', self.gf('django.db.models.fields.CharField')(default='standard', max_length=50)),
            ('url', self.gf('django.db.models.fields.CharField')(default='http://', max_length=255, blank=True)),
            ('send', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('show_faces', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=None, null=True, blank=True)),
            ('verb', self.gf('django.db.models.fields.CharField')(default='like', max_length=50)),
            ('font', self.gf('django.db.models.fields.CharField')(default='verdana', max_length=50)),
            ('color_scheme', self.gf('django.db.models.fields.CharField')(default='light', max_length=50)),
        ))
        db.send_create_signal('facebook_button', ['FacebookButtonPluginModel'])

    def backwards(self, orm):
        # Deleting model 'FacebookButtonPluginModel'
        db.delete_table('cmsplugin_facebookbuttonpluginmodel')

    # Frozen ORM snapshot used by South when this migration runs.
    # Auto-generated; do not edit by hand.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 25, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        # NOTE(review): this key uses a hyphen ('facebook-button') while the
        # app label everywhere else is 'facebook_button' — looks like a typo;
        # confirm before relying on orm['facebook_button....'] lookups here.
        'facebook-button.facebookbuttonpluginmodel': {
            'Meta': {'object_name': 'FacebookButtonPluginModel', 'db_table': "'cmsplugin_facebookbuttonpluginmodel'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'color_scheme': ('django.db.models.fields.CharField', [], {'default': "'light'", 'max_length': '50'}),
            'font': ('django.db.models.fields.CharField', [], {'default': "'verdana'", 'max_length': '50'}),
            'layout': ('django.db.models.fields.CharField', [], {'default': "'standard'", 'max_length': '50'}),
            'send': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'show_faces': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'default': "'http://'", 'max_length': '255', 'blank': 'True'}),
            'verb': ('django.db.models.fields.CharField', [], {'default': "'like'", 'max_length': '50'}),
            'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['facebook_button']
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/meta_icons/models.py | <gh_stars>1-10
# Plugin models
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from os.path import basename
from django.db import models
class MetaIconsPluginModel(CMSPlugin):
    """Favicon and Apple touch icon settings rendered into the page head."""
    fav_icon = models.ImageField(_("Bookmark or 'fav' icon"), upload_to=CMSPlugin.get_media_path, blank=True, help_text=_("A small square icon (either 16x16 or 32x32 pixels is best). Leave blank for none."))
    touch_icon = models.ImageField(_("Apple touch icon"), upload_to=CMSPlugin.get_media_path, blank=True, help_text=_("A square icon for mobile device home pages. For retina iPad, use 144x144 pixels. Leave blank to use the bookmark icon."))

    def __unicode__(self):
        """Admin label: fav icon filename, else touch icon filename."""
        # Accessing .path raises when no file is associated with the field
        # (AttributeError/ValueError) — fall through to the placeholder instead
        # of crashing the admin list. Narrowed from a bare except.
        try:
            if self.fav_icon:
                return u"%s" % basename(self.fav_icon.path)
            elif self.touch_icon:
                return u"%s" % basename(self.touch_icon.path)
            else:
                return "No icons"
        except (AttributeError, ValueError):
            pass
        return "<empty>"
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/self_calc_pagination/cms_plugins.py | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from models import *
# Admin grouping labels shared by the plugins in this package.
bootstrap_module_name = _("Widgets")
layout_module_name = _("Layout elements")
generic_module_name = _("Generic")
meta_module_name = _("Meta elements")


class SelfCalcPaginationPlugin(CMSPluginBase):
    """Renders pagination links; all state is derived inside the template."""
    model = CMSPlugin  # stateless: no per-instance settings required
    name = _("Pagination")
    module = bootstrap_module_name
    render_template = "self_calc_pagination_plugin.html"

    def render(self, context, instance, placeholder):
        # Nothing to add — the template computes pagination from the context.
        return context


plugin_pool.register_plugin(SelfCalcPaginationPlugin)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/button/cms_plugins.py | <reponame>RacingTadpole/cmsplugin-rt
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from .models import ButtonPluginModel
from .forms import ButtonForm
# Admin grouping labels shared by the plugins in this package.
bootstrap_module_name = _("Widgets")
layout_module_name = _("Layout elements")
generic_module_name = _("Generic")
meta_module_name = _("Meta elements")


class ButtonPlugin(CMSPluginBase):
    """Renders a button linking to a CMS page or to a free-form URL."""
    model = ButtonPluginModel
    form = ButtonForm
    name = _("Button")
    #module = bootstrap_module_name
    render_template = "button_plugin.html"
    text_enabled = True  # allow the button to be embedded inside text plugins

    def render(self, context, instance, placeholder):
        # A linked CMS page takes precedence over the free-form URL field.
        if instance.page_link:
            target = instance.page_link.get_absolute_url()
        else:
            target = instance.button_link
        context.update({'instance': instance, 'link': target})
        return context


plugin_pool.register_plugin(ButtonPlugin)
|
RacingTadpole/cmsplugin-rt | setup.py | import os
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Package metadata for PyPI; the chdir above makes the relative README
# read safe regardless of where setup.py is invoked from.
setup(
    name = 'cmsplugin-rt',
    version = '0.5.1',
    # find_packages() picks up cmsplugin_rt and all its sub-plugin packages.
    packages = find_packages(), #'cmsplugin_rt', #find_packages(),
    include_package_data = True,
    license = 'BSD License', # example license
    description = 'This package contains a number of basic plugins to kick start your DjangoCMS project, such as Twitter Bootstrap navbar and buttons, Facebook and Twitter buttons, a Style Modifier, Google Analytics tracking code, Google fonts, meta tags and resizable pictures.',
    long_description = README,
    keywords = "button meta twitter bootstrap style modifier racing tadpole",
    url = 'https://github.com/RacingTadpole/cmsplugin-rt',
    author = '<NAME>',
    author_email = '<EMAIL>',
    classifiers = [
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License', # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
    zip_safe = False,
)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/rt_carousel/models.py | # Plugin models
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from cms.models.pluginmodel import CMSPlugin
# How the carousel may be rendered (value stored in display_as below).
CAROUSEL_DISPLAY_CHOICES = (
    ('bootstrap', _('Bootstrap carousel')),
    ('list', _('List')),
)

# Optional restriction of which models the carousel's generic FK may target.
allowed_models = getattr(settings, 'ALLOWED_MODELS_IN_RT_CAROUSEL', [])
# ALLOWED_MODELS_IN_RT_CAROUSEL must be a list of dictionaries with keys:
# app_label and model, e.g.
# ALLOWED_MODELS_IN_RT_CAROUSEL = [{'app_label':'sngapp', 'model':'gamegroup'},]

# None means "no limit_choices_to restriction" on the ContentType FK.
fk_models = None
if allowed_models:
    # OR together one Q object per allowed (app_label, model) pair;
    # lower-cased to match ContentType's stored values.
    fk_models = models.Q(app_label = allowed_models[0]['app_label'].lower(), model = allowed_models[0]['model'].lower())
    for m in allowed_models[1:]:
        fk_models = fk_models | models.Q(app_label = m['app_label'].lower(), model = m['model'].lower())
class RTCarouselPluginModel(CMSPlugin):
    """Carousel configuration; targets a 'content group' via a generic FK."""
    # content_group must point to a model with a ManyToMany field.
    # The Carousel will find this field and present the items in it.
    # Displaying the id as part of its description helps because
    # the admin panel will ask for the type and id directly.
    # e.g.
    # class GameGroup(models.Model):
    #     name = models.CharField(max_length=36)
    #     games = models.ManyToManyField(Game, through="GameGroupMember")
    #     def __unicode__(self):
    #         return "{0} (id {1})".format(self.name, self.id)
    #
    # and
    # class GameGroupMember(models.Model):
    #     group = models.ForeignKey(GameGroup)
    #     game = models.ForeignKey(Game)
    #     position = models.PositiveIntegerField(null=True, blank=True)
    content_type = models.ForeignKey(ContentType, limit_choices_to = fk_models)
    object_id = models.PositiveIntegerField()
    content_group = generic.GenericForeignKey('content_type', 'object_id')
    height = models.IntegerField(_("height"), default=480, help_text=_("Please enter height in pixels"))
    margin = models.CharField(_("margins"), max_length=50, blank=True, default="40px 0 40px 0", help_text=_("Please enter in css format: top right bottom left, e.g. 40px 0 40px 0"))
    display_as = models.CharField(max_length=30, default=CAROUSEL_DISPLAY_CHOICES[0][0], choices=CAROUSEL_DISPLAY_CHOICES)
    # NOTE(review): BooleanField without an explicit default — relies on the
    # pre-Django-1.6 implicit default of False; confirm on newer Django.
    mini = models.BooleanField()
    animated = models.BooleanField(default=True)

    def __unicode__(self):
        # e.g. u"480px animated mini bootstrap"
        return str(self.height) + u"px " + (self.animated and u"animated " or u"") + (self.mini and u"mini " or u"") + self.display_as
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/navbar/cms_plugins.py | <gh_stars>1-10
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from models import *
bootstrap_module_name = _("Widgets")
layout_module_name = _("Layout elements")
generic_module_name = _("Generic")
meta_module_name = _("Meta elements")
class NavbarPlugin(CMSPluginBase):
    """CMS plugin rendering a navigation bar (Twitter Bootstrap markup,
    per the package description in setup.py)."""
    model = NavbarPluginModel
    name = _("Navigation bar")
    module = bootstrap_module_name
    render_template = "navbar_plugin.html"
    # Disable the admin preview; the navbar is only rendered on the site.
    admin_preview = False

    def render(self, context, instance, placeholder):
        # Expose the plugin instance so the template can read its settings.
        context['instance'] = instance
        return context

plugin_pool.register_plugin(NavbarPlugin)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/meta_icons/cms_plugins.py | <reponame>RacingTadpole/cmsplugin-rt
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from models import *
bootstrap_module_name = _("Widgets")
layout_module_name = _("Layout elements")
generic_module_name = _("Generic")
meta_module_name = _("Meta elements")
class MetaIconsPlugin(CMSPluginBase):
    """CMS plugin emitting favicon / touch-icon tags via its template."""
    model = MetaIconsPluginModel
    name = _("Website icons")
    module = meta_module_name
    render_template = "meta_icons_plugin.html"

    def render(self, context, instance, placeholder):
        # Fall back to the favicon when no dedicated touch icon was uploaded.
        # Only the in-memory instance is changed; nothing is saved.
        if not instance.touch_icon:
            instance.touch_icon = instance.fav_icon
        context['instance'] = instance
        return context

plugin_pool.register_plugin(MetaIconsPlugin)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/button/models.py | <reponame>RacingTadpole/cmsplugin-rt<filename>cmsplugin_rt/button/models.py
# Plugin models
from cms.models.pluginmodel import CMSPlugin
from cms.models.pagemodel import Page
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.conf import settings
class ButtonPluginModel(CMSPlugin):
    """Model for the Button plugin.

    The available style/size choices depend on the configured front-end
    framework (RT_FRONT_END_FRAMEWORK setting, default 'BOOTSTRAP').
    """
    # Evaluated once at class-definition (import) time, so changing the
    # setting requires a restart to take effect.
    front_end = getattr(settings,'RT_FRONT_END_FRAMEWORK','BOOTSTRAP').upper()
    if (front_end=="BOOTSTRAP"):
        CLASS_CHOICES =(("", _("default")),
                        ("btn-primary", _("primary")),
                        ("btn-info", _("info")),
                        ("btn-success", _("success")),
                        ("btn-warning", _("warning")),
                        ("btn-danger", _("danger")),
                        ("btn-inverse", _("inverse")),
                        ("btn-link", _("link")),
                        )
        SIZE_CHOICES = (("", _("default")),
                        ("btn-large", _("large")),
                        ("btn-small", _("small")),
                        ("btn-mini", _("mini")),
                        )
    elif (front_end=="JQUERY-MOBILE"):
        CLASS_CHOICES =(("", _("default")),
                        ("inline", _("inline")),
                        )
        SIZE_CHOICES = (("", _("default")),
                        ("btn-mini", _("mini")),
                        )
    else:
        # Unknown framework: no style choices offered at all.
        CLASS_CHOICES = ()
        SIZE_CHOICES = ()
    button_type = models.CharField(_("button type"), max_length=16, blank=True, choices=CLASS_CHOICES)
    button_size = models.CharField(_("button size"), max_length=16, blank=True, choices=SIZE_CHOICES)
    button_link = models.CharField(max_length=240, default='', blank=True)
    # An internal CMS page; when set it overrides button_link (see help_text
    # and ButtonPlugin.render in cms_plugins.py).
    page_link = models.ForeignKey(Page, verbose_name=_("page"), blank=True, null=True, help_text=_("A link to a page overrides the above button link."))
    button_text = models.CharField(max_length=80, default='Click here', help_text=_("HTML symbol codes are allowed, e.g. &hearts; for ♥."))
    # NOTE(review): BooleanField without an explicit default — relies on the
    # pre-Django-1.6 implicit default of False; confirm on newer Django.
    arrows = models.BooleanField()

    def __unicode__(self):
        return self.button_text

    search_fields = ('button_text','button_link',)
|
RacingTadpole/cmsplugin-rt | cmsplugin_rt/facebook_button/models.py | <filename>cmsplugin_rt/facebook_button/models.py
from cms.models import CMSPlugin
from cms.models.pagemodel import Page
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Choice tuples for the Facebook button settings on the model below.
LAYOUT_CHOICES = [
    ('standard', _('Standard')),
    ('button_count', _('Button count')),
    ('box_count', _('Box count')),
]
VERB_CHOICES = [
    ('like', _('Like')),
    ('recommend', _('Recommend')),
]
FONT_CHOICES = [
    ('arial', _('Arial')),
    ('lucida grande', _('Lucida grande')),
    ('segoe ui', _('Segoe ui')),
    ('tahoma', _('Tahoma')),
    ('trebuchet ms', _('Trebuchet ms')),
    ('verdana', _('Verdana')),
]
COLOR_CHOICES = [
    ('light', _('light')),
    ('dark', _('dark')),
]
class FacebookButtonPluginModel(CMSPlugin):
    """Settings for a Facebook Like/Recommend button."""
    layout = models.CharField(_("Layout Style"), choices=LAYOUT_CHOICES, default='standard', max_length=50)
    # Per the help_text, a blank URL means the current page is targeted.
    url = models.CharField(_("permanent URL"), default='http://', max_length=255, blank=True, help_text=_("Leave blank to use this page. Include the http:// or https:// prefix."))
    send = models.BooleanField(_("Send button"), default=True)
    show_faces = models.BooleanField(_("Show Faces"), default=True,
        help_text=_("Show profile pictures below the like button"))
    # None/blank width lets the widget auto-scale (see help_text).
    width = models.PositiveSmallIntegerField(_("Width"), default=None, null=True,
        blank=True, help_text=_("Leave empty for auto scaling"))
    verb = models.CharField(_("Verb to display"), choices=VERB_CHOICES, default='like', max_length=50)
    font = models.CharField(_("Font"), choices=FONT_CHOICES, default='verdana', max_length=50)
    color_scheme = models.CharField(_("Color Scheme"), choices=COLOR_CHOICES, default='light', max_length=50)

    def __unicode__(self):
        return "Facebook button"
|
RacingTadpole/cmsplugin-rt | tests/runtests.py | import os, sys
from django.conf import settings
DIRNAME = os.path.dirname(__file__)

# Configure a minimal Django environment in code so the plugin tests can run
# stand-alone, without a real project: a SQLite database, one CMS template,
# and the app list this package needs (including south and the cms.plugins.*
# apps of older django-cms releases).
settings.configure(DEBUG=True,
                   DATABASES={
                       'default': {
                           'ENGINE': 'django.db.backends.sqlite3',
                       }
                   },
                   #ROOT_URLCONF='myapp.urls',
                   CMS_TEMPLATES = ( ('template_for_tests.html', 'Test template'), ),
                   CMS_MODERATOR = False,
                   CMS_PERMISSION = False,
                   TEMPLATE_CONTEXT_PROCESSORS = (
                       'django.contrib.auth.context_processors.auth',
                       'django.core.context_processors.i18n',
                       'django.core.context_processors.request',
                       'django.core.context_processors.media',
                       'django.core.context_processors.static',
                       'cms.context_processors.media',
                       'sekizai.context_processors.sekizai',
                   ),
                   INSTALLED_APPS = (
                       #'cmsplugin-rt.cmsplugin_rt',
                       'cmsplugin_rt',
                       'cmsplugin_rt.button',
                       #'cmsplugin_rt.facebook_button',
                       #'cmsplugin_rt.hbar',
                       #'cmsplugin_rt.mailchimp_form',
                       #'cmsplugin_rt.meta_icons',
                       #'cmsplugin_rt.open_graph',
                       #'cmsplugin_rt.resizeable_picture',
                       #'cmsplugin_rt.self_calc_pagination',
                       #'cmsplugin_rt.spacer',
                       #'cmsplugin_rt.style_modifier',
                       #'cmsplugin_rt.text_minimal_markup',
                       #'cmsplugin_rt.twitter_button',
                       'django.contrib.auth',
                       'django.contrib.contenttypes',
                       'django.contrib.sessions',
                       'django.contrib.admin',
                       'django.contrib.sites',
                       'django.contrib.messages',
                       'django.contrib.staticfiles',
                       #'django.contrib.markup',
                       'south',
                       'cms',
                       'mptt',
                       'menus',
                       'sekizai',
                       'cms.plugins.file',
                       'cms.plugins.link',
                       'cms.plugins.picture',
                       'cms.plugins.text',
                       'cms.plugins.video',
                   ),
                   )

#from cms.test_utils.util.context_managers import SettingsOverride
from django.test.simple import DjangoTestSuiteRunner

# verbosity=2 prints each test name as it runs; exit non-zero on failure so
# CI can detect a broken build.
test_runner = DjangoTestSuiteRunner(verbosity=2)
failures = test_runner.run_tests(['cmsplugin_rt', ])
if failures:
    sys.exit(failures)
|
christine-berlin/Capstone_WindPowerPredictions | modeling/features.py | <gh_stars>0
import pandas as pd
def get_feature_combinations(data_path='../data/GEFCom2014Data/Wind/clean_data.csv'):
    """Return a dictionary with different feature combinations of the data.

    The CSV's columns (minus identifiers and the target) define the full
    feature set; each dictionary entry is a named subset of those columns.

    Args:
        data_path (str): Path of the cleaned wind-data CSV. Defaults to the
            project-relative location used throughout the repo, so existing
            callers are unaffected.

    Returns:
        dict: Mapping of combination name -> list of feature column names.
    """
    data = pd.read_csv(data_path, parse_dates=['TIMESTAMP'], index_col='TIMESTAMP')
    # Everything except identifiers and the target counts as a feature
    # (TIMESTAMP is already consumed as the index).
    features = [var for var in data.columns.to_list()
                if var not in ('ZONEID', 'TARGETVAR', 'TIMESTAMP')]
    feature_dict = {}
    feature_dict['all'] = features
    # Without the wind-direction angle columns.
    feature_dict['no_deg'] = [var for var in features
                              if var not in ('WD100', 'WD10')]
    feature_dict['no_deg_norm'] = [var for var in features
                                   if var not in ('WD100', 'WD10', 'U100NORM', 'V100NORM')]
    # Without the u/v wind-component columns.
    feature_dict['no_comp'] = [var for var in features
                               if var not in ('U10', 'U100', 'U100NORM', 'V10', 'V100', 'V100NORM')]
    feature_dict['no_comp_plus_100Norm'] = [var for var in features
                                            if var not in ('U10', 'U100', 'V10', 'V100')]
    # Without any 10m-height measurements.
    feature_dict['no_ten'] = [var for var in features
                              if 'WD10CARD' not in var
                              and var not in ('U10', 'V10', 'WS10', 'WD10')]
    # Without cardinal-direction dummy columns.
    feature_dict['no_card'] = [var for var in features if 'CARD' not in var]
    feature_dict['no_card_100Norm'] = [var for var in features
                                       if 'CARD' not in var and var not in ('U100NORM', 'V100NORM')]
    feature_dict['only_ws'] = ['WS100']
    return feature_dict
|
christine-berlin/Capstone_WindPowerPredictions | modeling/functions.py | <reponame>christine-berlin/Capstone_WindPowerPredictions
"""Functions for:
- logging with MLflow,
- modelling,
- hyperparameter tuning,
- finding best feature combinations,
"""
import warnings
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import mlflow
from modeling.config import EXPERIMENT_NAME
TRACKING_URI = open("../.mlflow_uri").read().strip()
warnings.filterwarnings('ignore')
def log_to_mlflow(
        ZONEID=None, Model=None, features=None, train_RMSE=None,test_RMSE=None,
        hyperparameter=None, model_parameters=None, scaler=None):
    """Logs to mlflow.

    Args:
        ZONEID (int): Zone number. (Default value = None)
        Model (str): Name of model. (Default value = None)
        features (list): List of features used. (Default value = None)
        train_RMSE (float): RMSE score of train data set. (Default value = None)
        test_RMSE (float): RMSE score of test data set. (Default value = None)
        hyperparameter (dict): Dictionary of the hyperparameters.
            (Default value = None)
        model_parameters (dict): Dictionary with the model parameters.
            (Default value = None)
        scaler (sklearn.scaler): Scaler that was applied to the data.
            (Default value = None)

    Returns:
        None
    """
    mlflow.set_tracking_uri(TRACKING_URI)
    mlflow.set_experiment(EXPERIMENT_NAME)
    # Bundle the optional dicts so a single log_params call suffices.
    params = {}
    if model_parameters:
        params['model parameters'] = model_parameters
    if hyperparameter:
        params['hyperparameter'] = hyperparameter
    mlflow.start_run()
    run = mlflow.active_run()
    print(f"\nActive run_id: {run.info.run_id}")
    # NOTE(review): all checks below are truthiness checks, so legitimate
    # falsy values (e.g. a 0.0 RMSE) would silently not be logged.
    if ZONEID:
        mlflow.set_tag("ZONEID", ZONEID)
    if Model:
        mlflow.set_tag("Model", Model)
    if features:
        mlflow.set_tag("features", features)
        mlflow.set_tag("n_features", len(features))
    if scaler:
        mlflow.set_tag("scaler", scaler.__class__.__name__)
    if train_RMSE:
        mlflow.log_metric("train-RMSE", train_RMSE)
    if test_RMSE:
        mlflow.log_metric("test-RMSE", test_RMSE)
    if params:
        mlflow.log_params(params)
    mlflow.end_run()
def adjusted_RMSE(y_test, y_pred):
    """Compute the RMSE after clipping y_pred into the interval [0, 1].

    The target is a normalized power output, so predictions outside
    [0, 1] are physically impossible and are clamped before scoring.
    Vectorized with numpy (clip + explicit RMSE) instead of a Python
    list comprehension followed by sklearn's mean_squared_error —
    numerically identical, but faster on long series.

    Args:
        y_test (array-like): True target values.
        y_pred (array-like): Raw predicted values (may fall outside [0, 1]).

    Returns:
        float: The adjusted RMSE between y_test and the clipped y_pred.
    """
    clipped = np.clip(np.asarray(y_pred, dtype=float), 0.0, 1.0)
    truth = np.asarray(y_test, dtype=float)
    return float(np.sqrt(np.mean((truth - clipped) ** 2)))
def get_bestfeatures(df):
    """Pick, for every zone, the row(s) with the lowest CV score.

    Args:
        df (pd.DataFrame): Test/train scores for one model, indexed by zone,
            covering all feature combinations (must have a ``CV`` column).

    Returns:
        pd.DataFrame: The rows achieving the per-zone minimum CV score
            (ties are all kept), in index order.
    """
    winners = []
    for zone in df.index.unique():
        zone_rows = df.loc[zone]
        winners.append(zone_rows[zone_rows.CV == zone_rows.CV.min()])
    return pd.concat(winners) if winners else pd.DataFrame()
def result_to_df(model_dict, testscore, trainscore, cv_score, fc):
    """Stores the results of the modelling as a Pandas Dataframe.

    Args:
        model_dict (dict): Dictionary with the models, keyed by zone 1..10.
        testscore (dict): Dictionary with the scores of the test data.
        trainscore (dict): Dictionary with the scores of the train data.
        cv_score (list): List with the score of the cross-validation.
        fc (list): List with the features used in the fitting.

    Returns:
        (pd.DataFrame): Dataframe with results, indexed ZONE1..ZONE10.
    """
    # Zones are hard-coded as 1..10 here (matches the data set used in
    # this repo); model_dict/cv_score must provide those keys/indices.
    df_results = pd.DataFrame(pd.Series([model_dict[i].get_params()
                                         for i in range(1, 11)]),
                              columns=['BEST_PARAMS'])
    df_results['CV'] = pd.Series([cv_score[i] for i in range(1,11)])
    # Turn the positional index 0..9 into a ZONE1..ZONE10 string index.
    df_results['ZONE'] = df_results.index
    df_results.ZONE = df_results.ZONE.apply(lambda x: f'ZONE{x+1}')
    df_results = df_results.set_index('ZONE')
    # All zones use the same model class; take it from zone 1.
    df_results['MODEL'] = model_dict[1].__class__.__name__
    df_results['FC'] = fc
    df_results = df_results.join(pd.DataFrame.from_dict(
        testscore,
        orient='index',
        columns=['TESTSCORE']))  # leave out TOTAL
    df_results = df_results.join(pd.DataFrame.from_dict(
        trainscore,
        orient='index',
        columns=['TRAINSCORE']))
    return df_results
def scaler_func(X_train, X_test, scaler):
    """Fit *scaler* on the training split, then transform both splits.

    The scaler is fitted on the training data only, so no information
    from the test set leaks into the scaling parameters.

    Args:
        X_train (pd.DataFrame): Training data.
        X_test (pd.DataFrame): Test data.
        scaler (sklearn.Scaler): MinMaxScaler, StandardScaler or similar.

    Returns:
        tuple: (scaled training data, scaled test data).
    """
    scaled_train = scaler.fit_transform(X_train)
    scaled_test = scaler.transform(X_test)
    return scaled_train, scaled_test
def train_test_split_features(data_train, data_test, zone, features):
    """Select one zone's rows and split them into X/y for train and test.

    Args:
        data_train (pd.DataFrame): Train data set (must contain ZONEID,
            TARGETVAR and the requested feature columns).
        data_test (pd.DataFrame): Test data set with the same layout.
        zone (int): The zone id (id of the wind farm) to select.
        features (list): Column names to use as explanatory variables.

    Returns:
        (pd.DataFrame): Explanatory variables of train data set.
        (pd.DataFrame): Explanatory variables of test data set.
        (pd.Series): Target variable of train data set.
        (pd.Series): Target variable of test data set.
    """
    zone_train = data_train[data_train.ZONEID == zone]
    zone_test = data_test[data_test.ZONEID == zone]
    return (zone_train[features], zone_test[features],
            zone_train.TARGETVAR, zone_test.TARGETVAR)
def predict_func(model, X, y):
    """Predict with *model* and clamp the predictions into [0, 1].

    The energy output consists of normalized values in [0, 1], so
    predictions outside that range are physically impossible and
    are clamped to the nearest bound.

    Args:
        model (sklearn.model): Model used for predicting.
        X (pd.DataFrame): Dataframe with explanatory variables.
        y (pd.Series): Target variable of the test data; only its index
            is used, to align the returned predictions.

    Returns:
        (pd.DataFrame): Single-column frame 'pred' with the clamped
            predictions, indexed like *y*.
    """
    def clamp(value):
        # Mirrors the original conditional exactly (incl. NaN pass-through).
        if value >= 1:
            return 1
        if value <= 0:
            return 0
        return value

    clamped = [clamp(raw) for raw in model.predict(X)]
    return pd.DataFrame(clamped, index=y.index, columns=['pred'])
|
Minyan910/libcint | testsuite/test_cint4c1e.py | <reponame>Minyan910/libcint
#!/usr/bin/env python
# $Id$
# -*- coding: utf-8
'''
test libcint
'''
__author__ = "<NAME> <<EMAIL>>"
import sys
import os
import ctypes
import numpy
_cint = numpy.ctypeslib.load_library('libcint', '.')
PTR_LIGHT_SPEED = 0
PTR_COMMON_ORIG = 1
PTR_SHIELDING_ORIG = 4
PTR_RINV_ORIG = 4
PTR_RINV_ZETA = 7
PTR_ENV_START = 20
CHARGE_OF = 0
PTR_COORD = 1
NUC_MOD_OF = 2
PTR_ZETA = 3
RAD_GRIDS = 4
ANG_GRIDS = 5
ATM_SLOTS = 6
ATOM_OF = 0
ANG_OF = 1
NPRIM_OF = 2
NCTR_OF = 3
KAPPA_OF = 4
PTR_EXP = 5
PTR_COEFF = 6
BAS_SLOTS = 8
natm = 4
nbas = 0
atm = numpy.zeros((natm,ATM_SLOTS), dtype=numpy.int32)
bas = numpy.zeros((1000,BAS_SLOTS), dtype=numpy.int32)
env = numpy.zeros(10000)
off = PTR_ENV_START
for i in range(natm):
atm[i, CHARGE_OF] = (i+1)*2
atm[i, PTR_COORD] = off
env[off+0] = .2 * (i+1)
env[off+1] = .3 + (i+1) * .5
env[off+2] = .1 - (i+1) * .5
off += 3
off0 = off
# basis with kappa > 0
nh = 0
bas[nh,ATOM_OF ] = 0
bas[nh,ANG_OF ] = 1
bas[nh,KAPPA_OF] = 1
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1
bas[nh,ATOM_OF ] = 1
bas[nh,ANG_OF ] = 2
bas[nh,KAPPA_OF] = 2
bas[nh,NPRIM_OF] = 2
bas[nh,NCTR_OF ] = 2
bas[nh,PTR_EXP] = off
env[off+0] = 5
env[off+1] = 3
bas[nh,PTR_COEFF] = off + 2
env[off+2] = 1
env[off+3] = 2
env[off+4] = 4
env[off+5] = 1
off += 6
nh += 1
bas[nh,ATOM_OF ] = 2
bas[nh,ANG_OF ] = 3
bas[nh,KAPPA_OF] = 3
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1
bas[nh,ATOM_OF ] = 3
bas[nh,ANG_OF ] = 4
bas[nh,KAPPA_OF] = 4
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = .5
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1.
off = off + 2
nh += 1
nbas = nh
# basis with kappa < 0
n = off - off0
for i in range(n):
env[off+i] = env[off0+i]
for i in range(nh):
bas[i+nh,ATOM_OF ] = bas[i,ATOM_OF ]
bas[i+nh,ANG_OF ] = bas[i,ANG_OF ] - 1
bas[i+nh,KAPPA_OF] =-bas[i,KAPPA_OF]
bas[i+nh,NPRIM_OF] = bas[i,NPRIM_OF]
bas[i+nh,NCTR_OF ] = bas[i,NCTR_OF ]
bas[i+nh,PTR_EXP ] = bas[i,PTR_EXP ] + n
bas[i+nh,PTR_COEFF]= bas[i,PTR_COEFF] + n
env[bas[i+nh,PTR_COEFF]] /= 2 * env[bas[i,PTR_EXP]]
env[bas[5,PTR_COEFF]+0] = env[bas[1,PTR_COEFF]+0] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+1] = env[bas[1,PTR_COEFF]+1] / (2 * env[bas[1,PTR_EXP]+1])
env[bas[5,PTR_COEFF]+2] = env[bas[1,PTR_COEFF]+2] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+3] = env[bas[1,PTR_COEFF]+3] / (2 * env[bas[1,PTR_EXP]+1])
natm = ctypes.c_int(natm)
nbas = ctypes.c_int(nbas)
c_atm = atm.ctypes.data_as(ctypes.c_void_p)
c_bas = bas.ctypes.data_as(ctypes.c_void_p)
c_env = env.ctypes.data_as(ctypes.c_void_p)
opt = ctypes.POINTER(ctypes.c_void_p)()
_cint.CINTlen_spinor.restype = ctypes.c_int
from pyscf import gto
mol = gto.M()
mol._atm = atm[:natm.value]
mol._bas = bas[:nbas.value]
mol._env = env
coords = mol.atom_coords()
ao = mol.eval_gto('GTOval_sph', coords)
def test_int2c1e_sph():
    """Cross-check cint1e_ipiprinv_sph / cint1e_iprinvip_sph (Python 2 script).

    The trace of the second-derivative 1/r integrals, scaled by -1/(4*pi),
    is compared against products of AO values at each nucleus — consistent
    with the identity del^2(1/r) = -4*pi*delta(r).
    """
    fnpp1 = _cint.cint1e_ipiprinv_sph
    fnp1p = _cint.cint1e_iprinvip_sph
    nullptr = ctypes.POINTER(ctypes.c_void_p)()
    def by_pp(shls, shape):
        # Assemble (d/dr1)^2 + 2*(d/dr1)(d/dr2) + (d/dr2)^2 acting on 1/r,
        # keeping only the xx+yy+zz trace of each 9-component block.
        buf = numpy.empty(shape+(9,), order='F')
        fnpp1(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref = buf[:,:,0] + buf[:,:,4] + buf[:,:,8]
        fnp1p(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref+=(buf[:,:,0] + buf[:,:,4] + buf[:,:,8])*2
        # The remaining (d/dr2)^2 term comes from the transposed shell pair.
        shls = (shls[1], shls[0])
        shape = (shape[1], shape[0]) + (9,)
        buf = numpy.empty(shape, order='F')
        fnpp1(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref+= (buf[:,:,0] + buf[:,:,4] + buf[:,:,8]).transpose(1,0)
        return ref * (-.25/numpy.pi)
    #intor = _cint.cint4c1e_sph
    ao_loc = mol.ao_loc_nr()
    for nucid in range(mol.natm):
        mol.set_rinv_orig(coords[nucid])
        for j in range(nbas.value):
            j0 = ao_loc[j]
            j1 = ao_loc[j+1]
            for i in range(j+1):
                di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
                dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
                shls = (i, j)
                i0 = ao_loc[i]
                i1 = ao_loc[i+1]
                # Reference: outer product of AO values at this nucleus.
                buf = numpy.einsum('i,j->ij', ao[nucid,i0:i1], ao[nucid,j0:j1])
                ref = by_pp(shls, (di,dj))
                dd = abs(ref - buf).sum()
                if dd > 1e-8:
                    print "* FAIL: cint2c1e", " shell:", i, j, "err:", dd
                    return
    print 'cint1e_ipiprinv_sph cint1e_iprinvip_sph pass'
def test_int4c1e_sph():
    """Check cint4c1e_sph against second derivatives of the 2e integrals.

    The 4-centre 1e-type integrals are reconstructed from the traces of
    cint2e_ipip1_sph / cint2e_ipvip1_sph scaled by -1/(4*pi) — consistent
    with the identity del^2(1/r) = -4*pi*delta(r).
    """
    fnpp1 = _cint.cint2e_ipip1_sph
    fnp1p = _cint.cint2e_ipvip1_sph
    nullptr = ctypes.POINTER(ctypes.c_void_p)()
    def by_pp(shls, shape):
        # Same derivative assembly as in test_int2c1e_sph, but on the full
        # 4-index 2e blocks; only the xx+yy+zz trace of the 9 components.
        buf = numpy.empty(shape+(9,), order='F')
        fnpp1(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref = buf[:,:,:,:,0] + buf[:,:,:,:,4] + buf[:,:,:,:,8]
        fnp1p(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref+=(buf[:,:,:,:,0] + buf[:,:,:,:,4] + buf[:,:,:,:,8])*2
        # Remaining term from the bra-transposed shell quadruple.
        shls = (shls[1], shls[0]) + shls[2:]
        shape = (shape[1], shape[0]) + shape[2:] + (9,)
        buf = numpy.empty(shape, order='F')
        fnpp1(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref+= (buf[:,:,:,:,0] + buf[:,:,:,:,4] + buf[:,:,:,:,8]).transpose(1,0,2,3)
        return ref * (-.25/numpy.pi)
    intor = _cint.cint4c1e_sph
    for l in range(nbas.value):
        for k in range(l+1):
            for j in range(nbas.value):
                for i in range(j+1):
                    di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
                    dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
                    dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
                    dl = (bas[l,ANG_OF] * 2 + 1) * bas[l,NCTR_OF]
                    shls = (i, j, k, l)
                    buf = numpy.empty((di,dj,dk,dl), order='F')
                    intor(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
                          c_atm, natm, c_bas, nbas, c_env, nullptr)
                    ref = by_pp(shls, (di,dj,dk,dl))
                    dd = abs(ref - buf).max()
                    if dd > 1e-6:
                        print "* FAIL: cint4c1e", " shell:", i, j, k, l, "err:", dd
                        return
    print 'cint4c1e_sph pass'

# Run both checks when the script is executed.
test_int2c1e_sph()
test_int4c1e_sph()
|
Minyan910/libcint | testsuite/test_3c2e.py | #!/usr/bin/env python
# $Id$
# -*- coding: utf-8
from __future__ import print_function
'''
test libcint
'''
__author__ = "<NAME> <<EMAIL>>"
import sys
import os
import ctypes
import numpy
_cint = numpy.ctypeslib.load_library('libcint', '.')
PTR_LIGHT_SPEED = 0
PTR_COMMON_ORIG = 1
PTR_SHIELDING_ORIG = 4
PTR_RINV_ORIG = 4
PTR_RINV_ZETA = 7
PTR_ENV_START = 20
CHARGE_OF = 0
PTR_COORD = 1
NUC_MOD_OF = 2
PTR_ZETA = 3
RAD_GRIDS = 4
ANG_GRIDS = 5
ATM_SLOTS = 6
ATOM_OF = 0
ANG_OF = 1
NPRIM_OF = 2
NCTR_OF = 3
KAPPA_OF = 4
PTR_EXP = 5
PTR_COEFF = 6
BAS_SLOTS = 8
natm = 4
nbas = 0
atm = numpy.zeros((natm+1,ATM_SLOTS), dtype=numpy.int32)
bas = numpy.zeros((1000,BAS_SLOTS), dtype=numpy.int32)
env = numpy.zeros(10000)
off = PTR_ENV_START
for i in range(natm):
atm[i, CHARGE_OF] = (i+1)*2
atm[i, PTR_COORD] = off
env[off+0] = .2 * (i+1)
env[off+1] = .3 + (i+1) * .5
env[off+2] = .1 - (i+1) * .5
off += 3
off0 = off
# basis with kappa > 0
nh = 0
bas[nh,ATOM_OF ] = 0
bas[nh,ANG_OF ] = 1
bas[nh,KAPPA_OF] = 1
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1
bas[nh,ATOM_OF ] = 1
bas[nh,ANG_OF ] = 2
bas[nh,KAPPA_OF] = 2
bas[nh,NPRIM_OF] = 2
bas[nh,NCTR_OF ] = 2
bas[nh,PTR_EXP] = off
env[off+0] = 5
env[off+1] = 3
bas[nh,PTR_COEFF] = off + 2
env[off+2] = 1
env[off+3] = 2
env[off+4] = 4
env[off+5] = 1
off += 6
nh += 1
bas[nh,ATOM_OF ] = 2
bas[nh,ANG_OF ] = 3
bas[nh,KAPPA_OF] = 3
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1
bas[nh,ATOM_OF ] = 3
bas[nh,ANG_OF ] = 4
bas[nh,KAPPA_OF] = 4
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = .5
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1.
off = off + 2
nh += 1
nbas = nh
# basis with kappa < 0
n = off - off0
for i in range(n):
env[off+i] = env[off0+i]
for i in range(nh):
bas[i+nh,ATOM_OF ] = bas[i,ATOM_OF ]
bas[i+nh,ANG_OF ] = bas[i,ANG_OF ] - 1
bas[i+nh,KAPPA_OF] =-bas[i,KAPPA_OF]
bas[i+nh,NPRIM_OF] = bas[i,NPRIM_OF]
bas[i+nh,NCTR_OF ] = bas[i,NCTR_OF ]
bas[i+nh,PTR_EXP ] = bas[i,PTR_EXP ] + n
bas[i+nh,PTR_COEFF]= bas[i,PTR_COEFF] + n
env[bas[i+nh,PTR_COEFF]] /= 2 * env[bas[i,PTR_EXP]]
env[bas[5,PTR_COEFF]+0] = env[bas[1,PTR_COEFF]+0] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+1] = env[bas[1,PTR_COEFF]+1] / (2 * env[bas[1,PTR_EXP]+1])
env[bas[5,PTR_COEFF]+2] = env[bas[1,PTR_COEFF]+2] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+3] = env[bas[1,PTR_COEFF]+3] / (2 * env[bas[1,PTR_EXP]+1])
nfitid = nbas*2
off += n
bas[nfitid,ATOM_OF ] = 0
bas[nfitid,ANG_OF ] = 0
bas[nfitid,KAPPA_OF] = 0
bas[nfitid,NPRIM_OF] = 1
bas[nfitid,NCTR_OF ] = 1
bas[nfitid,PTR_EXP ] = off
env[off+0] = 0
off += 1
bas[nfitid,PTR_COEFF] = off
env[off+0] = 2 * numpy.sqrt(numpy.pi)
nfitid1 = nbas*2 + 1
off += n
bas[nfitid1,ATOM_OF ] = 0
bas[nfitid1,ANG_OF ] = 0
bas[nfitid1,KAPPA_OF] = 0
bas[nfitid1,NPRIM_OF] = 1
bas[nfitid1,NCTR_OF ] = 1
bas[nfitid1,PTR_EXP ] = off
env[off+0] = 0
off += 1
bas[nfitid1,PTR_COEFF] = off
env[off+0] = 2 * numpy.sqrt(numpy.pi)
natm = ctypes.c_int(natm)
nbas = ctypes.c_int(nbas)
c_atm = atm.ctypes.data_as(ctypes.c_void_p)
c_bas = bas.ctypes.data_as(ctypes.c_void_p)
c_env = env.ctypes.data_as(ctypes.c_void_p)
opt = ctypes.POINTER(ctypes.c_void_p)()
_cint.CINTlen_spinor.restype = ctypes.c_int
def close(v1, vref, count, place):
    # True when the accumulated deviation, averaged over `count` elements,
    # rounds to zero at `place` decimal places.
    mean_deviation = abs(v1 - vref) / count
    return round(mean_deviation, place) == 0
def test_int3c2e_sph(name, fnref, vref, dim, place):
    """Compare integral `name` against `fnref` over all shell triples.

    `fnref` is evaluated as a 4-centre integral whose 4th shell is the
    unit-exponent fitting function `nfitid` placed on shell k's atom,
    which should reproduce the 3-centre integral. `vref` is the expected
    accumulated |value| sum, checked to `place` decimals via close().
    `dim` is the number of components per integral (1 or 3 here).
    """
    intor = getattr(_cint, name)
    intoref = getattr(_cint, fnref)
    intor.restype = ctypes.c_void_p
    # Shared flat output buffers, generously oversized for any shell block.
    op = numpy.empty(1000000*dim)
    pop = op.ctypes.data_as(ctypes.c_void_p)
    opref = numpy.empty(1000000*dim)
    pref = opref.ctypes.data_as(ctypes.c_void_p)
    v1 = 0
    cnt = 0
    for k in range(nbas.value):
        l = nfitid
        # Move the fitting function onto shell k's atom so the 4-centre
        # reference collapses to the 3-centre case.
        bas[l,ATOM_OF] = bas[k,ATOM_OF]
        for j in range(nbas.value):
            for i in range(nbas.value):
                di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
                dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
                dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
                nd = di*dj*dk*dim
                shls = (ctypes.c_int * 4)(i, j, k, l)
                intoref(pref, shls, c_atm, natm, c_bas, nbas, c_env, opt)
                intor(pop, shls, c_atm, natm, c_bas, nbas, c_env, opt)
                if not numpy.allclose(opref[:nd], op[:nd]):
                    print('Fail:', name, i,j,k)
                v1 += abs(numpy.array(op[:nd])).sum()
                cnt += nd
    if close(v1, vref, cnt, place):
        print("pass: ", name)
    else:
        print("* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref)
def sf2spinor(mat, i, j, bas):
    """Transform a spin-free (spherical) integral block to the spinor basis.

    Args:
        mat: 3-d array (bra-dim, ket-dim, extra) for shells i and j in the
            spherical basis.
        i, j: shell indices into `bas`.
        bas: basis table (rows follow the BAS_SLOTS layout above).

    Returns:
        Complex array in the spinor basis (bra/ket dimensions doubled by
        the alpha/beta spin expansion — confirm exact dims against
        pyscf's sph2spinor_l output).
    """
    import pyscf.symm.cg
    import scipy.linalg
    assert(mat.ndim == 3)
    l1 = bas[i,ANG_OF]
    l2 = bas[j,ANG_OF]
    d1 = bas[i,NCTR_OF]
    d2 = bas[j,NCTR_OF]
    # Spherical -> spinor coefficients, split into alpha and beta spin parts;
    # block-diagonal replication covers multiple contractions per shell.
    u1a, u1b = pyscf.gto.mole.sph2spinor_l(l1)
    u2a, u2b = pyscf.gto.mole.sph2spinor_l(l2)
    u1a = scipy.linalg.block_diag(*((u1a,)*d1))
    u1b = scipy.linalg.block_diag(*((u1b,)*d1))
    u2a = scipy.linalg.block_diag(*((u2a,)*d2))
    u2b = scipy.linalg.block_diag(*((u2b,)*d2))
    u1 = numpy.vstack((u1a,u1b))
    u2 = numpy.vstack((u2a,u2b))
    m, n, k = mat.shape
    # Duplicate mat on the alpha-alpha and beta-beta blocks (spin-free
    # operator: no alpha-beta coupling).
    matab = numpy.zeros((m*2,n*2,k))
    matab[:m,:n,:] = matab[m:,n:,:] = mat
    # zmat = u1^dagger * matab * u2, applied index-wise over the last axis.
    zmat = numpy.einsum('pjk,pi->ijk', matab, u1.conj())
    zmat = numpy.einsum('ipk,pj->ijk', zmat, u2)
    return zmat
def test_int3c2e_spinor(name, fnref, vref, dim, place):
    """Compare spinor integral `name` with spherical `fnref` + sf2spinor.

    The basis is copied with all kappa values zeroed so the spherical and
    spinor shells line up; the spherical block is transformed with
    sf2spinor and checked element-wise, while the accumulated |value| sum
    is checked against `vref` to `place` decimals.
    """
    abas = bas.copy()
    abas[:,KAPPA_OF] = 0
    c_bas = abas.ctypes.data_as(ctypes.c_void_p)
    intor = getattr(_cint, name)
    intoref = getattr(_cint, fnref)
    intor.restype = ctypes.c_void_p
    v1 = 0
    cnt = 0
    for k in range(nbas.value):
        l = nfitid
        for j in range(nbas.value):
            for i in range(nbas.value):
                # Spherical dimensions: (2l+1) per contraction.
                di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
                dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
                dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
                shls = (ctypes.c_int * 4)(i, j, k, l)
                opref = numpy.empty((di,dj,dk,dim), order='F')
                intoref(opref.ctypes.data_as(ctypes.c_void_p), shls,
                        c_atm, natm, c_bas, nbas, c_env, opt)
                zmat = sf2spinor(opref[:,:,:,0], i, j, bas)
                # Spinor dimensions: doubled (alpha+beta) on bra and ket.
                di = (bas[i,ANG_OF] * 4 + 2) * bas[i,NCTR_OF]
                dj = (bas[j,ANG_OF] * 4 + 2) * bas[j,NCTR_OF]
                dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
                # NOTE(review): numpy.complex was removed in NumPy 1.24+;
                # this would need plain `complex` (or numpy.complex128) today.
                op = numpy.empty((di,dj,dk,dim), order='F', dtype=numpy.complex)
                intor(op.ctypes.data_as(ctypes.c_void_p), shls,
                      c_atm, natm, c_bas, nbas, c_env, opt)
                if not numpy.allclose(zmat, op[:,:,:,0]):
                    print('Fail:', name, i,j,k)
                v1 += abs(numpy.array(op)).sum()
                cnt += op.size
    if close(v1, vref, cnt, place):
        print("pass: ", name)
    else:
        print("* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref)
def test_int2c_sph(name, fnref, vref, dim, place):
    """Compare 2-centre integral `name` against 3-centre reference `fnref`.

    The 3-centre reference uses the unit fitting function `nfitid1` as its
    middle shell (placed on shell i's atom), which should collapse it to
    the plain 2-centre integral. The accumulated |value| sum is checked
    against `vref` to `place` decimals via close().
    """
    intor = getattr(_cint, name)
    intoref = getattr(_cint, fnref)
    intor.restype = ctypes.c_void_p
    # Shared flat output buffers, generously oversized for any shell block.
    op = numpy.empty(1000000*dim)
    pop = op.ctypes.data_as(ctypes.c_void_p)
    opref = numpy.empty(1000000*dim)
    pref = opref.ctypes.data_as(ctypes.c_void_p)
    v1 = 0
    cnt = 0
    for k in range(nbas.value):
        for i in range(nbas.value):
            j = nfitid1
            # Keep the fitting shell on the same atom as shell i.
            bas[j,ATOM_OF] = bas[i,ATOM_OF]
            di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
            dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
            nd = di*dk*dim
            shls = (ctypes.c_int * 3)(i, j, k)
            intoref(pref, shls, c_atm, natm, c_bas, nbas, c_env, opt)
            shls = (ctypes.c_int * 2)(i, k)
            intor(pop, shls, c_atm, natm, c_bas, nbas, c_env, opt)
            if not numpy.allclose(opref[:nd], op[:nd]):
                print('Fail:', name, i,k)
            v1 += abs(numpy.array(op[:nd])).sum()
            cnt += nd
    if close(v1, vref, cnt, place):
        print("pass: ", name)
    else:
        print("* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref)
if __name__ == "__main__":
    if "--high-prec" in sys.argv:
        # --high-prec replaces the default tolerance check with a strict
        # absolute-error comparison (rounded to `place` decimals)
        def close(v1, vref, count, place):
            return round(abs(v1-vref), place) == 0
    # tuples: (intor name, reference name, expected |value| sum, components, decimals)
    for f in (('cint3c2e_sph', 'cint2e_sph', 1586.350797432699, 1, 10),
              ('cint3c2e_ip1_sph', 'cint2e_ip1_sph', 2242.052249267909, 3, 10),
              ('cint3c2e_ip2_sph', 'cint2e_ip2_sph', 1970.982483860059, 3, 10),
             ):
        test_int3c2e_sph(*f)
    if "--quick" not in sys.argv:
        # spinor integrals are slower; skipped in --quick mode
        for f in (('cint3c2e', 'cint3c2e_sph', 4412.363002831547, 1, 10),
                 ):
            test_int3c2e_spinor(*f)
#    for f in (('cint2c2e_sph', 'cint2e_sph', 782.3104849606677, 1, 10),
#              ('cint2c2e_ip1_sph', 'cint2e_ip1_sph', 394.6515972715189, 3, 10),
#              ('cint2c2e_ip2_sph', 'cint2e_ip2_sph', 394.6515972715189, 3, 10),
#             ):
#        test_int2c2e_sph(*f)
    for f in (('cint2c2e_sph', 'cint3c2e_sph', 782.3104849606677, 1, 10),
              ('cint2c2e_ip1_sph', 'cint3c2e_ip1_sph', 394.6515972715189, 3, 10),
              ('cint2c2e_ip2_sph', 'cint3c2e_ip2_sph', 394.6515972715189, 3, 10),
              ('cint1e_ovlp_sph', 'cint3c1e_sph', 288.739411257669, 1, 10),
              #('cint1e_kin_sph'*2.0, 'cint3c1e_p2_sph', 1662.148571297274, 1, 10),
              ('cint1e_r2_origj_sph', 'cint3c1e_r2_origk_sph', 1467.040217557744, 1, 10),
             ):
        test_int2c_sph(*f)
|
Minyan910/libcint | examples/python_call.py | from math import *
import numpy
import ctypes
# general contracted DZ basis [3s1p/2s1p] for H2
# exponents contract-coeff
# S 6.0 0.7 0.4
# 2.0 0.6 0.3
# 0.8 0.5 0.2
# P 0.9 1.
def gto_norm(n, a):
    """Normalization factor of the radial function r^n e^{-a r^2}."""
    numerator = 2**(2*n + 3) * factorial(n + 1) * (2*a)**(n + 1.5)
    denominator = factorial(2*n + 2) * sqrt(pi)
    return sqrt(numerator / denominator)
# Slot offsets into the integer atm/bas tables, matching libcint's cint.h
# macros. NOTE: these values were previously off by one (CHARGE_OF = 1, ...,
# ATOM_OF = 1, ...), which disagreed both with cint.h and with how the rows
# are built below (charge is stored at column 0, coordinate pointer at 1).
CHARGE_OF = 0
PTR_COORD = 1
NUC_MOD_OF = 2
PTR_ZETA = 3
ATM_SLOTS = 6   # number of integer slots per atom row
ATOM_OF = 0
ANG_OF = 1
NPRIM_OF = 2
NCTR_OF = 3
KAPPA_OF = 4
PTR_EXP = 5
PTR_COEFF = 6
BAS_SLOTS = 8   # number of integer slots per shell row
# libcint reserves env[0:20] for internal use; user data starts at offset 20
ptr_env = 20
atm = []
bas = []
env = [0] * ptr_env

# atom #0: hydrogen; row layout: CHARGE_OF, PTR_COORD, NUC_MOD_OF, PTR_ZETA, padding
atm.append([1, ptr_env, 0, 0, 0, 0])
# x y z (Bohr)
env.extend([0, 0, -0.8])
ptr_env += 3
# atom #1: hydrogen at (0, 0, 0.8) Bohr
atm.append([1, ptr_env, 0, 0, 0, 0])
env.extend([0, 0, 0.8])
ptr_env += 3  # fix: advance past atom #1's coordinates (was missing, so the
              # s-shell exponent pointer below aimed at the coordinates)

# basis for atom #0
# 3s -> 2s: 3 primitives contracted into 2 functions
env.extend([6., 2., .8])              # exponents
env.extend([.7, .6, .5, .4, .3, .2])  # contraction coefficients (3 x 2)
# ATOM_OF, ANG_OF, NPRIM_OF, NCTR_OF, KAPPA_OF, PTR_EXP, PTR_COEFF, reserved
bas.append([0, 0, 3, 2, 0, ptr_env, ptr_env+3, 0])
ptr_env += 9
# one uncontracted p shell
env.extend([.9])
env.extend([1.])
bas.append([0, 1, 1, 1, 0, ptr_env, ptr_env+1, 0])
ptr_env += 2  # fix: 1 exponent + 1 coefficient were appended (was += 1)

# basis functions for atom #1: same shells as atom #0 but ATOM_OF must be 1.
# fix: plain `bas.extend(bas[-2:])` aliased the row lists and left both
# copies attached to atom #0; build fresh rows with the atom index updated.
bas.extend([[1] + shell[1:] for shell in bas[-2:]])

# note the integer type expected by libcint (fix: bare `int32` was undefined)
atm = numpy.array(atm, dtype=numpy.int32)
bas = numpy.array(bas, dtype=numpy.int32)
env = numpy.array(env)

_cint = ctypes.cdll.LoadLibrary('/path/to/libcint.so')

# fix: natm/nbas are row counts, not flattened array sizes (.size = rows*slots)
c_natm = ctypes.c_int(atm.shape[0])
c_nbas = ctypes.c_int(bas.shape[0])

_cint.CINTcgto_spheric.restype = ctypes.c_int
# number of spherical components of shells 0 and 1; with restype c_int these
# are plain Python ints (fix: no `.value`, and `.ctype.data` was a typo)
bas_ptr = bas.ctypes.data_as(ctypes.c_void_p)
di = _cint.CINTcgto_spheric(ctypes.c_int(0), bas_ptr)
dj = _cint.CINTcgto_spheric(ctypes.c_int(1), bas_ptr)
c_shls = (ctypes.c_int * 2)(0, 1)
# NOTE(review): libcint writes the (di, dj, 3) buffer in Fortran order —
# consider order='F' here; kept as in the original. TODO confirm.
buf = numpy.empty((di, dj, 3))
# fix: pass addresses as c_void_p; a bare `.ctypes.data` int would be
# truncated to a C int by ctypes' default conversion on 64-bit platforms
_cint.cint1e_ipnuc_sph(buf.ctypes.data_as(ctypes.c_void_p), c_shls,
                       atm.ctypes.data_as(ctypes.c_void_p), c_natm,
                       bas.ctypes.data_as(ctypes.c_void_p), c_nbas,
                       env.ctypes.data_as(ctypes.c_void_p))
|
WyattSP/MSc-Future-Work | RandomEvolutionModel.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 2 19:07:01 2021
@author: wyattpetryshen
"""
# Code templates for Ornstein-Uhlenbeck process and Brownian motion are from IPython Interactive Computing and Visualization Cookbook, Second Edition (2018), by <NAME>.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy import stats
import time as timetime
import random
import itertools
#Calculate angle between vectors
def unit_vector(vector):
    """Return the unit (length-1) vector pointing along `vector`."""
    magnitude = np.linalg.norm(vector)
    return vector / magnitude
def vectorTransform(end_Point, old_origin):
    """Translate `end_Point` so that `old_origin` becomes the origin."""
    return np.subtract(end_Point, old_origin)
def angle_between(v1, v2):
    """Return the angle in degrees between vectors `v1` and `v2`."""
    # normalize both vectors (inlined unit_vector helper)
    u1 = v1 / np.linalg.norm(v1)
    u2 = v2 / np.linalg.norm(v2)
    # clip guards arccos against tiny floating-point overshoot outside [-1, 1]
    cos_theta = np.clip(np.dot(u1, u2), -1.0, 1.0)
    return np.degrees(np.arccos(cos_theta))
#Equation for Ornstein-Uhlenbeck process
def binVals(array, step):
    """Mean of consecutive, non-overlapping bins of width `step`.

    Args:
        array: sequence of numeric values to bin
        step (int): number of consecutive samples per bin

    Returns:
        list of per-bin means; a trailing partial bin is dropped
        (same truncation as int(len(array) / step) in the original).
    """
    # slicing replaces the original's manual start/stop bookkeeping
    # (which also contained a no-op `step = step` assignment)
    n_bins = int(len(array) / step)
    return [np.mean(array[b * step:(b + 1) * step]) for b in range(n_bins)]
#Code for figure 1 in supplementary information
#Change the parameters accordingly
# --- Sine wave: the moving environmental optimum the OU process tracks ---
sample_rate = 1000   # samples per time unit
time = np.arange(0, 10, 1/sample_rate)
frequency = 0.1
amplitude = 4
theta = 0            # phase offset
sinewave = amplitude * np.sin(2 * np.pi * frequency * time + theta)
##Model parameters (Ornstein-Uhlenbeck)
sigma = 1 #standard deviation
mu = 0 #mean
tau = 0.05 #time constant
##simulation parameters
dt = 0.0001 #Time step
T = 1 #Total time
n = int(T/dt) #Number of steps
t = np.linspace(0., T, n) #Vector of times
##Calculated randomized variables
sigma_bis = sigma * np.sqrt(2. / tau)   # effective noise amplitude
sqrtdt = np.sqrt(dt)                    # precomputed for the Euler-Maruyama steps
#Plot of Sine wave
plt.plot(time,sinewave)
plt.title(r'SineWave with amplitude = {}, frequency = {}'.format(amplitude,frequency))
plt.axis([0, 10, -4, 4])
#Random Drift: 100 independent OU sample paths tracking the sine-wave optimum
for iters in range(100):
    ##Store results
    x = np.zeros(n)
    #Euler-Maruyama method
    for i in range(n - 1):
        x[i + 1] = x[i] + dt * (-(x[i] - sinewave[i]) / tau) + sigma_bis * sqrtdt * np.random.randn()
    array = x
    # overlay each path with a thin line
    plt.plot(time,array,linewidth=0.1)
plt.title(r'OH Drift with amplitude = {}, frequency = {}'.format(amplitude,frequency))
#Time-averaged drift: same OU paths, but plotted as per-bin means
for iters in range(100):
    ##Store results
    x = np.zeros(n)
    #Euler-Maruyama method
    for i in range(n - 1):
        x[i + 1] = x[i] + dt * (-(x[i] - sinewave[i]) / tau) + sigma_bis * sqrtdt * np.random.randn()
    array = x
    # bin the path into sample_rate-wide windows and keep each window's mean
    meanarray = binVals(array,int(sample_rate))
    # plot means at the center of each bin (offset by sample_rate/2)
    plt.plot(time[int(sample_rate/2):-1:int(sample_rate)],meanarray,linewidth=0.1)
    plt.scatter(time[int(sample_rate/2):-1:int(sample_rate)],meanarray,linewidth=0.1)
plt.title(r'OH Drift time-averaged with amplitude = {}, frequency = {}'.format(amplitude,frequency))
plt.axis([0, 10, -6, 6])
#plt.plot(time,x)
#Iterate OH means and calculate the angle between vectors
start_time = timetime.time()
angle_list = []
for iters in range(100):
    # two independent OU paths tracking the same optimum
    x1 = np.zeros(n)
    x2 = np.zeros(n)
    for i in range(n - 1):
        x1[i + 1] = x1[i] + dt * (-(x1[i] - sinewave[i]) / tau) + sigma_bis * sqrtdt * np.random.randn()
        x2[i + 1] = x2[i] + dt * (-(x2[i] - sinewave[i]) / tau) + sigma_bis * sqrtdt * np.random.randn()
    meanarray1, meanarray2 = binVals(x1,int(sample_rate)),binVals(x2,int(sample_rate))
    # angle between the segment of path 1 and path 2 over each bin pair
    for j in np.arange(1,len(meanarray1)):
        if j != len(meanarray1)-1:
            Idx_O = j
            Idx_E = j+1
            # translate each (time, mean) segment so its end point is the origin
            v1 = vectorTransform((time[int(sample_rate/2):-1:int(sample_rate)][Idx_O],meanarray1[Idx_O]),(time[int(sample_rate/2):-1:int(sample_rate)][Idx_E],meanarray1[Idx_E]))
            v2 = vectorTransform((time[int(sample_rate/2):-1:int(sample_rate)][Idx_O],meanarray2[Idx_O]),(time[int(sample_rate/2):-1:int(sample_rate)][Idx_E],meanarray2[Idx_E]))
            vector_angle = angle_between(v1,v2)
            angle_list.append(vector_angle)
        else:
            pass
plt.hist(angle_list, bins = np.arange(0,180,5))
plt.xlabel('Angle')
plt.ylabel('Probability')
plt.title(r'Histogram of OH Trait Drift a=4, frequency=0.1')
print("--- %s seconds ---" % (timetime.time() - start_time))
###Brownian motion
#simulation parameters
n = 100000 #time steps
#Two one dimensional cases that can be combined into two dimensional case
x = np.cumsum(np.random.randn(n))
y = np.cumsum(np.random.randn(n))
# second, independent 2-D walk for comparison
xP = np.cumsum(np.random.randn(n))
yP = np.cumsum(np.random.randn(n))
# We add 10 intermediary points between two
# successive points. We interpolate x and y.
k = 50
x2 = np.interp(np.arange(n * k), np.arange(n) * k, x)
y2 = np.interp(np.arange(n * k), np.arange(n) * k, y)
xP2 = np.interp(np.arange(n * k), np.arange(n) * k, xP)
yP2 = np.interp(np.arange(n * k), np.arange(n) * k, yP)
# Now, we draw our points with a gradient of colors (color encodes time).
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
plt.scatter(x2, y2, c=range(n * k), linewidths=0,
            marker='o', s=3, cmap=plt.cm.jet,)
plt.axis('equal')
plt.scatter(xP2, yP2, c = range(n*k), linewidths=0,
            marker='o', s=3, cmap=plt.cm.jet,)
plt.plot(xP, yP)
plt.plot(x2, y2)
#Brownian Time-averaged drift for single lineages
for iters in range(1000):
    ##Store results
    n = 1000 #time steps
    #Two one dimensional cases that can be combined into two dimensional case
    x = np.cumsum(np.random.randn(n))
    y = np.cumsum(np.random.randn(n))
    #Find mean values (100-step bins)
    meanx = binVals(x,int(100))
    meany = binVals(y,int(100))
    #plot each lineage's binned trajectory
    plt.plot(meanx,meany,linewidth=0.5)
    plt.scatter(meanx,meany,linewidth=0.1, s = 4)
plt.title(r'Brownian Drift time-averaged with equal rates')
#plt.axis([0, 10, -6, 6])
#Iterate BM means and calculate the angle between vectors
start_time = timetime.time()
BW_angle_list = []
for iters in range(10000):
    runs = 1000 #time steps
    Tavg = 100 #Average years; for random number use random.randrange(i,j)
    rate_One = 1 #rate change
    rate_Two = 10 #rate change
    # two 2-D walks evolving at different rates (more steps per unit time)
    x, y = np.cumsum(np.random.randn(runs * rate_One)), np.cumsum(np.random.randn(runs * rate_One))
    x2, y2 = np.cumsum(np.random.randn(runs * rate_Two)), np.cumsum(np.random.randn(runs * rate_Two))
    # bin width is scaled by rate so both walks yield the same number of means
    meanx,meany = binVals(x,int(Tavg)*rate_One),binVals(y,int(Tavg)*rate_One)
    meanx2,meany2 = binVals(x2,int(Tavg)*rate_Two),binVals(y2,int(Tavg)*rate_Two)
    for j in np.arange(1,len(meanx)):
        if j != len(meanx)-1:
            Idx_O = j
            Idx_E = j+1
            # angle between corresponding segments of the two binned walks
            v1 = vectorTransform((meanx[Idx_O],meany[Idx_O]),(meanx[Idx_E],meany[Idx_E]))
            v2 = vectorTransform((meanx2[Idx_O],meany2[Idx_O]),(meanx2[Idx_E],meany2[Idx_E]))
            vector_angle = angle_between(v1,v2)
            BW_angle_list.append(vector_angle)
        else:
            pass
plt.hist(BW_angle_list, bins = np.arange(0,180,1))
plt.xlabel('Angle')
plt.ylabel('Probability')
plt.title(r'Histogram of BW Parallelism')
print("--- %s seconds ---" % (timetime.time() - start_time))
|
pyrkamarcin/epiphany | cli/src/helpers/naming_helpers.py | <filename>cli/src/helpers/naming_helpers.py
def to_role_name(feature_name):
    """Convert a kebab-case feature name into a snake_case role name."""
    return "_".join(feature_name.split("-"))
def to_feature_name(role_name):
    """Convert a snake_case role name into a kebab-case feature name."""
    return role_name.replace("_", "-")


def resource_name(prefix, cluster_name, resource_type, component=None):
    """Build a lowercase, kebab-case resource name.

    Layout: [prefix-]cluster_name[-component]-resource_type, where a
    missing or 'default' prefix contributes nothing, and any underscores
    are normalized to hyphens.
    """
    parts = []
    # 'default' (or empty/None) prefix is treated as "no prefix"
    if prefix and prefix != 'default':
        parts.append(prefix)
    parts.append(cluster_name)
    if component is not None:
        parts.append(component)
    parts.append(resource_type)
    # lowercase every part, then normalize underscores to hyphens
    return to_feature_name('-'.join(part.lower() for part in parts))
def cluster_tag(prefix, cluster_name):
    """Return the lowercase tag identifying a cluster, with optional prefix."""
    # an empty/None or 'default' prefix is omitted entirely
    if prefix and prefix != 'default':
        return '%s-%s' % (prefix.lower(), cluster_name.lower())
    return cluster_name.lower()
def storage_account_name(prefix, cluster_name, storage_use):
    """Build a lowercase storage account name capped at 24 characters.

    Layout: <prefix[:8]><cluster-name-minus-hyphens[:room-left]><storage_use[:5]>,
    where a missing or 'default' prefix contributes nothing.
    """
    # slicing already copes with strings shorter than the cap
    pre = '' if (not prefix) or (prefix == 'default') else prefix[:8].lower()
    sto = storage_use[:5].lower()
    # the cluster part gets whatever room remains within the 24-char limit
    remaining = 24 - (len(pre) + len(sto))
    clu = cluster_name.replace('-', '')[:remaining].lower()
    return f'{pre}{clu}{sto}'
def get_os_name_normalized(vm_doc):
    """Map a VM document's image description onto a canonical OS name.

    Returns 'almalinux', 'rhel' or 'ubuntu' when a known indicator appears
    in the provider-specific image field, or None for fully custom images.
    """
    expected_indicators = {
        "almalinux": "almalinux",
        "redhat": "rhel",
        "rhel": "rhel",
        "ubuntu": "ubuntu",
    }
    # pick the free-text field that describes the image for this provider
    if vm_doc.provider == "azure":
        # Example image offers:
        # - 0001-com-ubuntu-server-focal
        # - RHEL
        # - almalinux
        description = vm_doc.specification.storage_image_reference.offer
    elif vm_doc.provider == "aws":
        # Example public/official AMI names:
        # - ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220419
        # - RHEL-8.5_HVM-20220127-x86_64-3-Hourly2-GP2
        # - AlmaLinux OS 8.5.20211116 x86_64
        description = vm_doc.specification.os_full_name
    else:
        return None

    description = description.lower()
    for indicator, normalized in expected_indicators.items():
        if indicator in description:
            return normalized
    # when the image name is completely custom
    return None
|
pyrkamarcin/epiphany | ansible/playbooks/roles/repository/files/download-requirements/src/error.py | import logging
class DownloadRequirementsError(Exception):
    """
    Base class for all non standard errors raised during a script run.

    The message is logged at error level as soon as the exception is created.
    """
    def __init__(self, msg: str):
        # fix: forward the message to Exception so str(exc) is not empty
        # (previously super().__init__() dropped it and only logging kept it)
        super().__init__(msg)
        logging.error(msg)
class CriticalError(DownloadRequirementsError):
    """
    Raised when there was an error that could not be fixed by
    download-requirements script.
    """
    # Marker subclass: callers catch this to distinguish fatal failures
    # from recoverable DownloadRequirementsError conditions.
class PackageNotfound(CriticalError):
    """
    Raised when there was no package found by the query tool.
    """
    # NOTE: class name spelling ('Notfound') kept as-is for backward
    # compatibility with existing except clauses.
class ChecksumMismatch(DownloadRequirementsError):
    """
    Raised when there was a file checksum mismatch.
    """
    def __init__(self, msg: str):
        # augment the caller's message with a fixed security warning before
        # passing it to the base class (which also logs it)
        super().__init__(f'{msg} - download failed due to checksum mismatch, '
                         'WARNING someone might have replaced the file')
|
pyrkamarcin/epiphany | ansible/playbooks/roles/repository/files/download-requirements/src/command/dnf.py | <filename>ansible/playbooks/roles/repository/files/download-requirements/src/command/dnf.py
from typing import Dict, List
from src.command.command import Command
from src.error import CriticalError
class Dnf(Command):
    """
    Interface for `dnf`
    """

    def __init__(self, retries: int):
        super().__init__('dnf', retries)

    def update(self, enablerepo: str,
               package: str = None,
               disablerepo: str = '*',
               assume_yes: bool = True):
        """
        Interface for `dnf update`

        :param enablerepo: repository to enable for this transaction
        :param package: optional single package to update (all when None)
        :param disablerepo: repositories to disable, defaults to all (`*`)
        :param assume_yes: if set to True, -y flag will be used
        """
        update_parameters: List[str] = ['update']

        if assume_yes:
            update_parameters.append('-y')

        if package is not None:
            update_parameters.append(package)

        update_parameters.append(f'--disablerepo={disablerepo}')
        update_parameters.append(f'--enablerepo={enablerepo}')

        self.run(update_parameters)

    def install(self, package: str,
                assume_yes: bool = True):
        """
        Interface for `dnf install -y`

        :param package: packaged to be installed
        :param assume_yes: if set to True, -y flag will be used
        :raises CriticalError: when install failed for any reason other than
                               reinstalling an already-installed package
        """
        args: List[str] = ['install']
        # fix: previously an empty-string element was appended when
        # assume_yes was False, which dnf would treat as a package name
        if assume_yes:
            args.append('-y')
        args.append(package)

        proc = self.run(args, accept_nonzero_returncode=True)

        if proc.returncode != 0:
            # reinstalling a package from a url reports "does not update"
            if 'does not update' not in proc.stdout:
                raise CriticalError(f'dnf install failed for `{package}`, reason `{proc.stdout}`')

    def remove(self, package: str,
               assume_yes: bool = True):
        """
        Interface for `dnf remove -y`

        :param package: packaged to be removed
        :param assume_yes: if set to True, -y flag will be used
        """
        args: List[str] = ['remove']
        if assume_yes:  # fix: same empty-argument issue as install()
            args.append('-y')
        args.append(package)
        self.run(args)

    def is_repo_enabled(self, repo: str) -> bool:
        """Return True when `repo` appears in the enabled repolist output."""
        output = self.run(['repolist',
                           '--enabled',
                           '--quiet',
                           '-y']).stdout
        return repo in output

    def find_rhel_repo_id(self, patterns: List[str]) -> List[str]:
        """Return the subset of `patterns` that matches any line of `dnf repolist --all`."""
        output = self.run(['repolist',
                           '--all',
                           '--quiet',
                           '-y']).stdout

        # preserve the original match order: line-major, then pattern order
        return [pattern
                for line in output.split('\n')
                for pattern in patterns
                if pattern in line]

    def accept_keys(self):
        # to accept import of repo's GPG key (for repo_gpgcheck=1)
        self.run(['repolist', '-y'])

    def is_repo_available(self, repo: str) -> bool:
        """Return True when `repo` alone can be queried successfully."""
        retval = self.run(['repoinfo',
                           '--disablerepo=*',
                           f'--enablerepo={repo}',
                           '--quiet']).returncode
        return retval == 0

    def makecache(self, timer: bool = True,
                  assume_yes: bool = True):
        """Interface for `dnf makecache [timer] [-y]`."""
        args: List[str] = ['makecache']
        if timer:
            args.append('timer')
        if assume_yes:
            args.append('-y')
        self.run(args)

    def list_all_repos_info(self) -> List[Dict[str, str]]:
        """
        Query repoinfo and construct info per repository.

        :returns: one `{field: value}` dict per repository block
        """
        args: List[str] = ['repoinfo',
                           '--all',
                           '--quiet',
                           '-y']
        raw_output = self.run(args).stdout

        # repository blocks are separated by a blank line
        elems: List[str] = list(raw_output.split('\n\n'))
        # fix: annotation was List[List[str]] although the elements are dicts
        repoinfo: List[Dict[str, str]] = [{} for _ in range(len(elems))]
        for elem_idx, elem in enumerate(elems):
            for line in elem.split('\n'):
                if line:
                    key, value = line.split(':', 1)
                    repoinfo[elem_idx][key.strip()] = value.strip()

        return repoinfo
|
pyrkamarcin/epiphany | ansible/playbooks/roles/repository/files/download-requirements/src/command/dnf_config_manager.py | <filename>ansible/playbooks/roles/repository/files/download-requirements/src/command/dnf_config_manager.py<gh_stars>1-10
from src.command.command import Command
class DnfConfigManager(Command):
    """
    Interface for `dnf config-manager`
    """

    def __init__(self, retries: int):
        super().__init__('dnf', retries)

    def __set_repo_state(self, option: str, repo: str):
        # single place that shells out to `dnf config-manager <option> <repo>`
        self.run(['config-manager', option, repo])

    def add_repo(self, repo: str):
        self.__set_repo_state('--add-repo', repo)

    def disable_repo(self, repo: str):
        self.__set_repo_state('--set-disabled', repo)

    def enable_repo(self, repo: str):
        self.__set_repo_state('--set-enabled', repo)
|
pyrkamarcin/epiphany | ansible/playbooks/roles/repository/files/download-requirements/src/command/dnf_repoquery.py | from typing import Callable, List
from src.command.command import Command
from src.error import CriticalError, PackageNotfound
class DnfRepoquery(Command):
    """
    Interface for `dnf repoquery`
    """

    def __init__(self, retries: int):
        super().__init__('dnf', retries)  # repoquery would require yum-utils package

    def __query(self, packages: List[str],
                queryformat: str,
                archlist: List[str],
                requires: bool,
                resolve: bool,
                output_handler: Callable) -> List[str]:
        """
        Run generic query using `dnf repoquery` command.

        :param packages: data will be returned for those `packages`
        :param queryformat: specify custom query output format
        :param archlist: limit results to these architectures
        :param requires: get capabilities that the packages depend on
        :param resolve: resolve capabilities to originating package(s)
        :param output_handler: different queries produce different outputs, use specific output handler
        :raises:
            :class:`CriticalError`: can be raised on exceeding retries or when error occurred
            :class:`PackageNotfound`: when query did not return any package info
        :returns: query result
        """
        args: List[str] = ['repoquery',
                           f'--archlist={",".join(archlist)}',
                           '--disableplugin=subscription-manager',  # to speed up querying
                           '--latest-limit=1',
                           f'--queryformat={queryformat}',
                           '--quiet']
        if requires:
            args.append('--requires')
        if resolve:
            args.append('--resolve')
        args.append('-y')  # to import GPG keys
        args.extend(packages)

        # dnf repoquery doesn't set error code on empty results
        output = self.run(args).stdout
        output_handler(output)

        # fix: the result list previously shadowed the `packages` parameter
        return [line for line in output.split('\n') if line]

    def query(self, packages: List[str], queryformat: str, archlist: List[str]) -> List[str]:
        """
        Generic query to dnf database.

        :param packages: data will be returned for those `packages`
        :param queryformat: specify custom query output format
        :param archlist: limit results to these architectures
        :raises:
            :class:`CriticalError`: can be raised on exceeding retries or when error occurred
            :class:`PackageNotfound`: when query did not return any package info
        :returns: query result
        """

        def output_handler(output: str):
            """ In addition to errors, handle missing packages """
            if not output:
                raise PackageNotfound(f'repoquery failed for packages `{packages}`, reason: some of package(s) not found')
            elif 'error' in output:
                raise CriticalError(f'repoquery failed for packages `{packages}`, reason: `{output}`')

        return self.__query(packages, queryformat, archlist, False, False, output_handler)

    def get_dependencies(self, packages: List[str], queryformat: str, archlist: List[str]) -> List[str]:
        """
        Get all dependencies for `packages`.

        :param packages: data will be returned for those `packages`
        :param queryformat: specify custom query output format
        :param archlist: limit results to these architectures
        :raises:
            :class:`CriticalError`: can be raised on exceeding retries or when error occurred
            :class:`ValueError`: when `packages` list is empty
        :returns: query result
        """
        # repoquery without KEY argument will query dependencies for all packages
        if not packages:
            raise ValueError('packages: list cannot be empty')

        def output_handler(output: str):
            """ Handle errors """
            if 'error' in output:
                raise CriticalError(f'dnf repoquery failed for packages `{packages}`, reason: `{output}`')

        return self.__query(packages, queryformat, archlist, True, True, output_handler)
|
pyrkamarcin/epiphany | ansible/playbooks/roles/repository/files/download-requirements/src/config/os_type.py | <reponame>pyrkamarcin/epiphany
from enum import Enum
from typing import Dict, List
class OSArch(Enum):
    """ Supported architecture types """
    X86_64 = 'x86_64'   # Intel/AMD 64-bit
    ARM64 = 'arm64'     # AArch64
class OSFamily(Enum):
    """ Supported distro type families """
    Debian = 'debian'
    RedHat = 'redhat'


class OSConfig:
    """Describes a single supported operating system configuration."""

    def __init__(self, os_family: OSFamily, os_name: str, os_name_aliases: List[str] = None):
        self.family = os_family
        self.name = os_name
        # aliases are matched when detecting the OS type via /etc/os-release;
        # a missing/empty alias list falls back to a fresh empty list
        self.aliases = os_name_aliases or []
class OSType(Enum):
    """ Supported operating system types """
    Almalinux = OSConfig(OSFamily.RedHat, 'almalinux-8')
    RHEL = OSConfig(OSFamily.RedHat, 'rhel-8')
    Ubuntu = OSConfig(OSFamily.Debian, 'ubuntu-20.04')

    @property
    def os_family(self) -> OSFamily:
        # distro family of the underlying OSConfig (e.g. OSFamily.RedHat)
        return self.value.family

    @property
    def os_name(self) -> str:
        # canonical versioned name, e.g. 'rhel-8'
        return self.value.name

    @property
    def os_aliases(self) -> List[str]:
        # alternative names used when detecting the OS via /etc/os-release
        return self.value.aliases
# Supported operating systems per architecture.
# fix: the annotation claimed Dict[OSArch, OSConfig] although the values
# are lists of OSType members
SUPPORTED_OS_TYPES: Dict[OSArch, List[OSType]] = {
    OSArch.X86_64: [
        OSType.Almalinux,
        OSType.RHEL,
        OSType.Ubuntu
    ],
    OSArch.ARM64: []  # no ARM64 systems supported yet
}
|
pyrkamarcin/epiphany | ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_dnf_repoquery.py | from tests.mocks.command_run_mock import CommandRunMock
from src.command.dnf_repoquery import DnfRepoquery
def test_interface_query(mocker):
    ''' Check argument construction for `dnf repoquery` - generic query '''
    # CommandRunMock captures the argv list that Command.run() would execute
    with CommandRunMock(mocker, DnfRepoquery(1).query, {'packages': ['tar', 'vim'],
                                                        'queryformat': 'some_format',
                                                        'archlist': ['some_arch', 'noarch']}) as call_args:
        # expected argv: binary name followed by repoquery flags and packages
        assert call_args == ['dnf',
                             'repoquery',
                             '--archlist=some_arch,noarch',
                             '--disableplugin=subscription-manager',
                             '--latest-limit=1',
                             '--queryformat=some_format',
                             '--quiet',
                             '-y',
                             'tar',
                             'vim'
                             ]
def test_interface_get_dependencies(mocker):
    ''' Check argument construction for `repoquery` - dependencies query '''
    # same capture as test_interface_query, but for the dependency variant
    with CommandRunMock(mocker, DnfRepoquery(1).get_dependencies, {'packages': ['tar', 'vim'],
                                                                   'queryformat': 'some_format',
                                                                   'archlist': ['some_arch', 'noarch']}) as call_args:
        # dependency queries add --requires/--resolve to the argv
        assert call_args == ['dnf',
                             'repoquery',
                             '--archlist=some_arch,noarch',
                             '--disableplugin=subscription-manager',
                             '--latest-limit=1',
                             '--queryformat=some_format',
                             '--quiet',
                             '--requires',
                             '--resolve',
                             '-y',
                             'tar',
                             'vim'
                             ]
|
microsoft/lightgbm-benchmark | tests/common/test_distributed.py | """Tests src/common/io.py"""
import os
import pytest
from unittest.mock import call, Mock, patch
import time
import json
from common.distributed import MultiNodeScript, MPIHandler, mpi_config_class
from test_component import (
assert_runnable_script_properties,
assert_runnable_script_metrics
)
class FakeMultiNodeScript(MultiNodeScript):
    # Minimal MultiNodeScript subclass used to drive the base-class machinery
    # in tests; its run() only records one timed metric block.
    def __init__(self):
        super().__init__(
            task="unittest",
            framework="pytest",
            framework_version=pytest.__version__
        )

    def run(self, args, logger, metrics_logger, unknown_args):
        # don't do anything
        with metrics_logger.log_time_block("fake_time_block", step=1):
            time.sleep(1)
@patch('mlflow.end_run')
@patch('mlflow.log_metric')
@patch('mlflow.set_tags')
@patch('mlflow.start_run')
@patch('common.distributed.MPIHandler')
def test_multi_node_script(mpi_handler_mock, mlflow_start_run_mock, mlflow_set_tags_mock, mlflow_log_metric_mock, mlflow_end_run_mock):
    """End-to-end run of a MultiNodeScript subclass with MPI and mlflow mocked.

    Decorators apply bottom-up, so the first argument is the MPIHandler mock.
    """
    # fake mpi initialization + config (single node, rank 0, acting as main node)
    mpi_handler_mock().mpi_config.return_value = mpi_config_class(
        1, # world_size
        0, # world_rank
        False, # mpi_available
        True, # main_node
    )

    # then just run main
    test_component = FakeMultiNodeScript.main(
        [
            "foo.py",
            "--verbose", "True",
            "--custom_properties", json.dumps({'benchmark_name':'unittest'})
        ]
    )

    # mlflow initialization must happen exactly once per run
    mlflow_start_run_mock.assert_called_once()
    mlflow_end_run_mock.assert_called_once()

    # shared assertions from test_component for tags/properties
    assert_runnable_script_properties(
        test_component,
        "unittest",
        mlflow_set_tags_mock
    )
    assert_runnable_script_metrics(
        test_component,
        [{'key':'fake_time_block', 'step':1}], # user_metrics
        mlflow_log_metric_mock
    )
class FailingMultiNodeScript(MultiNodeScript):
    # MultiNodeScript subclass whose run() always raises, used to verify
    # that exceptions propagate out of main().
    def __init__(self):
        super().__init__(
            task="failure",
            framework="pytest",
            framework_version=pytest.__version__
        )

    def run(self, args, logger, metrics_logger, unknown_args):
        # don't do anything
        with metrics_logger.log_time_block("fake_time_block", step=1):
            time.sleep(1)
        raise Exception("Some fake issue occured during code!")
@patch('common.distributed.MPIHandler')
def test_multi_node_script_failure(mpi_handler_mock):
    """The exception raised inside run() must propagate out of main()."""
    # fake mpi initialization + config
    mpi_handler_mock().mpi_config.return_value = mpi_config_class(
        1, # world_size
        0, # world_rank
        False, # mpi_available
        True, # main_node
    )

    # just run main and expect the fake failure to bubble up
    with pytest.raises(Exception) as e_test:
        test_component = FailingMultiNodeScript.main(
            [
                "foo.py",
                "--verbose", "True",
                "--custom_properties", json.dumps({'benchmark_name':'unittest'})
            ]
        )
def test_mpi_handler_mpi_init():
    """Tests the MPIHandler class"""
    # create MPI module mock standing in for mpi4py's MPI module
    mpi_module_mock = Mock()
    mpi_module_mock.COMM_WORLD = Mock()
    mpi_module_mock.COMM_WORLD.Get_size.return_value = 10
    mpi_module_mock.COMM_WORLD.Get_rank.return_value = 3
    mpi_module_mock.THREAD_MULTIPLE = 3

    # patch _mpi_import to return our MPI module mock
    with patch.object(MPIHandler, "_mpi_import") as mpi_import_mock:
        mpi_import_mock.return_value = mpi_module_mock

        mpi_handler = MPIHandler(mpi_init_mode=3) # MPI.THREAD_MULTIPLE
        mpi_handler.initialize()
        mpi_config = mpi_handler.mpi_config()
        mpi_handler.finalize()

    # test this random config: rank/size come from the mocked COMM_WORLD
    assert mpi_config.world_rank == 3
    assert mpi_config.world_size == 10
    assert mpi_config.mpi_available == True
    assert mpi_config.main_node == False
def test_mpi_handler_no_mpi_init():
    """Tests the MPIHandler class"""
    # create MPI module mock
    mpi_module_mock = Mock()
    mpi_module_mock.COMM_WORLD = Mock()
    mpi_module_mock.COMM_WORLD.Get_size.return_value = 10 # different value just to make the point
    mpi_module_mock.COMM_WORLD.Get_rank.return_value = 3 # different value just to make the point
    mpi_module_mock.THREAD_MULTIPLE = 3

    # patch _mpi_import to return our MPI module mock
    with patch.object(MPIHandler, "_mpi_import") as mpi_import_mock:
        # with mpi_init_mode=None the OMPI_* env vars are the expected source
        with patch.dict(os.environ, {"OMPI_COMM_WORLD_SIZE": "6", "OMPI_COMM_WORLD_RANK": "2"}):
            mpi_import_mock.return_value = mpi_module_mock

            mpi_handler = MPIHandler(mpi_init_mode=None)
            mpi_handler.initialize()
            mpi_config = mpi_handler.mpi_config()
            mpi_handler.finalize()

    # test this random config: values match the env vars, not COMM_WORLD
    assert mpi_config.world_rank == 2
    assert mpi_config.world_size == 6
    assert mpi_config.mpi_available == True
    assert mpi_config.main_node == False
|
microsoft/lightgbm-benchmark | src/common/math.py | """
Helper math functions
"""
import os
import argparse
import logging
import numpy as np
def bootstrap_ci(data, iterations=1000, operators=None, confidence_level=0.95, seed=None):
    """
    Bootstrap confidence intervals for one or more statistics.

    Args:
        data (np.array) : input data
        iterations (int) : how many bootstrapped samples to generate
        operators (Dict[str->func]) : map of functions to produce CI for
                                      (defaults to {'mean': np.mean})
        confidence_level (float) : confidence_level = 1-alpha
        seed (int) : optional seed for reproducible resampling

    Returns:
        operators_ci: Dict[str->tuple] mapping each operator name to
                      (ci_left, ci_mean, ci_right)
    """
    # avoid a mutable default argument; keep the historical default operator
    if operators is None:
        operators = {'mean': np.mean}

    # fix: `seed` was previously accepted but never used
    rng = np.random.default_rng(seed)

    # collect per-iteration statistic values in a dict keyed by operator name
    bootstrap_runs = {operator_key: [] for operator_key in operators}

    sample_size = len(data)
    for _ in range(iterations):
        bootstrap = rng.choice(data, size=sample_size, replace=True)
        for operator_key, operator_func in operators.items():
            bootstrap_runs[operator_key].append(operator_func(bootstrap))

    operators_ci = {}
    for operator_key, runs in bootstrap_runs.items():
        values = np.array(runs)
        # percentile bounds of the bootstrap distribution at 1-alpha coverage
        ci_left = np.percentile(values, ((1-confidence_level)/2*100))
        ci_right = np.percentile(values, (100-(1-confidence_level)/2*100))
        ci_mean = np.mean(values) # just for fun
        operators_ci[operator_key] = (ci_left, ci_mean, ci_right)

    return operators_ci
|
microsoft/lightgbm-benchmark | tests/common/test_data.py | <reponame>microsoft/lightgbm-benchmark
"""Tests src/common/data.py"""
import os
import pytest
from unittest.mock import call, Mock, patch
from common.data import RegressionDataGenerator
def test_regression_data_generator():
    """Tests format of outputs of RegressionDataGenerator"""
    generator = RegressionDataGenerator(
        batch_size=64,
        n_features=100,
        n_informative=50,
        bias=1.0,
        noise=1.0,
        seed=4
    )
    # every generated batch must be a well-formed (X, y) pair
    for i in range(10):
        batch = generator.generate()
        assert batch is not None
        assert isinstance(batch, tuple)
        assert len(batch) == 2
        X, y = batch
        assert X is not None
        assert y is not None
        # shapes follow batch_size x n_features, with one target per row
        assert X.shape == (64, 100)
        assert y.shape == (64,)
def test_regression_data_generator_reproducibility():
    """Tests initializing generator with seeds"""
    generator1 = RegressionDataGenerator(
        batch_size=64,
        n_features=100,
        n_informative=50,
        bias=1.0,
        noise=1.0,
        seed=4
    )
    X1,y1 = generator1.generate()

    generator2 = RegressionDataGenerator(
        batch_size=64,
        n_features=100,
        n_informative=50,
        bias=1.0,
        noise=1.0,
        seed=5
    )
    X2,y2 = generator2.generate()

    generator3 = RegressionDataGenerator(
        batch_size=64,
        n_features=100,
        n_informative=50,
        bias=1.0,
        noise=1.0,
        seed=4 # <<< Equal to generator 1
    )
    X3,y3 = generator3.generate()

    # if using same seed twice, should be equal strictly
    assert (X1 == X3).all()
    assert (y1 == y3).all()

    # if using different seeds, likely to be different
    # NOTE(review): `.all()` requires every element to differ; that holds
    # almost surely for continuous noise, but `.any()` would state
    # "the outputs are different" more robustly
    assert (X1 != X2).all()
    assert (y1 != y2).all()
|
microsoft/lightgbm-benchmark | tests/pipelines/test_lightgbm_training.py | """
Executes the series of scripts end-to-end
to test LightGBM (python) manual benchmark
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch
from pipelines.azureml.lightgbm_training import main
def test_lightgbm_training_cpu(aml_config, config_directory):
    """Builds and validates (without submitting) the training pipeline with the cpu config."""
    # create test arguments for the script
    script_args = [
        "src/pipelines/lightgbm_training.py",
        "--exp-config", os.path.join(config_directory, "experiments", "lightgbm_training", "cpu.yaml"),
        f"aml.subscription_id={aml_config.subscription_id}",
        f"aml.resource_group={aml_config.resource_group}",
        f"aml.workspace_name={aml_config.workspace_name}",
        f"aml.tenant={aml_config.tenant}",
        f"aml.auth={aml_config.auth}",
        # validate the pipeline graph but do not submit it
        "+run.validate=True",
        "+run.submit=False"
    ]

    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        main()
def test_lightgbm_training_sweep(aml_config, config_directory):
    """Same as the cpu test, but exercises the hyperparameter sweep config."""
    # create test arguments for the script
    script_args = [
        "src/pipelines/lightgbm_training.py",
        "--exp-config", os.path.join(config_directory, "experiments", "lightgbm_training", "sweep.yaml"),
        f"aml.subscription_id={aml_config.subscription_id}",
        f"aml.resource_group={aml_config.resource_group}",
        f"aml.workspace_name={aml_config.workspace_name}",
        f"aml.tenant={aml_config.tenant}",
        f"aml.auth={aml_config.auth}",
        # validate the pipeline graph but do not submit it
        "+run.validate=True",
        "+run.submit=False"
    ]

    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        main()
|
microsoft/lightgbm-benchmark | tests/scripts/test_sample_sample.py | <reponame>microsoft/lightgbm-benchmark<filename>tests/scripts/test_sample_sample.py<gh_stars>10-100
"""
Executes the series of scripts end-to-end
to test LightGBM (python) manual benchmark
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch
from scripts.sample import sample
# IMPORTANT: see conftest.py for fixtures
def test_sample_inferencing_script(temporary_dir, regression_inference_sample, regression_model_sample):
    """Runs the sample scoring script end-to-end on fixture data (see conftest.py)."""
    # create a directory for each output
    predictions_dir = os.path.join(temporary_dir, "predictions")

    script_args = [
        "score.py",
        "--data", regression_inference_sample,
        "--model", regression_model_sample,
        "--output", predictions_dir
    ]

    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        sample.main()

    # test expected outputs
    #assert os.path.isfile(os.path.join(predictions_dir, "predictions.txt"))
microsoft/lightgbm-benchmark | tests/common/test_pipelines.py | <reponame>microsoft/lightgbm-benchmark
"""Tests src/common/pipelines.py"""
import os
import sys
import pytest
from unittest.mock import call, Mock, patch
import time
from dataclasses import dataclass
from omegaconf import DictConfig
from common.pipelines import (
parse_pipeline_config,
azureml_connect,
pipeline_submit
)
def test_parse_pipeline_config():
    """Creates a throwaway config dataclass and checks CLI parsing into a DictConfig.

    NOTE: also reused as a config factory by test_pipeline_submit, hence the return value.
    """
    @dataclass
    class test_config:
        test_param: str = "default_str"

    fake_argv = [
        "test_pipelines.py",
        "+aml.subscription_id=test_subscription",
        "+aml.resource_group=test_resource_group",
        "+aml.workspace_name=test_workspace_name",
        "+aml.tenant=test_tenant",
        "+experiment.name=test_experiment_name",
        "+compute.linux_cpu=test-cluster",
        "+compute.linux_gpu=test-gpu-cluster",
        "+compute.windows_cpu=test-win-cpu",
        "test_config.test_param=test_str_value",
        "run.submit=True"
    ]
    # parse as if those arguments came from the command line
    with patch.object(sys, "argv", fake_argv):
        parsed = parse_pipeline_config(test_config)

    # parsing must yield an omegaconf DictConfig
    assert isinstance(parsed, DictConfig)
    # custom dataclass field override made it through
    assert parsed.test_config.test_param == "test_str_value"
    # aml connection settings
    assert parsed.aml.subscription_id == "test_subscription"
    assert parsed.aml.resource_group == "test_resource_group"
    assert parsed.aml.workspace_name == "test_workspace_name"
    assert parsed.aml.tenant == "test_tenant"
    # compute targets
    assert parsed.compute.linux_cpu == "test-cluster"
    assert parsed.compute.linux_gpu == "test-gpu-cluster"
    assert parsed.compute.windows_cpu == "test-win-cpu"

    return parsed
def test_pipeline_submit():
    """pipeline_submit() must validate once and submit once with the expected kwargs."""
    # a mock standing in for an azure.ml.component Pipeline instance
    fake_pipeline = Mock()
    fake_workspace = "fake_workspace"

    # reuse the config assembled by test_parse_pipeline_config (run.submit=True)
    config = test_parse_pipeline_config()

    pipeline_run = pipeline_submit(
        fake_workspace,
        config,
        fake_pipeline,
        # test hardcoded overrides
        experiment_description="test_description",
        display_name="test_display_name",
        tags={'foo':'bar'}
    )

    # validation happens exactly once, against the given workspace
    fake_pipeline.validate.assert_called_once()
    fake_pipeline.validate.assert_called_with(
        workspace="fake_workspace" # what's returned by aml_connect mock
    )
    # submission happens exactly once, with config + override values
    fake_pipeline.submit.assert_called_once()
    fake_pipeline.submit.assert_called_with(
        workspace="fake_workspace", # what's returned by aml_connect mock
        experiment_name="test_experiment_name",
        description="test_description",
        display_name="test_display_name",
        tags={'foo':'bar'},
        default_compute_target="cpu-cluster",
        regenerate_outputs=False, # default
        continue_on_step_failure=False # default
    )
|
microsoft/lightgbm-benchmark | src/scripts/data_processing/generate_data/generate.py | <reponame>microsoft/lightgbm-benchmark
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Generate synthetic data for LightGBM training/inferencing
"""
import os
import sys
import argparse
import logging
from distutils.util import strtobool
import numpy
import sklearn
from sklearn.datasets import make_classification, make_regression
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.data import RegressionDataGenerator, RankingDataGenerator, ClassificationDataGenerator
class GenerateSyntheticDataScript(RunnableScript):
    """Runnable script generating synthetic train/test/inferencing data files
    for LightGBM benchmarks, using the sklearn-based generators from common.data.

    Output files are delimited text; the target/label is written as column 0.
    """
    def __init__(self):
        super().__init__(
            task="generate",
            framework="sklearn",
            # BUGFIX: report the actual sklearn version; the previous code passed
            # the literal string "sklearn.__version__" instead of its value.
            framework_version=sklearn.__version__
        )
        # this will collect all the "tasks" (file to generate)
        # for multi-tasking if available (work in progress)
        # each entry is a tuple (output_file_path, generator, number_of_batches)
        self.generation_tasks = []

    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.

        Args:
            parser (argparse.ArgumentParser): an argument parser instance

        Returns:
            ArgumentParser: the argument parser instance

        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments (verbose, custom_properties, ...)
        parser = RunnableScript.get_arg_parser(parser)

        # add arguments that are specific to the script
        group_params = parser.add_argument_group("Synthesis params")
        group_params.add_argument(
            "--type", required=True, type=str, choices=["classification", "regression", "lambdarank"]
        )
        group_params.add_argument("--train_samples", required=True, type=int)
        group_params.add_argument("--train_partitions", required=False, type=int, default=1)
        group_params.add_argument("--test_samples", required=True, type=int)
        group_params.add_argument("--test_partitions", required=False, type=int, default=1)
        group_params.add_argument("--inferencing_samples", required=True, type=int)
        group_params.add_argument("--inferencing_partitions", required=False, type=int, default=1)
        group_params.add_argument("--n_features", required=True, type=int)
        group_params.add_argument("--n_informative", required=True, type=int)
        group_params.add_argument("--n_redundant", required=False, type=int)
        group_params.add_argument("--random_state", required=False, default=None, type=int)
        group_params.add_argument("--docs_per_query", required=False, default=20, type=int)
        group_params.add_argument("--n_label_classes", required=False, default=10, type=int)

        group_params = parser.add_argument_group("Format params")
        group_params.add_argument(
            "--delimiter", required=False, type=str, choices=['tab', 'comma', 'space'], default='comma'
        )

        group_o = parser.add_argument_group("Outputs")
        group_o.add_argument(
            "--output_train",
            required=True,
            type=str,
            help="Output data location (directory)",
        )
        group_o.add_argument(
            "--output_test",
            required=True,
            type=str,
            help="Output data location (directory)",
        )
        group_o.add_argument(
            "--output_inference",
            required=True,
            type=str,
            help="Output data location (directory)",
        )
        group_o.add_argument(
            "--output_header",
            required=False,
            type=str,
            help="Output header location (directory)",
        )
        return parser

    def generate_tasks(self, args):
        """Creates one generation task per output partition file.

        Args:
            args (argparse.namespace): command line arguments provided to script
        """
        # number of samples per partition file for each output
        train_partition_size = args.train_samples // args.train_partitions
        test_partition_size = args.test_samples // args.test_partitions
        inferencing_partition_size = args.inferencing_samples // args.inferencing_partitions

        # generate by batches to cap memory usage; a batch never exceeds a partition
        batch_size = min(
            10000,
            train_partition_size,
            test_partition_size,
            inferencing_partition_size,
        )
        self.logger.info(f"Using batch size {batch_size}")

        # NOTE: args.type is restricted by argparse choices, so exactly one branch matches
        if args.type == "regression":
            generator = RegressionDataGenerator(
                batch_size=batch_size,
                n_features=args.n_features,
                n_informative=args.n_informative,
                bias=0.0,
                noise=0.0,
                seed=args.random_state,
            )
        elif args.type == "lambdarank":
            generator = RankingDataGenerator(
                docs_per_query=args.docs_per_query,
                n_label_classes=args.n_label_classes,
                batch_size=batch_size,
                n_features=args.n_features,
                n_informative=args.n_informative,
                bias=0.0,
                noise=0.0,
                seed=args.random_state,
            )
        elif args.type == "classification":
            generator = ClassificationDataGenerator(
                n_label_classes=args.n_label_classes,
                batch_size=batch_size,
                n_features=args.n_features,
                n_informative=args.n_informative,
                bias=0.0,
                noise=0.0,
                seed=args.random_state,
            )

        # add train partitions to list of tasks
        for i in range(args.train_partitions):
            self.generation_tasks.append(
                (os.path.join(args.output_train, f"train_{i}.txt"), generator, train_partition_size//batch_size)
            )
        # add test partitions to list of tasks
        for i in range(args.test_partitions):
            self.generation_tasks.append(
                (os.path.join(args.output_test, f"test_{i}.txt"), generator, test_partition_size//batch_size)
            )
        # add inferencing partitions to list of tasks
        for i in range(args.inferencing_partitions):
            self.generation_tasks.append(
                (os.path.join(args.output_inference, f"inference_{i}.txt"), generator, inferencing_partition_size//batch_size)
            )

    def execute_tasks(self, args):
        """Executes all generation tasks accumulated by generate_tasks(),
        writing each output file batch by batch.

        Args:
            args (argparse.namespace): command line arguments provided to script
        """
        # show some outputs first
        for output_file_path, generator, batches in self.generation_tasks:
            self.logger.info(f"Will generate output {output_file_path} with {batches} batches")

        partition_count=0

        # generate each data outputs
        for output_file_path, generator, batches in self.generation_tasks:
            self.logger.info(f"Opening file {output_file_path} for writing...")
            # create/erase file
            with open(output_file_path, "w") as output_file:
                output_file.write("")

            # iterate and append
            for i in range(batches):
                with self.metrics_logger.log_time_block("time_data_generation_batch"):
                    X,y = generator.generate(partition_count)
                    y = numpy.reshape(y, (y.shape[0], 1))
                    data = numpy.hstack((y, X)) # keep target as column 0

                with self.metrics_logger.log_time_block("time_data_saving_batch"):
                    with open(output_file_path, "a") as output_file:
                        numpy.savetxt(
                            output_file,
                            data,
                            delimiter=args.delimiter,
                            newline="\n",
                            fmt="%1.3f",
                        )
                self.logger.info(f"Wrote batch {i+1}/{batches}")
                # free the batch before generating the next one
                del X
                del y
                partition_count+=1
            self.logger.info(f"Finished generating file {output_file_path}.")

        self.logger.info("Will create a header file for the generated data")
        # create a header
        if args.output_header:
            os.makedirs(args.output_header, exist_ok=True)
            # NOTE(review): `data` is the last generated batch; its column 0 is the
            # label but header naming does not distinguish it. This relies on at
            # least one batch having been generated above — confirm intended.
            header = [f'Column_{i}' for i in range(data.shape[1])]
            with open(os.path.join(args.output_header, "header.txt"), 'w') as hf:
                hf.writelines(args.delimiter.join(header))

    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)

        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # make sure the output arguments exists
        os.makedirs(args.output_train, exist_ok=True)
        os.makedirs(args.output_test, exist_ok=True)
        os.makedirs(args.output_inference, exist_ok=True)

        # transform delimiter keyword into the actual character
        if args.delimiter == "comma":
            args.delimiter = ","
        elif args.delimiter == "tab":
            args.delimiter = "\t"
        elif args.delimiter == "space":
            args.delimiter = " "

        metrics_logger.log_parameters(
            type=args.type,
            train_samples=args.train_samples,
            test_samples=args.test_samples,
            inferencing_samples=args.inferencing_samples,
            n_features=args.n_features,
            n_informative=args.n_informative,
            n_redundant=args.n_redundant,
            random_state=args.random_state,
        )

        # record a metric
        logger.info("Generating data.")
        self.generate_tasks(args)
        self.execute_tasks(args)
def get_arg_parser(parser=None):
    """Compatibility shim for shrike unit tests: delegates to the script class."""
    return GenerateSyntheticDataScript.get_arg_parser(parser)
def main(cli_args=None):
    """Compatibility shim for shrike unit tests: runs the script class entry point."""
    GenerateSyntheticDataScript.main(cli_args)


if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | src/pipelines/azureml/lightgbm_training.py | <gh_stars>10-100
"""
Runs LightGBM using distributed (mpi) training.
to execute:
> python src/pipelines/azureml/lightgbm_training.py --exp-config conf/experiments/lightgbm_training/cpu.yaml
"""
# pylint: disable=no-member
# NOTE: because it raises 'dict' has no 'outputs' member in dsl.pipeline construction
import os
import sys
import json
import logging
import argparse
# config management
from dataclasses import dataclass
from omegaconf import OmegaConf, MISSING
from typing import Optional, Any, List
# AzureML
from azure.ml.component import Component
from azure.ml.component import dsl
from azure.ml.component.environment import Docker
# when running this script directly, needed to import common
LIGHTGBM_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SCRIPTS_SOURCES_ROOT = os.path.join(LIGHTGBM_REPO_ROOT, 'src')
if SCRIPTS_SOURCES_ROOT not in sys.path:
logging.info(f"Adding {SCRIPTS_SOURCES_ROOT} to path")
sys.path.append(str(SCRIPTS_SOURCES_ROOT))
from common.tasks import training_task, training_variant
from common.sweep import SweepParameterParser
from common.aml import load_dataset_from_data_input_spec
from common.aml import apply_sweep_settings
from common.pipelines import (
parse_pipeline_config,
azureml_connect,
pipeline_submit,
COMPONENTS_ROOT
)
### CONFIG DATACLASS ###
# Step 1 : to configure your pipeline, add all your fields inside a
# properly defined dataclass, pipeline_cli_main will figure out how
# to read that config from a given yaml file + hydra override commands
@dataclass
class lightgbm_training_config: # pylint: disable=invalid-name
    """ Config object constructed as a dataclass.

    NOTE: the name of this class will be used as namespace in your config yaml file.
    """
    # NOTE: all those values are REQUIRED in your yaml config file
    benchmark_name: str = MISSING

    # INPUT DATASETS
    # one training subgraph is instantiated per task (see training_all_tasks)
    tasks: List[training_task] = MISSING

    # TRAINING PARAMS
    # baseline training parameters shared by all variants
    reference: training_variant = MISSING

    # free changing parameters on top of reference
    # each entry is merged over `reference` (OmegaConf.merge) to produce a variant
    variants: Optional[Any] = None
### PIPELINE COMPONENTS ###
# Step 2 : your pipeline consists in assembling components
# load those components from local yaml specifications
# use COMPONENTS_ROOT as base folder

# single-run LightGBM training component
lightgbm_train_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "training", "lightgbm_python", "spec.yaml"))
# hyperparameter-sweep variant of the training component
lightgbm_train_sweep_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "training", "lightgbm_python", "sweep_spec.yaml"))
# splits train data into N partitions (for data-parallel training)
partition_data_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "partition_data", "spec.yaml"))
# pre-converts text data into LightGBM binary format
lightgbm_data2bin_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "lightgbm_data2bin", "spec.yaml"))
### PIPELINE SPECIFIC CODE ###
def process_sweep_parameters(params_dict, sweep_algorithm):
    """Parses config and spots sweepable parameters.

    Args:
        params_dict (dict): configuration object (see get_config_class())
        sweep_algorithm (str): random, grid, bayesian

    Returns:
        tunable_params (dict): parameters specified with a sweepable expression
        fixed_params (dict): parameters specified with a plain (fixed) value
    """
    # single source of truth for the training parameters that can be swept
    # (previously this list was duplicated below, risking drift)
    sweepable_keys = [
        "num_iterations",
        "num_leaves",
        "min_data_in_leaf",
        "learning_rate",
        "max_bin",
        "feature_fraction"
    ]
    # the class below automates parsing of sweepable parameters
    sweep_param_parser = SweepParameterParser(
        tunable_parameters=sweepable_keys,
        cli_prefix=None, # this is not argparse
        parameter_sampling=sweep_algorithm
    )

    # provide config as a dictionary to the parser
    sweep_parameters = {key: params_dict[key] for key in sweepable_keys}

    # parser gonna parse
    sweep_param_parser.parse_from_dict(sweep_parameters)

    # and return params as we want them
    tunable_params = sweep_param_parser.get_tunable_params()
    fixed_params = sweep_param_parser.get_fixed_params()

    # return dictionaries to feed as params into our pipeline
    return tunable_params, fixed_params
### TRAINING PIPELINE ###
# Step 3: your pipeline consists in creating a python function
# decorated with @dsl.pipeline.
# You can create as many subgraphs as you want,
# but `pipeline_cli_main` will need one pipeline function
# taking a single config argument, not a pipeline parameter.
# Here you should create an instance of a pipeline function (using your custom config dataclass)
@dsl.pipeline(
    name="lightgbm_training", # pythonic name
    description="LightGBM distributed training (mpi)",
    non_pipeline_parameters=['config', 'benchmark_custom_properties']
)
def lightgbm_training_pipeline_function(config,
                                        benchmark_custom_properties,
                                        train_dataset,
                                        test_dataset):
    """Pipeline function for this graph.

    Builds one training step (regular or sweep) per configured variant,
    with optional data partitioning and binary conversion upfront.

    Args:
        config (DictConfig): parsed pipeline configuration (non-pipeline parameter)
        benchmark_custom_properties (dict): properties passed to jobs as tags (non-pipeline parameter)
        train_dataset: dataset used for training
        test_dataset: dataset used for testing

    Returns:
        dict[str->PipelineOutputData]: a dictionary of your pipeline outputs
        for instance to be consumed by other graphs (currently empty)
    """
    # create list of all variants params; index 0 is the reference
    training_variants_params = [
        config.lightgbm_training_config.reference
    ]

    # if there's any variant specified
    if config.lightgbm_training_config.variants:
        # create distinct training params for each variant
        for variant_index, training_variant_config in enumerate(config.lightgbm_training_config.variants):
            # create a specific dict of params for the variant
            # (variant values override the reference values)
            variant_config = OmegaConf.merge(config.lightgbm_training_config.reference, training_variant_config)
            training_variants_params.append(variant_config)

    # for each variant, check if sweep needs to be applied
    for variant_index, variant_params in enumerate(training_variants_params):
        ############
        ### DATA ###
        ############

        # if we're using multinode, add partitioning
        if variant_params.data.auto_partitioning and (variant_params.training.tree_learner == "data" or variant_params.training.tree_learner == "voting"):
            # if using data parallel, train data has to be partitioned first
            if (variant_params.runtime.nodes * variant_params.runtime.processes) > 1:
                partition_data_step = partition_data_module(
                    input_data=train_dataset,
                    mode="roundrobin",
                    number=(variant_params.runtime.nodes * variant_params.runtime.processes),
                    header=variant_params.data.header,
                    verbose=variant_params.training.verbose
                )
                partition_data_step.runsettings.configure(target=config.compute.linux_cpu)
                partitioned_train_data = partition_data_step.outputs.output_data
            else:
                # for other modes, train data has to be one file
                partitioned_train_data = train_dataset
        else:
            # for other modes, train data has to be one file
            partitioned_train_data = train_dataset

        # convert into binary files
        if variant_params.data.pre_convert_to_binary:
            convert_data2bin_step = lightgbm_data2bin_module(
                train=partitioned_train_data,
                test=test_dataset,
                header=variant_params.data.header,
                label_column=variant_params.data.label_column,
                group_column=variant_params.data.group_column,
                max_bin=variant_params.training.max_bin,
                custom_params=json.dumps(dict(variant_params.training.custom_params or {})),
                verbose=variant_params.training.verbose
            )
            convert_data2bin_step.runsettings.configure(target=config.compute.linux_cpu)
            prepared_train_data = convert_data2bin_step.outputs.output_train
            prepared_test_data = convert_data2bin_step.outputs.output_test
        else:
            # use text data as-is
            prepared_train_data = partitioned_train_data
            prepared_test_data = test_dataset

        ################
        ### TRAINING ###
        ################

        # copy params into dict for flexibility
        training_params = dict(variant_params.training)

        # add all data-related params
        training_params['header'] = variant_params.data.header
        training_params['label_column'] = variant_params.data.label_column
        training_params['group_column'] = variant_params.data.group_column

        # extract and construct "sweepable" params
        if variant_params.sweep:
            tunable_params, fixed_params = process_sweep_parameters(
                variant_params.training,
                variant_params.sweep.algorithm
            )
            # test if we have sweepable parameters in the learning params
            if len(tunable_params) > 0:
                use_sweep = True
                training_params.update(tunable_params)
            else:
                use_sweep = False
        else:
            use_sweep = False

        # create custom properties and serialize to pass as argument
        variant_custom_properties = {
            'variant_index': variant_index,
            'framework': "lightgbm",
            'framework_build': variant_params.runtime.build,
        }
        variant_custom_properties.update(benchmark_custom_properties)
        training_params['custom_properties'] = json.dumps(variant_custom_properties)

        # serialize custom_params to pass as argument
        training_params['custom_params'] = json.dumps(dict(variant_params.training.custom_params or {}))

        # some debug outputs to expose variant parameters
        print(f"*** lightgbm variant#{variant_index}: {training_params}")

        # figuring out target (cpu/gpu): explicit target wins, else derive from device_type
        training_target = variant_params.runtime.target
        if not training_target:
            if (variant_params.training.device_type == 'gpu' or variant_params.training.device_type == 'cuda'):
                training_target = config.compute.linux_gpu
            else:
                training_target = config.compute.linux_cpu

        if use_sweep:
            # sweep training
            if variant_params.sweep.primary_metric is None:
                # default primary metric derived from the configured training metric
                variant_params.sweep.primary_metric=f"node_0/valid_0.{variant_params.training.metric}"
            lightgbm_train_step = lightgbm_train_sweep_module(
                train = prepared_train_data,
                test = prepared_test_data,
                **training_params
            )
            # apply runsettings
            lightgbm_train_step.runsettings.target=training_target
            lightgbm_train_step.runsettings.resource_layout.node_count = variant_params.runtime.nodes
            lightgbm_train_step.runsettings.resource_layout.process_count_per_node = variant_params.runtime.processes
            # apply settings from our custom yaml config
            apply_sweep_settings(lightgbm_train_step, variant_params.sweep)
        else:
            # regular training, no sweep
            lightgbm_train_step = lightgbm_train_module(
                train = prepared_train_data,
                test = prepared_test_data,
                **training_params
            )
            # apply runsettings
            lightgbm_train_step.runsettings.target=training_target
            lightgbm_train_step.runsettings.resource_layout.node_count = variant_params.runtime.nodes
            lightgbm_train_step.runsettings.resource_layout.process_count_per_node = variant_params.runtime.processes

        ###############
        ### RUNTIME ###
        ###############

        # # optional: override docker (ex: to test custom builds)
        if 'build' in variant_params.runtime and variant_params.runtime.build:
            custom_docker = Docker(file=os.path.join(LIGHTGBM_REPO_ROOT, variant_params.runtime.build))
            lightgbm_train_step.runsettings.environment.configure(
                docker=custom_docker
            )

        ##############
        ### OUTPUT ###
        ##############

        # add some relevant comments on the component
        lightgbm_train_step.comment = " -- ".join(
            [
                f"variant #{variant_index}",
                # add more
            ]
        )

        # optional: save output model
        if variant_params.output and variant_params.output.register_model:
            # model name layout:
            # "{register_model_prefix}-{task_key}-{num_iterations}trees-{num_leaves}leaves-{register_model_suffix}"
            model_basename = "{num_iterations}trees-{num_leaves}leaves".format(
                num_iterations=variant_params.training.num_iterations,
                num_leaves=variant_params.training.num_leaves
            )
            # prepend task_key if given
            if benchmark_custom_properties.get('benchmark_task_key', None):
                model_basename = benchmark_custom_properties['benchmark_task_key'] + "-" + model_basename
            # prepend prefix if given
            if variant_params.output.register_model_prefix:
                model_basename = variant_params.output.register_model_prefix + "-" + model_basename
            # append suffix if given
            if variant_params.output.register_model_suffix:
                model_basename += "-" + variant_params.output.register_model_suffix
            print(f"*** Will output model at {model_basename}")
            # auto-register output with model basename
            lightgbm_train_step.outputs.model.register_as(
                name=model_basename,
                create_new_version=True
            )

    # return {key: output}'
    return {}
# creating an overall pipeline using pipeline_function for each task given
@dsl.pipeline(
    name="training_all_tasks",
    non_pipeline_parameters=['workspace', 'config']
)
def training_all_tasks(workspace, config):
    """Overall pipeline: instantiates the training subgraph once per configured task.

    Args:
        workspace: AzureML workspace used to resolve input datasets (non-pipeline parameter)
        config (DictConfig): parsed pipeline configuration (non-pipeline parameter)
    """
    # loop on all training tasks
    for training_task in config.lightgbm_training_config.tasks:
        # load the given train dataset
        train_data = load_dataset_from_data_input_spec(workspace, training_task.train)
        test_data = load_dataset_from_data_input_spec(workspace, training_task.test)

        # create custom properties for this task
        # they will be passed on to each job as tags
        benchmark_custom_properties = {
            'benchmark_name' : config.lightgbm_training_config.benchmark_name,
            'benchmark_task_key' : training_task.task_key
        }

        # call pipeline_function as a subgraph here
        training_task_subgraph_step = lightgbm_training_pipeline_function(
            # NOTE: benchmark_custom_properties is not an actual pipeline input, just passed to the python code
            config=config,
            benchmark_custom_properties=benchmark_custom_properties,
            train_dataset=train_data,
            test_dataset=test_data
        )

        # add some relevant comments on the subgraph
        training_task_subgraph_step.comment = " -- ".join([
            "LightGBM training pipeline",
            f"benchmark name: {config.lightgbm_training_config.benchmark_name}",
            f"benchmark task key: {training_task.task_key}"
        ])
### MAIN BLOCK ###
# Step 4: implement main block using helper functions
def main():
    """CLI entry point: parses config, connects to AzureML, builds the pipeline,
    then validates and (optionally) submits it."""
    # use parse helper function to get arguments from CLI
    config = parse_pipeline_config(lightgbm_training_config)

    # you'll need a workspace object to connect
    workspace = azureml_connect(config)

    # run the pipeline function with the given arguments
    pipeline_instance = training_all_tasks(workspace, config)

    # generate a nice markdown description
    experiment_description = "\n".join([
        "Training on all specified tasks (see yaml below).",
        # NOTE: removed a stray adjacent empty string literal ("```yaml""")
        # left over from implicit string concatenation; the value is unchanged.
        "```yaml",
        "data_generation_config:",
        OmegaConf.to_yaml(config.lightgbm_training_config),
        "```"
    ])

    # validate/submit the pipeline (if run.submit=True)
    pipeline_submit(
        workspace,
        config,
        pipeline_instance,
        experiment_description=experiment_description
    )

if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | tests/common/test_sweep.py | <filename>tests/common/test_sweep.py<gh_stars>10-100
"""Tests src/common/metrics.py"""
import os
from azureml.train.hyperdrive import (
choice,
quniform,
qloguniform,
qnormal,
qlognormal,
uniform,
loguniform,
normal,
lognormal
)
from common.sweep import SweepParameterParser
### UNIT TESTS ###
def test_parse_choice():
    """Parsing a 'choice(...)' spec must match calling hyperdrive's choice() directly."""
    expected = choice(0.1, 0.2, 0.3)
    actual = SweepParameterParser._parse_choice("choice(0.1, 0.2, 0.3)")
    assert actual == expected
def test_parse_uniform():
    """Parsing a 'uniform(...)' spec must match calling hyperdrive's uniform() directly."""
    expected = uniform(0, 100)
    actual = SweepParameterParser._parse_uniform("uniform(0, 100)")
    assert actual == expected
def test_parse_loguniform():
    """Parsing a 'loguniform(...)' spec must match calling hyperdrive's loguniform() directly."""
    expected = loguniform(0.1, 0.4)
    actual = SweepParameterParser._parse_loguniform("loguniform(0.1, 0.4)")
    assert actual == expected
def test_parse_normal():
    """Parsing a 'normal(...)' spec must match calling hyperdrive's normal() directly."""
    expected = normal(0.1, 0.4)
    actual = SweepParameterParser._parse_normal("normal(0.1, 0.4)")
    assert actual == expected
def test_parse_lognormal():
    """Parsing a 'lognormal(...)' spec must match calling hyperdrive's lognormal() directly."""
    expected = lognormal(0.1, 0.4)
    actual = SweepParameterParser._parse_lognormal("lognormal(0.1, 0.4)")
    assert actual == expected
def test_parse_quniform():
    """Parsing a 'quniform(...)' spec must match calling hyperdrive's quniform() directly."""
    expected = quniform(0, 100, 5)
    actual = SweepParameterParser._parse_quniform("quniform(0, 100, 5)")
    assert actual == expected
def test_parse_qloguniform():
    """Parsing a 'qloguniform(...)' spec must match calling hyperdrive's qloguniform() directly."""
    expected = qloguniform(0, 100, 5)
    actual = SweepParameterParser._parse_qloguniform("qloguniform(0, 100, 5)")
    assert actual == expected
def test_parse_qnormal():
    """Parsing a 'qnormal(...)' spec must match calling hyperdrive's qnormal() directly."""
    expected = qnormal(0, 100, 5)
    actual = SweepParameterParser._parse_qnormal("qnormal(0, 100, 5)")
    assert actual == expected
def test_parse_qlognormal():
    """Parsing a 'qlognormal(...)' spec must match calling hyperdrive's qlognormal() directly."""
    expected = qlognormal(0, 100, 5)
    actual = SweepParameterParser._parse_qlognormal("qlognormal(0, 100, 5)")
    assert actual == expected
def test_sweep_parameter_parsing():
    """End-to-end parsing of CLI-style sweep arguments into tunable/fixed params.

    NOTE(review): this test is currently identical to
    test_sweep_parameter_from_argparse; consider keeping only one of the two.
    """
    parser = SweepParameterParser(
        tunable_parameters=[
            # keys that are allowed to be swept
            "num_trees",
            "num_leaves",
            "min_data_in_leaf",
            "learning_rate",
            "max_bin",
            "feature_fraction"
        ],
        cli_prefix="--",
        parameter_sampling="random"
    )
    args, _ = parser.get_arg_parser().parse_known_args([
        "--num_trees", "uniform(0.0005, 0.005)",
        "--num_leaves", "choice(0, 100, 120, 140, 180)",
        "--learning_rate", "0.32"
    ])
    parser.parse_from_argparse(args)

    # sweep expressions become hyperdrive parameter objects
    assert parser.get_tunable_params() == {
        "num_trees" : uniform(0.0005, 0.005),
        "num_leaves" : choice(0, 100, 120, 140, 180)
    }
    # plain values stay as fixed params
    assert parser.get_fixed_params() == {
        "learning_rate" : 0.32
    }
def test_sweep_parameter_from_argparse():
    """Sweep parameters provided through argparse are split into tunable vs fixed."""
    parameter_parser = SweepParameterParser(
        tunable_parameters=[
            # keys that are allowed to be swept
            "num_trees",
            "num_leaves",
            "min_data_in_leaf",
            "learning_rate",
            "max_bin",
            "feature_fraction"
        ],
        cli_prefix="--",
        parameter_sampling="random"
    )
    cli_parser = parameter_parser.get_arg_parser()
    parsed_args, _ = cli_parser.parse_known_args([
        "--num_trees", "uniform(0.0005, 0.005)",
        "--num_leaves", "choice(0, 100, 120, 140, 180)",
        "--learning_rate", "0.32"
    ])
    parameter_parser.parse_from_argparse(parsed_args)

    # sweep expressions become hyperdrive parameter objects
    assert parameter_parser.get_tunable_params() == {
        "num_trees" : uniform(0.0005, 0.005),
        "num_leaves" : choice(0, 100, 120, 140, 180)
    }
    # plain values stay as fixed params
    assert parameter_parser.get_fixed_params() == {
        "learning_rate" : 0.32
    }
def test_sweep_parameter_from_dict():
    """Sweep parameters provided as a plain dict (no CLI prefix) are split correctly."""
    parameter_parser = SweepParameterParser(
        tunable_parameters=[
            # keys that are allowed to be swept
            "num_trees",
            "num_leaves",
            "min_data_in_leaf",
            "learning_rate",
            "max_bin",
            "feature_fraction"
        ],
        cli_prefix=None,
        parameter_sampling="random"
    )
    parameter_parser.parse_from_dict({
        "num_trees" : "uniform(0.0005, 0.005)",
        "num_leaves" : "choice(0, 100, 120, 140, 180)",
        "min_data_in_leaf" : "quniform(0, 500, 100)",
        "learning_rate" : "0.32"
    })

    # sweep expressions become hyperdrive parameter objects
    assert parameter_parser.get_tunable_params() == {
        "num_trees" : uniform(0.0005, 0.005),
        "num_leaves" : choice(0, 100, 120, 140, 180),
        "min_data_in_leaf" : quniform(0, 500, 100)
    }
    # plain values stay as fixed params
    assert parameter_parser.get_fixed_params() == {
        "learning_rate" : 0.32
    }
|
microsoft/lightgbm-benchmark | src/common/components.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
This script contains a class to structure and standardize all scripts
in the lightgbm-benchmark repository. This class factors duplicate code to
achieve usual routines of every script: logging init, MLFlow init,
system properties logging, etc.
"""
import os
import sys
import argparse
import logging
import traceback
from distutils.util import strtobool
from .metrics import MetricsLogger
from .perf import PerformanceMetricsCollector, PerfReportPlotter
class RunnableScript():
"""
This class factors duplicate code to achieve usual routines
of every script in the lightgbm-benchmark repo: logging init, MLFlow init,
system properties logging, etc.
"""
    def __init__(self, task: str, framework: str, framework_version: str, metrics_prefix=None):
        """ Generic initialization for all script classes.

        Args:
            task (str): name of task in the pipeline/benchmark (ex: train, score)
            framework (str): name of ML framework
            framework_version (str): a version of this framework
            metrics_prefix (str): any prefix to add to this scripts metrics
        """
        self.task = task
        self.framework = framework
        self.framework_version = framework_version
        self.metrics_prefix = metrics_prefix
        # logger namespaced per framework/task (ex: "lightgbm.train")
        self.logger = logging.getLogger(f"{framework}.{task}")

        # initializes reporting of metrics
        self.metrics_logger = MetricsLogger(
            f"{framework}.{task}",
            metrics_prefix=metrics_prefix
        )
        # created lazily in initialize_run() unless perf metrics are disabled
        self.perf_report_collector = None
@classmethod
def get_arg_parser(cls, parser=None):
"""Adds component/module arguments to a given argument parser.
Args:
parser (argparse.ArgumentParser): an argument parser instance
Returns:
ArgumentParser: the argument parser instance
Notes:
if parser is None, creates a new parser instance
"""
# add arguments that are specific to the module
if parser is None:
parser = argparse.ArgumentParser()
# add generic arguments here
group_general = parser.add_argument_group("General parameters")
group_general.add_argument(
"--verbose",
required=False,
default=False,
type=strtobool, # use this for bool args, do not use action_store=True
help="set True to show DEBUG logs",
)
group_general.add_argument(
"--custom_properties",
required=False,
default=None,
type=str,
help="provide custom properties as json dict",
)
group_general.add_argument(
"--disable_perf_metrics",
required=False,
default=False,
type=strtobool,
help="disable performance metrics (default: False)",
)
return parser
    def initialize_run(self, args):
        """Initialize the component run, opens/setups what needs to be.

        Opens the MLFlow session, records run properties (task/framework plus any
        custom properties), and starts the perf collector unless disabled.

        Args:
            args (argparse.namespace): parsed arguments; reads custom_properties
                and disable_perf_metrics
        """
        self.logger.info("Initializing script run...")

        # open mlflow
        self.metrics_logger.open()

        # record properties of the run
        self.metrics_logger.set_properties(
            task = self.task,
            framework = self.framework,
            framework_version = self.framework_version
        )

        # if provided some custom_properties by the outside orchestrator
        if args.custom_properties:
            self.metrics_logger.set_properties_from_json(args.custom_properties)

        # add properties about environment of this script
        self.metrics_logger.set_platform_properties()

        # enable perf reporting
        if not args.disable_perf_metrics:
            self.perf_report_collector = PerformanceMetricsCollector()
            self.perf_report_collector.start()
def run(self, args, logger, metrics_logger, unknown_args):
    """The run function of your script. You are required to override this method
    with your own implementation.

    Args:
        args (argparse.namespace): command line arguments provided to script
        logger (logging.logger): a logger initialized for this script
        metrics_logger (common.metrics.MetricLogger): to report metrics for this script, already initialized for MLFlow
        unknown_args (list[str]): list of arguments not recognized during argparse
    """
    # subclasses must provide the actual work; fail loudly otherwise
    message = f"run() method from class {self.__class__.__name__} hasn't actually been implemented."
    raise NotImplementedError(message)
def finalize_run(self, args):
    """Finalize the run, close what needs to be"""
    self.logger.info("Finalizing script run...")

    collector = self.perf_report_collector
    if collector:
        # stop collection and push the aggregated perf report to mlflow
        collector.finalize()
        plotter = PerfReportPlotter(self.metrics_logger)
        plotter.add_perf_reports(collector.perf_reports, node=0)
        plotter.report_nodes_perf()

    # shut down mlflow reporting
    self.metrics_logger.close()
@classmethod
def main(cls, cli_args=None):
    """Component main function, it is not recommended to override this method.
    It parses arguments and executes run() with the right arguments.

    Args:
        cli_args (List[str], optional): list of args to feed script, useful for debugging. Defaults to None.

    Returns:
        RunnableScript: the script instance that was run (useful for unit tests).
    """
    # initialize root logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    console_handler = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # show the command used to run
    if cli_args:
        logger.info(f"Running main() with specific cli args: {cli_args}")
    else:
        logger.info(f"Running main() with sys.argv={sys.argv}")

    # construct arg parser
    parser = cls.get_arg_parser()

    # if argument parsing fails, or if unknown arguments, will except
    args, unknown_args = parser.parse_known_args(cli_args)
    logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)

    # create script instance, initialize mlflow
    script_instance = cls()
    script_instance.initialize_run(args)

    # catch run function exceptions to properly finalize run (kill/join threads)
    # NOTE: BaseException is intentional so KeyboardInterrupt/SystemExit also finalize the run
    try:
        # run the actual thing
        script_instance.run(args, script_instance.logger, script_instance.metrics_logger, unknown_args)
    except BaseException:
        # FIX: use the configured logger (was module-level logging.critical) and fix "occured" typo
        logger.critical(f"Exception occurred during run():\n{traceback.format_exc()}")
        script_instance.finalize_run(args)
        # FIX: bare raise re-raises without appending a new frame to the traceback (was `raise e`)
        raise

    # close mlflow
    script_instance.finalize_run(args)

    # return for unit tests
    return script_instance
class SingleNodeScript(RunnableScript):
    """Single-node variant of RunnableScript; inherits all behavior unchanged
    (exists as a named counterpart to MultiNodeScript)."""
    pass
|
microsoft/lightgbm-benchmark | tests/scripts/test_partition_data.py | <gh_stars>10-100
"""
test src/scripts/partition_data/partition.py
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch
from scripts.data_processing.partition_data import partition
# IMPORTANT: see conftest.py for fixtures
def verify_partitioned_files(partitioned_data_dir, expected_file_count, expected_file_length):
    """Utility for testing outputs of partition.py.

    Asserts that `partitioned_data_dir` exists and contains files
    part_000000 ... part_{N-1}, each with exactly `expected_file_length` lines.

    Args:
        partitioned_data_dir (str): directory where partition.py wrote its output
        expected_file_count (int): number of partition files expected
        expected_file_length (int): number of lines expected in each partition file
    """
    assert os.path.isdir(partitioned_data_dir)

    expected_files = [
        os.path.join(partitioned_data_dir, "part_{:06d}".format(i))
        for i in range(expected_file_count)
    ]
    for expected_file in expected_files:
        # FIX: message was missing the f prefix, so {expected_file} was printed literally
        assert os.path.isfile(
            expected_file
        ), f"Script partition.py should generate partitioned data file {expected_file} in --output, but no output files were found"

        # FIX: previous enumerate-based count raised NameError on an empty file;
        # sum() over the line iterator is well-defined (0) in that case
        with open(expected_file, 'r') as i_file:
            line_count = sum(1 for _ in i_file)
        assert line_count == expected_file_length  # expected size of each chunk
def test_partition_data_roundrobin(temporary_dir, regression_train_sample):
    """Tests src/scripts/data_processing/partition_data/partition.py"""
    partitioned_data_dir = os.path.join(temporary_dir, "partitioned_data")

    # regression_train_sample has 100 samples; roundrobin into 5 files of 20 lines
    script_args = [
        "partition.py",
        "--input", regression_train_sample,
        "--output", partitioned_data_dir,
        "--mode", "roundrobin",
        "--number", "5",
    ]

    # run main() as if partition.py had been invoked from the command line
    with patch.object(sys, "argv", script_args):
        partition.main()

    verify_partitioned_files(
        partitioned_data_dir, expected_file_count=5, expected_file_length=20
    )
def test_partition_data_chunk(temporary_dir, regression_train_sample):
    """Tests src/scripts/data_processing/partition_data/partition.py"""
    partitioned_data_dir = os.path.join(temporary_dir, "partitioned_data")

    # regression_train_sample has 100 samples; chunks of 5 lines => 20 files
    script_args = [
        "partition.py",
        "--input", regression_train_sample,
        "--output", partitioned_data_dir,
        "--mode", "chunk",
        "--number", "5",
    ]

    # run main() as if partition.py had been invoked from the command line
    with patch.object(sys, "argv", script_args):
        partition.main()

    verify_partitioned_files(
        partitioned_data_dir, expected_file_count=20, expected_file_length=5
    )
|
microsoft/lightgbm-benchmark | src/common/distributed.py | <gh_stars>10-100
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
LightGBM/Python training script
"""
import os
import logging
import traceback
from .components import RunnableScript
from dataclasses import dataclass
from .perf import PerformanceMetricsCollector, PerfReportPlotter
@dataclass
class mpi_config_class:
    """Detected MPI configuration for the current process."""
    world_size: int = 1        # total number of MPI processes (1 when MPI unavailable)
    world_rank: int = 0        # rank of this process within the world
    mpi_available: bool = False  # True when running under MPI with world_size > 1
    main_node: bool = True     # True only on rank 0 (used to gate run-level reporting)
class MPIHandler():
    """Handling MPI initialization in a separate class
    so we can patch/mock it during unit testing of MultiNodeScript"""
    def __init__(self, mpi_init_mode=None):
        """Constructor

        Args:
            mpi_init_mode (int, optional): thread support level passed to MPI.Init_thread(),
                or None to skip MPI init and read OpenMPI environment variables instead.
        """
        self._mpi_module = None
        self._comm = None
        self._mpi_config = None
        self._mpi_init_mode = mpi_init_mode
        self.logger = logging.getLogger(__name__)

    @classmethod
    def _mpi_import(cls):
        # deferred import so mpi4py is only required when MPI is actually used;
        # disable auto init/finalize to keep fine-grain control in this class
        import mpi4py
        mpi4py.rc.initialize = False
        mpi4py.rc.finalize = False
        from mpi4py import MPI
        return MPI

    def initialize(self):
        """Initializes MPI (or detects config from env vars) and stores the result,
        retrievable via mpi_config()."""
        # doing our own initialization of MPI to have fine-grain control
        self._mpi_module = self._mpi_import()

        if self._mpi_init_mode is None:
            # use simple env vars instead
            self.logger.info(f"no MPI init, using environment variables instead")
            world_size = int(os.environ.get("OMPI_COMM_WORLD_SIZE", "1"))
            world_rank = int(os.environ.get("OMPI_COMM_WORLD_RANK", "0"))
            self._mpi_config = mpi_config_class(
                world_size, # world_size
                world_rank, # world_rank
                (world_size > 1), # mpi_available
                (world_rank == 0), # main_node
            )
            # FIX: keep self.comm and self._comm in sync (previously __init__ set _comm
            # while initialize() set comm, leaving the private attribute stale)
            self.comm = None
            self._comm = None
        else:
            # use mpi to detect mpi config
            self.logger.info(f"Running MPI.Init_thread(required={self._mpi_init_mode})")
            try:
                self._mpi_module.Init_thread(required=self._mpi_init_mode)
            except self._mpi_module.Exception:
                # FIX: "occured" typo in log message
                self.logger.warning(f"Exception occurred during MPI initialization:\n{traceback.format_exc()}")
            self.comm = self._mpi_module.COMM_WORLD
            self._comm = self.comm
            self._mpi_config = self.detect_mpi_config()

        # FIX: use this handler's logger for consistency (was logging.getLogger())
        self.logger.info(f"MPI detection results: {self._mpi_config}")

    def finalize(self):
        """Finalizes MPI if it was initialized and not already finalized."""
        if self._mpi_module.Is_initialized() and not self._mpi_module.Is_finalized():
            self.logger.info("MPI was initialized, calling MPI.finalize()")
            self._mpi_module.Finalize()
        else:
            self.logger.warning(f"MPIHandler.finalize() was called, but MPI.Is_initialized={self._mpi_module.Is_initialized()} and MPI.Is_finalized={self._mpi_module.Is_finalized()}")

    def mpi_config(self):
        """Getter: returns the detected mpi_config_class (None before initialize())."""
        return self._mpi_config

    def detect_mpi_config(self):
        """ Detects if we're running in MPI.
        Args:
            None
        Returns:
            mpi_config (namedtuple)
        """
        try:
            mpi_config = mpi_config_class(
                self.comm.Get_size(), # world_size
                self.comm.Get_rank(), # world_rank
                (self.comm.Get_size() > 1), # mpi_available
                (self.comm.Get_rank() == 0), # main_node
            )
        except Exception:
            # FIX: narrowed bare `except:` so KeyboardInterrupt/SystemExit are not swallowed
            mpi_config = mpi_config_class(
                1, # world_size
                0, # world_rank
                False, # mpi_available
                True, # main_node
            )
            logging.getLogger().critical(f"MPI detection failed, switching to single node: {mpi_config}, see traceback below:\n{traceback.format_exc()}")

        return mpi_config
class MultiNodeScript(RunnableScript):
    """RunnableScript variant that initializes/finalizes MPI around the run,
    delegating MPI handling to MPIHandler (patchable in unit tests)."""
    def __init__(self, task, framework, framework_version, metrics_prefix=None, mpi_init_mode=None):
        """ Generic initialization for all script classes.
        Args:
            task (str): name of task in the pipeline/benchmark (ex: train, score)
            framework (str): name of ML framework
            framework_version (str): a version of this framework
            metrics_prefix (str): any prefix to add to this scripts metrics
            mpi_init_mode (int): mode to initialize MPI
        """
        # just use the regular init
        super().__init__(
            task = task,
            framework = framework,
            framework_version = framework_version,
            metrics_prefix = metrics_prefix
        )
        # MPI is not initialized here; that happens in initialize_run()
        self._mpi_handler = MPIHandler(mpi_init_mode=mpi_init_mode)
        self._mpi_config = None

    def mpi_config(self):
        """Getter: mpi_config_class detected at initialize_run() (None before that)."""
        return self._mpi_config

    def initialize_run(self, args):
        """Initialize the component run, opens/setups what needs to be"""
        self.logger.info("Initializing multi node component script...")

        # MPI must be up before any metrics reporting so world_rank is known
        self.logger.info("Initializing MPI.")
        self._mpi_handler.initialize()
        self._mpi_config = self._mpi_handler.mpi_config()

        # open mlflow
        self.metrics_logger.open()

        if self._mpi_config.main_node:
            # record properties only from the main node (avoids duplicate reporting)
            self.metrics_logger.set_properties(
                task = self.task,
                framework = self.framework,
                framework_version = self.framework_version
            )

            # if provided some custom_properties by the outside orchestrator
            if args.custom_properties:
                self.metrics_logger.set_properties_from_json(args.custom_properties)

            # add properties about environment of this script
            self.metrics_logger.set_platform_properties()

        # enable perf reporting (on every node, tagged by rank in finalize_run)
        if not args.disable_perf_metrics:
            self.perf_report_collector = PerformanceMetricsCollector()
            self.perf_report_collector.start()

    def finalize_run(self, args):
        """Finalize the run, close what needs to be"""
        self.logger.info("Finalizing multi node component script...")

        # clean exit from mpi
        self.logger.info("Finalizing MPI.")
        self._mpi_handler.finalize()

        if self.perf_report_collector:
            self.perf_report_collector.finalize()
            plotter = PerfReportPlotter(self.metrics_logger)
            # perf reports are attributed to this node's MPI rank
            plotter.add_perf_reports(self.perf_report_collector.perf_reports, node=self._mpi_config.world_rank)
            plotter.report_nodes_perf()

        # close mlflow
        self.metrics_logger.close()
|
microsoft/lightgbm-benchmark | src/scripts/model_transformation/treelite_compile/compile_treelite.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
TreeLite/Python inferencing script
"""
import os
import sys
import argparse
import logging
import numpy
from distutils.util import strtobool
import pandas as pd
import treelite, treelite_runtime
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import input_file_path
class TreeLightCompileScript(RunnableScript):
    """Runnable script compiling a tree model into a native shared library via treelite."""
    def __init__(self):
        super().__init__(
            task='compile',
            framework='treelite_python',
            framework_version=treelite.__version__,
        )

    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.

        Args:
            parser (argparse.ArgumentParser): an argument parser instance

        Returns:
            ArgumentParser: the argument parser instance

        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)

        group_i = parser.add_argument_group("Input Data")
        group_i.add_argument("--model", required=False, type=input_file_path,
                             help="Exported model location (file path)")

        group_treelite = parser.add_argument_group("Treelite parameters")
        group_treelite.add_argument("--model_format", required=False, default="lightgbm",
                                    type=str, help="format of the input --model")
        group_treelite.add_argument("--so_path", required=False, default="./mymodel.so",
                                    type=str, help="full path to the saved model")
        group_treelite.add_argument("--toolchain", required=False, default="gcc",
                                    type=str, help="toolchain for compiling model")

        return parser

    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)

        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        logger.info(f"Converting model to Treelite")

        # time both load and compilation as one mlflow metric
        with metrics_logger.log_time_block("model_compilation"):
            loaded_model = treelite.Model.load(args.model, model_format=args.model_format)
            loaded_model.export_lib(
                toolchain=args.toolchain,
                libpath=args.so_path,
                verbose=True,
                params={'parallel_comp': 16},
            )
def get_arg_parser(parser=None):
    """ To ensure compatibility with shrike unit tests """
    # module-level alias delegating to the script class
    return TreeLightCompileScript.get_arg_parser(parser=parser)
def main(cli_args=None):
    """ To ensure compatibility with shrike unit tests """
    # module-level alias delegating to the script class
    TreeLightCompileScript.main(cli_args=cli_args)


if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | src/scripts/training/lightgbm_python/train.py | <reponame>microsoft/lightgbm-benchmark
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
LightGBM/Python training script
"""
import os
import sys
import argparse
import logging
import traceback
import json
from distutils.util import strtobool
import lightgbm
from collections import namedtuple
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import get_all_files
from common.lightgbm_utils import LightGBMCallbackHandler
from common.distributed import MultiNodeScript
class LightGBMPythonMpiTrainingScript(MultiNodeScript):
    """LightGBM (python API) training script, supporting both single node
    and MPI-based multi-node training (data/voting/feature parallel)."""
    def __init__(self):
        super().__init__(
            task = "train",
            framework = "lightgbm",
            framework_version = lightgbm.__version__
        )

    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.
        Args:
            parser (argparse.ArgumentParser): an argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)

        group_i = parser.add_argument_group("Input Data")
        group_i.add_argument("--train",
            required=True, type=str, help="Training data location (file or dir path)")
        group_i.add_argument("--test",
            required=True, type=str, help="Testing data location (file path)")
        group_i.add_argument("--construct",
            required=False, default=True, type=strtobool, help="use lazy initialization during data loading phase")
        group_i.add_argument("--header", required=False, default=False, type=strtobool)
        group_i.add_argument("--label_column", required=False, default="0", type=str)
        group_i.add_argument("--group_column", required=False, default=None, type=str)

        group_o = parser.add_argument_group("Outputs")
        group_o.add_argument("--export_model",
            required=False, type=str, help="Export the model in this location (file path)")

        # learner params
        group_lgbm = parser.add_argument_group("LightGBM learning parameters")
        group_lgbm.add_argument("--objective", required=True, type=str)
        group_lgbm.add_argument("--metric", required=True, type=str)
        group_lgbm.add_argument("--boosting_type", required=True, type=str)
        group_lgbm.add_argument("--tree_learner", required=True, type=str)
        group_lgbm.add_argument("--label_gain", required=False, type=str, default=None)
        group_lgbm.add_argument("--num_trees", required=True, type=int)
        group_lgbm.add_argument("--num_leaves", required=True, type=int)
        group_lgbm.add_argument("--min_data_in_leaf", required=True, type=int)
        group_lgbm.add_argument("--learning_rate", required=True, type=float)
        group_lgbm.add_argument("--max_bin", required=True, type=int)
        group_lgbm.add_argument("--feature_fraction", required=True, type=float)
        group_lgbm.add_argument("--device_type", required=False, type=str, default="cpu")
        group_lgbm.add_argument("--custom_params", required=False, type=str, default=None)

        return parser

    def load_lgbm_params_from_cli(self, args, mpi_config):
        """Gets the right LightGBM parameters from argparse + mpi config
        Args:
            args (argparse.Namespace)
            mpi_config (namedtuple): as returned from detect_mpi_config()
        Returns:
            lgbm_params (dict)
        """
        # copy all parameters from argparse
        cli_params = dict(vars(args))

        # removing arguments that are purely CLI
        for key in ['verbose', 'custom_properties', 'export_model', 'test', 'train', 'custom_params', 'construct', 'disable_perf_metrics']:
            del cli_params[key]

        # doing some fixes and hardcoded values
        lgbm_params = cli_params
        lgbm_params['feature_pre_filter'] = False
        lgbm_params['verbose'] = 2
        lgbm_params['header'] = bool(args.header) # strtobool returns 0 or 1, lightgbm needs actual bool
        lgbm_params['is_provide_training_metric'] = True

        # add mpi parameters if relevant
        if mpi_config.mpi_available:
            lgbm_params['num_machines'] = mpi_config.world_size
            lgbm_params['machines'] = ":"

        # process custom params (json dict overrides any of the above)
        if args.custom_params:
            custom_params = json.loads(args.custom_params)
            lgbm_params.update(custom_params)

        return lgbm_params

    def assign_train_data(self, args, mpi_config):
        """ Identifies which training file to load on this node.
        Checks for consistency between number of files and mpi config.
        Args:
            args (argparse.Namespace)
            mpi_config (namedtuple): as returned from detect_mpi_config()
        Returns:
            str: path to the data file for this node
        """
        train_file_paths = get_all_files(args.train)

        if mpi_config.mpi_available:
            # depending on mode, we'll require different number of training files
            if args.tree_learner == "data" or args.tree_learner == "voting":
                # data/voting parallel: one partition per node, picked by rank
                if len(train_file_paths) == mpi_config.world_size:
                    train_data = train_file_paths[mpi_config.world_rank]
                else:
                    # FIX: message previously interpolated world_rank where world_size (node
                    # count / required partition count) was meant
                    raise Exception(f"To use MPI with tree_learner={args.tree_learner} and node count {mpi_config.world_size}, you need to partition the input data into {mpi_config.world_size} files (currently found {len(train_file_paths)})")
            elif args.tree_learner == "feature":
                # feature parallel: every node loads the same single file
                if len(train_file_paths) == 1:
                    train_data = train_file_paths[0]
                else:
                    # FIX: message previously said tree_learner=parallel in the feature branch
                    raise Exception(f"To use MPI with tree_learner=feature you need to provide only 1 input file, but {len(train_file_paths)} were found")
            elif args.tree_learner == "serial":
                if len(train_file_paths) == 1:
                    train_data = train_file_paths[0]
                else:
                    raise Exception(f"To use single node training, you need to provide only 1 input file, but {len(train_file_paths)} were found")
            else:
                # FIX: exception was constructed but never raised, silently falling
                # through with train_data unbound (UnboundLocalError later)
                raise NotImplementedError(f"tree_learner mode {args.tree_learner} does not exist or is not implemented.")
        else:
            # if not using mpi, let's just use serial mode with one unique input file
            if args.tree_learner != "serial":
                logging.getLogger().warning(f"Using tree_learner={args.tree_learner} on single node does not make sense, switching back to tree_learner=serial")
                args.tree_learner = "serial"

            if len(train_file_paths) == 1:
                train_data = train_file_paths[0]
            else:
                raise Exception(f"To use single node training, you need to provide only 1 input file, but {len(train_file_paths)} were found")

        return train_data

    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)
        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # get mpi config as a namedtuple
        mpi_config = self.mpi_config()

        # figure out the lgbm params from cli args + mpi config
        lgbm_params = self.load_lgbm_params_from_cli(args, mpi_config)

        # create a handler for the metrics callbacks, tagging metrics per node
        callbacks_handler = LightGBMCallbackHandler(
            metrics_logger=metrics_logger,
            metrics_prefix=f"node_{mpi_config.world_rank}/"
        )

        # make sure the output argument exists
        if args.export_model and mpi_config.main_node:
            os.makedirs(args.export_model, exist_ok=True)
            args.export_model = os.path.join(args.export_model, "model.txt")

        # log params only once by doing it only on main node (node 0)
        if mpi_config.main_node:
            # log lgbm parameters
            logger.info(f"LGBM Params: {lgbm_params}")
            metrics_logger.log_parameters(**lgbm_params)

        # register logger for lightgbm logs
        lightgbm.register_logger(logger)

        logger.info(f"Loading data for training")
        with metrics_logger.log_time_block("time_data_loading", step=mpi_config.world_rank):
            # obtain the path to the train data for this node
            train_data_path = self.assign_train_data(args, mpi_config)
            test_data_paths = get_all_files(args.test)
            logger.info(f"Running with 1 train file and {len(test_data_paths)} test files.")

            # construct datasets
            if args.construct:
                # eager construction lets us record data shape metrics
                train_data = lightgbm.Dataset(train_data_path, params=lgbm_params).construct()
                val_datasets = [
                    train_data.create_valid(test_data_path).construct() for test_data_path in test_data_paths
                ]

                # capture data shape in metrics
                metrics_logger.log_metric(key="train_data.length", value=train_data.num_data(), step=mpi_config.world_rank)
                metrics_logger.log_metric(key="train_data.width", value=train_data.num_feature(), step=mpi_config.world_rank)
            else:
                train_data = lightgbm.Dataset(train_data_path, params=lgbm_params)
                val_datasets = [
                    train_data.create_valid(test_data_path) for test_data_path in test_data_paths
                ]
                # can't count rows if dataset is not constructed
                # mlflow can only log float.
                # metrics_logger.log_metric(key="train_data.length", value="n/a")
                # metrics_logger.log_metric(key="train_data.width", value="n/a")

        logger.info(f"Training LightGBM with parameters: {lgbm_params}")
        with metrics_logger.log_time_block("time_training", step=mpi_config.world_rank):
            booster = lightgbm.train(
                lgbm_params,
                train_data,
                valid_sets = val_datasets,
                callbacks=[callbacks_handler.callback]
            )

        # only the main node persists the model
        if args.export_model and mpi_config.main_node:
            logger.info(f"Writing model in {args.export_model}")
            booster.save_model(args.export_model)
def get_arg_parser(parser=None):
    """ To ensure compatibility with shrike unit tests """
    # module-level alias delegating to the script class
    return LightGBMPythonMpiTrainingScript.get_arg_parser(parser=parser)
def main(cli_args=None):
    """ To ensure compatibility with shrike unit tests """
    # module-level alias delegating to the script class
    LightGBMPythonMpiTrainingScript.main(cli_args=cli_args)


if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | tests/scripts/test_treelite_python.py | <filename>tests/scripts/test_treelite_python.py<gh_stars>10-100
"""
Executes the series of scripts end-to-end
to test LightGBM (python) manual benchmark
"""
import os
import sys
import tempfile
from unittest.mock import patch
from scripts.model_transformation.treelite_compile import compile_treelite
from scripts.inferencing.treelite_python import score
# IMPORTANT: see conftest.py for fixtures
def test_treelist_inferencing_script(temporary_dir, regression_inference_sample, regression_model_sample):
    """End-to-end: compile a lightgbm model with treelite, then score with it."""
    # create a directory for each output
    predictions_dir = os.path.join(temporary_dir, "predictions")
    compiled_model_path = os.path.join(temporary_dir, "mymodel.so")

    # pick a toolchain matching the current platform
    if sys.platform in ("linux", "linux2"):
        toolchain = "gcc"
    elif sys.platform == "win32":
        toolchain = "msvc"
    else:
        raise NotImplementedError(f"unit test doesn't know how to handle toolchain for platform {sys.platform}")

    # step 1: compile the model into a shared library
    compile_args = [
        "compile_treelite.py",
        "--model", regression_model_sample,
        "--model_format", "lightgbm",
        "--toolchain", toolchain,
        "--so_path", compiled_model_path,
    ]
    with patch.object(sys, "argv", compile_args):
        compile_treelite.main()

    # step 2: score using the compiled library
    score_args = [
        "score.py",
        "--so_path", compiled_model_path,
        "--data", regression_inference_sample,
        "--output", predictions_dir,
        "--nthreads", "1",
    ]
    with patch.object(sys, "argv", score_args):
        score.main()

    # test expected outputs
    #assert os.path.isfile(os.path.join(predictions_dir, "predictions.txt"))
|
microsoft/lightgbm-benchmark | tests/aml/test_components.py | <reponame>microsoft/lightgbm-benchmark
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
PyTest suite for testing if each run.py is aligned with module specification:
> Status: this code relates to the _recipe_ and is a _proposition_
"""
import pytest
import os
from shrike.pipeline.testing.components import (
component_spec_yaml_exists_and_is_parsable,
)
from shrike.pipeline.testing.components import component_uses_private_acr
from shrike.pipeline.testing.components import component_uses_private_python_feed
from shrike.pipeline.testing.components import component_run_py_import
from shrike.pipeline.testing.components import component_run_get_arg_parser
from shrike.pipeline.testing.components import (
if_arguments_from_component_spec_match_script_argparse,
)
# absolute path to src/scripts, resolved relative to this test file
COMPONENT_ROOT_FOLDER = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "..", "src", "scripts")
)

# modules that should ALSO pass advanced tests (design pattern)
# each entry is a spec.yaml path relative to COMPONENT_ROOT_FOLDER
COMPONENT_SPEC_FILES = [
    "sample/spec.yaml",
    "data_processing/generate_data/spec.yaml",
    "data_processing/lightgbm_data2bin/spec.yaml",
    "data_processing/partition_data/spec.yaml",
    "training/lightgbm_python/spec.yaml",
    "model_transformation/treelite_compile/spec.yaml",
    "inferencing/lightgbm_python/spec.yaml",
    "inferencing/lightgbm_c_api/spec.yaml",
    "inferencing/lightgbm_ray/spec.yaml",
    "inferencing/custom_win_cli/spec.yaml",
    "inferencing/treelite_python/spec.yaml",
]
### BASIC TESTS ###
# for basic module designs (minimal wrappers)
@pytest.mark.parametrize("component_spec_path", COMPONENT_SPEC_FILES)
def test_component_run_py_import(component_spec_path):
    """Try importing run.py, just to check if basic script passes syntax/imports checks"""
    spec_full_path = os.path.join(COMPONENT_ROOT_FOLDER, component_spec_path)
    component_run_py_import(spec_full_path)
@pytest.mark.parametrize("component_spec_path", COMPONENT_SPEC_FILES)
def test_component_spec_yaml_exists_and_is_parsable(component_spec_path):
    """Try loading and parsing the component spec yaml file"""
    spec_full_path = os.path.join(COMPONENT_ROOT_FOLDER, component_spec_path)
    component_spec_yaml_exists_and_is_parsable(spec_full_path)
### ADVANCED TESTS ###
# for module implementing full design pattern (get_arg_parser())
@pytest.mark.parametrize("component_spec_path", COMPONENT_SPEC_FILES)
def test_component_run_get_arg_parser(component_spec_path):
    """Tests if component run.py has function get_arg_parser(parser)"""
    spec_full_path = os.path.join(COMPONENT_ROOT_FOLDER, component_spec_path)
    component_run_get_arg_parser(spec_full_path)
@pytest.mark.parametrize("component_spec_path", COMPONENT_SPEC_FILES)
def test_if_arguments_from_component_spec_match_script_argparse(component_spec_path):
    """Tests alignment between module_spec arguments and script parser arguments"""
    spec_full_path = os.path.join(COMPONENT_ROOT_FOLDER, component_spec_path)
    if_arguments_from_component_spec_match_script_argparse(spec_full_path)
# NOTE: this test has been disabled because it requires exception re-throw in compliant_handle()
# @pytest.mark.parametrize("module", MODULE_MANIFEST_ADVANCED)
# def test_script_main_with_synthetic_arguments(mocker, module):
# """Tests alignment between module_spec arguments and script parser arguments"""
# script_main_with_synthetic_arguments(module, mocker)
|
microsoft/lightgbm-benchmark | src/common/tasks.py | <reponame>microsoft/lightgbm-benchmark
from dataclasses import dataclass
from omegaconf import MISSING
from typing import Any, Optional
@dataclass
class data_input_spec:
    """One input dataset reference: by name+version, by uuid, or by datastore+path."""
    # NOTE: Union is not supported in Hydra/OmegaConf
    # specify either by dataset name and version
    name: Optional[str] = None
    version: Optional[str] = None
    # or by uuid (non-registered)
    uuid: Optional[str] = None
    # or by datastore+path
    datastore: Optional[str] = None
    path: Optional[str] = None
    # validation toggle consumed by the dataset-loading helper (see common.aml)
    validate: bool = True
@dataclass
class inferencing_task:
    """Config for one inferencing task: input data + model to score with."""
    data: data_input_spec = MISSING
    model: data_input_spec = MISSING
    # provide a key for internal tagging + reporting
    task_key: Optional[str] = None
    # forwarded to lightgbm prediction (skip feature-count check)
    predict_disable_shape_check: bool = False
@dataclass
class inferencing_variants:
    """One inferencing variant: which framework to benchmark, with an optional build."""
    framework: str = MISSING
    build: Optional[str] = None
@dataclass
class data_generation_task:
    """Config for synthetic data generation (train/test/inferencing splits)."""
    task: str = MISSING
    # provide a key for internal tagging + reporting
    task_key: Optional[str] = None
    train_samples: int = MISSING
    train_partitions: int = 1
    test_samples: int = MISSING
    test_partitions: int = 1
    inferencing_samples: int = MISSING
    inferencing_partitions: int = 1
    n_features: int = MISSING
    n_informative: Optional[int] = None
    # for classification tasks — TODO confirm against generate_data script
    n_label_classes: Optional[int] = None
    # for ranking tasks — TODO confirm against generate_data script
    docs_per_query: Optional[int] = None
    delimiter: str = "comma"
@dataclass
class training_task:
    """Config for one training task: train + test dataset references."""
    train: data_input_spec = MISSING
    test: data_input_spec = MISSING
    # provide a key for internal tagging + reporting
    task_key: Optional[str] = None
@dataclass
class sweep_early_termination_settings:
    """Early-termination policy settings for a hyperparameter sweep."""
    policy_type: str = 'default' # truncation_selection | median_stopping | bandit
    evaluation_interval: Optional[int] = None
    delay_evaluation: Optional[int] = None
    # truncation settings
    truncation_percentage: Optional[int] = None # for truncation_selection
    # bandit settings
    slack_factor: Optional[float] = None
@dataclass
class sweep_limits_settings:
    """Resource limits for a hyperparameter sweep."""
    max_total_trials: int = MISSING
    max_concurrent_trials: Optional[int] = None # must be between 1 and 100
    timeout_minutes: Optional[int] = None
@dataclass
class sweep_settings:
    """Top-level sweep configuration (goal, algorithm, termination, limits)."""
    # TODO: add all parameters from shrike https://github.com/Azure/shrike/blob/387fadb47d69e46bd7e5ac6f243250dc6044afaa/shrike/pipeline/pipeline_helper.py#L809
    # goal settings
    primary_metric: Optional[str] = None
    goal: Optional[str] = None
    algorithm: str = "random"
    early_termination: Optional[sweep_early_termination_settings] = None
    limits: Optional[sweep_limits_settings] = None
@dataclass
class lightgbm_training_variant_parameters:
    """LightGBM learner parameters for one training variant."""
    # fixed training parameters
    objective: str = MISSING
    metric: str = MISSING
    boosting: str = MISSING
    tree_learner: str = MISSING
    # sweepable training parameters
    # NOTE: need to be str so they can be parsed (ex: 'choice(100,200)')
    num_iterations: str = MISSING
    num_leaves: str = MISSING
    min_data_in_leaf: str = MISSING
    learning_rate: str = MISSING
    max_bin: str = MISSING
    feature_fraction: str = MISSING
    label_gain: Optional[str] = None
    # free-form extra params merged into the lgbm params dict
    custom_params: Optional[Any] = None
    # COMPUTE
    device_type: str = "cpu"
    verbose: bool = False
@dataclass
class lightgbm_training_data_variant_parameters:
    """Data loading/formatting parameters for one training variant."""
    # FILE OPTIONS
    auto_partitioning: bool = True
    pre_convert_to_binary: bool = False # doesn't work with partitioned data (yet)
    # input parameters
    header: bool = False
    label_column: Optional[str] = "0"
    group_column: Optional[str] = None
    # use lazy initialization during data loading phase (see train.py --construct)
    construct: bool = True
@dataclass
class lightgbm_training_environment_variant_parameters:
    """Runtime/compute parameters for one training variant."""
    # COMPUTE
    nodes: int = 1
    processes: int = 1
    # AzureML compute target name — TODO confirm
    target: Optional[str] = None
    build: Optional[str] = None
@dataclass
class lightgbm_training_output_variant_parameters:
    """Model registration options for one training variant."""
    register_model: bool = False # "{register_model_prefix}-{task_key}-{num_iterations}trees-{num_leaves}leaves-{register_model_suffix}"
    register_model_prefix: Optional[str] = None
    register_model_suffix: Optional[str] = None
@dataclass
class training_variant:
    """Full config for one training variant (data + learner + runtime, optional sweep/output)."""
    # three below are mandatory sections of the variant config
    data: lightgbm_training_data_variant_parameters = MISSING
    training: lightgbm_training_variant_parameters = MISSING
    runtime: lightgbm_training_environment_variant_parameters = MISSING
    # two below are optional
    sweep: Optional[sweep_settings] = None
    output: Optional[lightgbm_training_output_variant_parameters] = None
|
microsoft/lightgbm-benchmark | src/pipelines/azureml/lightgbm_inferencing.py | """
Compares 3 versions of LightGBM Inferencing
A - vanilla
B - custom build
C - treelite
to execute:
> python src/pipelines/azureml/lightgbm_inferencing.py --exp-config conf/experiments/lightgbm-inferencing.yaml
"""
# pylint: disable=no-member
# NOTE: because it raises 'dict' has no 'outputs' member in dsl.pipeline construction
import os
import sys
import json
import logging
# config management
from dataclasses import dataclass
from omegaconf import DictConfig, OmegaConf, MISSING
from typing import Optional, List
# AzureML
from azure.ml.component import Component
from azure.ml.component import dsl
from azure.ml.component.environment import Docker
# when running this script directly, needed to import common
LIGHTGBM_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SCRIPTS_SOURCES_ROOT = os.path.join(LIGHTGBM_REPO_ROOT, 'src')
if SCRIPTS_SOURCES_ROOT not in sys.path:
logging.info(f"Adding {SCRIPTS_SOURCES_ROOT} to path")
sys.path.append(str(SCRIPTS_SOURCES_ROOT))
from common.tasks import inferencing_task, inferencing_variants
from common.pipelines import (
parse_pipeline_config,
azureml_connect,
pipeline_submit,
COMPONENTS_ROOT
)
from common.aml import load_dataset_from_data_input_spec
### CONFIG DATACLASS ###
# Step 1 : to configure your pipeline, add all your fields inside a
# properly defined dataclass, pipeline_cli_main will figure out how
# to read that config from a given yaml file + hydra override commands
@dataclass
class lightgbm_inferencing_config: # pylint: disable=invalid-name
    """ Config object constructed as a dataclass.

    NOTE: the name of this class will be used as namespace in your config yaml file.
    See conf/reference/evaluate_qas_model.yaml for an example.
    """
    benchmark_name: str = MISSING  # name used to tag this benchmark run
    tasks: List[inferencing_task] = MISSING       # data/model pairs to run inferencing on
    variants: List[inferencing_variants] = MISSING  # framework/build variants to compare
### PIPELINE COMPONENTS ###
# Step 2 : your pipeline consists in assembling components
# load those components from local yaml specifications
# use COMPONENTS_ROOT as base folder
# Load each AzureML component from its local yaml specification (under COMPONENTS_ROOT).
# NOTE: module-level so each spec is loaded once per process.
lightgbm_python_score_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "inferencing", "lightgbm_python", "spec.yaml"))
lightgbm_c_api_score_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "inferencing", "lightgbm_c_api", "spec.yaml"))
lightgbm_ray_score_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "inferencing", "lightgbm_ray", "spec.yaml"))
custom_win_cli_score_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "inferencing", "custom_win_cli", "spec.yaml"))
# treelite requires a model compilation step before its scoring step
treelite_compile_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "model_transformation", "treelite_compile", "spec.yaml"))
treelite_score_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "inferencing", "treelite_python", "spec.yaml"))
### INFERENCING TASKS ###
# Step 3: your pipeline consists in creating a python function
# decorated with @dsl.pipeline.
# You can create as many subgraphs as you want,
# but `pipeline_cli_main` will need one pipeline function
# taking a single config argument, not a pipeline parameter.
@dsl.pipeline(name=f"lightgbm_inferencing", # pythonic name
              description=f"LightGBM inferencing on user defined dataset/model",
              non_pipeline_parameters=['benchmark_custom_properties', 'config'])
def inferencing_task_pipeline_function(benchmark_custom_properties,
                                       config,
                                       data,
                                       model,
                                       predict_disable_shape_check):
    """This pipeline consists in running multiple inferencing
    frameworks in parallel on a given input data/model pair.

    Args:
        benchmark_custom_properties (dict): custom tags/properties attached to each step
        config (DictConfig): pipeline configuration; reads
            lightgbm_inferencing_config.variants and config.compute targets
        data: inferencing dataset (pipeline input)
        model: model to score with (pipeline input)
        predict_disable_shape_check (bool): forwarded to the frameworks that
            support it (lightgbm_python, lightgbm_c_api)

    Returns:
        dict[str->PipelineOutputData]: a dictionary of your pipeline outputs
        for instance to be consumed by other graphs.
        NOTE(review): currently always returned empty — no step registers outputs here.
    """
    # creating a dict to store pipeline outputs
    pipeline_outputs = {}

    # loop through all inferencing variants
    for variant_index, variant in enumerate(config.lightgbm_inferencing_config.variants):
        # add last minute custom properties
        custom_properties = benchmark_custom_properties.copy()
        custom_properties.update({
            # adding build settings (docker)
            'framework_build' : variant.build or "default",
            # adding variant_index to spot which variant is the reference
            'variant_index' : variant_index
        })
        # passing as json string that each module parses to digest as tags/properties
        custom_properties = json.dumps(custom_properties)

        # list of comments to surface on the component itself
        variant_comment = [
            f"variant #{variant_index}"
        ]

        if variant.framework == "treelite_python":
            # treelite requires compiling the model before scoring
            treelite_compile_step = treelite_compile_module(
                model = model,
                verbose = False,
                custom_properties = custom_properties
            )
            treelite_compile_step.runsettings.configure(target=config.compute.linux_cpu)

            inferencing_step = treelite_score_module(
                data = data,
                compiled_model = treelite_compile_step.outputs.compiled_model,
                verbose = False,
                custom_properties = custom_properties
            )
            inferencing_step.runsettings.configure(target=config.compute.linux_cpu)
        elif variant.framework == "lightgbm_c_api":
            # call module with all the right arguments
            inferencing_step = lightgbm_c_api_score_module(
                data = data,
                model = model,
                predict_disable_shape_check = predict_disable_shape_check,
                verbose = False,
                custom_properties = custom_properties
            )
            inferencing_step.runsettings.configure(target=config.compute.linux_cpu)
        elif variant.framework == "custom_win_cli":
            # call module with all the right arguments
            inferencing_step = custom_win_cli_score_module(
                data = data,
                model = model,
                verbose = False,
                # escape double quotes so the json string survives the windows command line
                custom_properties = custom_properties.replace("\"","\\\"")
            )
            inferencing_step.runsettings.configure(target=config.compute.windows_cpu)
        elif variant.framework == "lightgbm_python":
            # call module with all the right arguments
            inferencing_step = lightgbm_python_score_module(
                data = data,
                model = model,
                predict_disable_shape_check = predict_disable_shape_check,
                verbose = False,
                custom_properties = custom_properties
            )
            inferencing_step.runsettings.configure(target=config.compute.linux_cpu)
        elif variant.framework == "lightgbm_ray":
            # call module with all the right arguments
            inferencing_step = lightgbm_ray_score_module(
                data = data,
                model = model,
                verbose = False,
                custom_properties = custom_properties
            )
            inferencing_step.runsettings.configure(target=config.compute.linux_cpu)
        else:
            raise NotImplementedError(f"framework {variant.framework} not implemented (yet)")

        if variant.build:
            # build path is relative to docker/ subfolder
            custom_docker = Docker(file=os.path.join(LIGHTGBM_REPO_ROOT, variant.build))
            inferencing_step.runsettings.environment.configure(
                docker=custom_docker
            )
            variant_comment.append(f"build {variant.build}")
        else:
            variant_comment.append(f"default build")

        # add some comment to the component
        inferencing_step.comment = " -- ".join(variant_comment)

    # NOTE: pipeline_outputs is currently left empty
    return pipeline_outputs
@dsl.pipeline(
    name="inferencing_all_tasks",
    non_pipeline_parameters=["workspace", "config"] # required to use config object
)
def inferencing_all_tasks(workspace, config):
    """Pipeline's main building function.

    Builds one inferencing subgraph per configured task (data/model pair).

    Args:
        workspace (azureml.core.Workspace): the AzureML workspace
            This is not an actual pipeline parameter
        config (DictConfig): the pipeline configuration object containing pipeline config dataclass
            This is not an actual pipeline parameter

    Returns:
        None
    """
    # FIX: loop variable renamed from `inferencing_task` — it shadowed the
    # dataclass of the same name imported from common.tasks
    for task in config.lightgbm_inferencing_config.tasks:
        data = load_dataset_from_data_input_spec(workspace, task.data)
        model = load_dataset_from_data_input_spec(workspace, task.model)

        # custom properties used to tag/track this benchmark task
        benchmark_custom_properties = {
            'benchmark_name' : config.lightgbm_inferencing_config.benchmark_name,
            'benchmark_dataset' : task.data.name,
            'benchmark_model' : task.model.name,
        }

        inferencing_task_subgraph_step = inferencing_task_pipeline_function(
            data=data,
            model=model,
            # defaults to False when the task does not specify the flag
            predict_disable_shape_check=task.predict_disable_shape_check or False,
            benchmark_custom_properties=benchmark_custom_properties,
            config=config
        )

        # add some relevant comments on the subgraph
        inferencing_task_subgraph_step.comment = " -- ".join([
            f"benchmark name: {config.lightgbm_inferencing_config.benchmark_name}",
            # NOTE: add more here?
        ])
### MAIN BLOCK ###
# Step 4: implement main block
def main():
    """CLI entry point: parse config, connect to AzureML, build and submit the pipeline."""
    # use parse helper function to get arguments from CLI
    pipeline_config = parse_pipeline_config(lightgbm_inferencing_config)

    # you'll need a workspace object to connect
    aml_workspace = azureml_connect(pipeline_config)

    # run the pipeline function with the given arguments
    pipeline_instance = inferencing_all_tasks(aml_workspace, pipeline_config)

    # human-readable description surfaced on the experiment page
    experiment_description = (
        "Inferencing on all specified tasks (see yaml below).\n"
        "```yaml\n"
        "lightgbm_inferencing_config:\n"
        f"{OmegaConf.to_yaml(pipeline_config.lightgbm_inferencing_config)}\n"
        "```"
    )

    # validate/submit the pipeline (if run.submit=True)
    pipeline_submit(
        aml_workspace,
        pipeline_config,
        pipeline_instance,
        experiment_description=experiment_description
    )

if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | tests/conftest.py | <reponame>microsoft/lightgbm-benchmark
""" Add src/ to path """
import os
import sys
import logging
import pytest
import tempfile
LIGHTGBM_BENCHMARK_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "src")
)
if LIGHTGBM_BENCHMARK_ROOT not in sys.path:
logging.info(f"Adding {LIGHTGBM_BENCHMARK_ROOT} to path")
sys.path.append(str(LIGHTGBM_BENCHMARK_ROOT))
from common.pipelines import aml_connection_config
from common.paths import CONFIG_PATH
@pytest.fixture()
def config_directory():
    """Returns path to configuration files (CONFIG_PATH from common.paths)"""
    return CONFIG_PATH
@pytest.fixture()
def temporary_dir():
    """Yields the path to a temporary directory, removed after the test"""
    tmp_dir = tempfile.TemporaryDirectory()
    yield tmp_dir.name
    tmp_dir.cleanup()
# root folder holding test data samples (tests/data/...)
TEST_DATA_ROOT = os.path.join(os.path.dirname(__file__), "data")

@pytest.fixture()
def regression_train_sample():
    """Path to the regression training data sample."""
    return os.path.join(TEST_DATA_ROOT, "regression", "train")

@pytest.fixture()
def regression_test_sample():
    """Path to the regression test data sample."""
    return os.path.join(TEST_DATA_ROOT, "regression", "test")

@pytest.fixture()
def regression_inference_sample():
    """Path to the regression inferencing data sample."""
    return os.path.join(TEST_DATA_ROOT, "regression", "inference")

@pytest.fixture()
def regression_model_sample():
    """Path to the pre-trained regression model sample."""
    return os.path.join(TEST_DATA_ROOT, "regression", "model")
# add cli options to connect to AzureML
def pytest_addoption(parser):
    """Registers the AzureML connection cli options on the pytest parser."""
    for option_name in (
        "--aml_subscription_id",
        "--aml_resource_group",
        "--aml_workspace_name",
        "--aml_auth",
        "--aml_tenant",
    ):
        parser.addoption(option_name, action="store")
@pytest.fixture(scope='session')
def aml_config(request):
    """
    Creates some aml config for unit tests that require connectivity (tests/pipelines)
    Values come from pytest cli options first, falling back to environment variables.
    NOTE: will except with AssertionError and fail test if not provided properly
    """
    subscription_id = request.config.option.aml_subscription_id or os.environ.get('AML_SUBSCRIPTION_ID')
    resource_group = request.config.option.aml_resource_group or os.environ.get('AML_RESOURCE_GROUP')
    workspace_name = request.config.option.aml_workspace_name or os.environ.get('AML_WORKSPACE_NAME')
    auth = request.config.option.aml_auth or os.environ.get('AML_AUTH') or "interactive"
    tenant = request.config.option.aml_tenant or os.environ.get('AML_TENANT')

    # collect every missing required value so the failure reports them all at once
    test_config = []
    if subscription_id is None:
        test_config.append("To run this unit test, you need to provide a subscription through --aml_subscription_id or env var AML_SUBSCRIPTION_ID")
    if resource_group is None:
        # FIX: message used to say "a subscription" (copy/paste error)
        test_config.append("To run this unit test, you need to provide a resource group through --aml_resource_group or env var AML_RESOURCE_GROUP")
    if workspace_name is None:
        # FIX: message used to say "a subscription" (copy/paste error)
        test_config.append("To run this unit test, you need to provide a workspace name through --aml_workspace_name or env var AML_WORKSPACE_NAME")
    assert (not test_config), "\n".join(test_config)

    return aml_connection_config(
        subscription_id,
        resource_group,
        workspace_name,
        auth,
        tenant,
        False # force auth
    )
|
microsoft/lightgbm-benchmark | src/common/io.py | <filename>src/common/io.py<gh_stars>10-100
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
This contains helper functions to handle inputs and outputs arguments
in the benchmark scripts. It also provides some automation routine to handle data.
"""
import os
import argparse
import logging
def input_file_path(path):
    """ Argparse type to resolve input path as single file from directory.
    Given input path can be either a file, or a directory.
    If it's a directory, this returns the path to the unique file it contains.

    Args:
        path (str): either file or directory path

    Returns:
        str: path to file, or to unique file in directory
    """
    logger = logging.getLogger(__name__)

    if os.path.isfile(path):
        logger.info(f"Found INPUT file {path}")
        return path

    if os.path.isdir(path):
        entries = os.listdir(path)
        if not entries:
            raise Exception(f"Could not find any file in specified input directory {path}")
        if len(entries) > 1:
            raise Exception(f"Found multiple files in input file path {path}, use input_directory_path type instead.")
        logger.info(f"Found INPUT directory {path}, selecting unique file {entries[0]}")
        return os.path.join(path, entries[0])

    # neither a file nor a directory: log loudly but hand the path back unchanged
    logger.critical(f"Provided INPUT path {path} is neither a directory or a file???")
    return path
def get_all_files(path, fail_on_unknown_type=False):
    """ Scans some input path and returns a list of files.

    Args:
        path (str): either a file, or directory path
        fail_on_unknown_type (bool): fails if path is neither a file or a dir?

    Returns:
        List[str]: list of paths contained in path

    Raises:
        Exception: if path is an empty directory
        FileNotFoundError: if path is neither a file nor a directory
            and fail_on_unknown_type is True
    """
    # if input path is already a file, return as list
    if os.path.isfile(path):
        logging.getLogger(__name__).info(f"Found INPUT file {path}")
        return [path]

    # if input path is a directory, list all files and return
    if os.path.isdir(path):
        all_files = [ os.path.join(path, entry) for entry in os.listdir(path) ]
        if not all_files:
            raise Exception(f"Could not find any file in specified input directory {path}")
        return all_files

    if fail_on_unknown_type:
        raise FileNotFoundError(f"Provided INPUT path {path} is neither a directory or a file???")

    logging.getLogger(__name__).critical(f"Provided INPUT path {path} is neither a directory or a file???")
    # FIX: used to return the bare string (callers iterating the result would
    # get characters); wrap in a list to honor the List[str] contract
    return [path]
class PartitioningEngine():
    """This class handles partitioning data files into chunks with various strategies. """
    PARTITION_MODES = [
        'chunk',       # fixed number of lines per output partition
        'roundrobin',  # fixed number of partitions, lines distributed round-robin
        'append'       # fixed number of partitions, whole files appended into groups
    ]

    def __init__(self, mode, number, header=False, logger=None):
        """Constructs and setup of the engine

        Args:
            mode (str): which partition mode (in PartitioningEngine.PARTITION_MODES list)
            number (int): parameter, behavior depends on mode
            header (bool): are there headers in the input files?
            logger (logging.Logger): a custom logger, if needed, for this engine to log
        """
        self.mode = mode
        self.number = number
        self.header = header
        self.logger = logger or logging.getLogger(__name__)

    def split_by_append(self, input_files, output_path, file_count_target):
        """Just appends N++ files in N groups.

        NOTE(review): this mode ignores self.header, so with header=True the
        per-file header lines end up inside the partition data — confirm intent.

        Args:
            input_files (List[str]): list of file paths
            output_path (str): directory path, where to write the partitions
            file_count_target (int): how many partitions we want
        """
        if len(input_files) < file_count_target:
            raise Exception(f"To use mode=append, the number of input files ({len(input_files)}) needs to be higher than requested number of output files ({file_count_target})")

        # each partition starts as an empty list
        partitions = [
            [] for i in range(file_count_target)
        ]

        # loop on all files, and put them in one partition
        for index, input_file in enumerate(input_files):
            partitions[index % file_count_target].append(input_file)
        self.logger.info(f"Shuffled {len(input_files)} files into {file_count_target} partitions.")

        # then write each partition by appending content
        for current_partition_index, partition in enumerate(partitions):
            self.logger.info(f"Writing partition {current_partition_index}...")
            with open(os.path.join(output_path, "part_{:06d}".format(current_partition_index)), 'a', encoding="utf-8") as output_handler:
                for input_file in partition:
                    self.logger.info(f"Reading input file {input_file}...")
                    with open(input_file, 'r') as input_handler:
                        output_handler.write(input_handler.read())

        self.logger.info(f"Created {current_partition_index+1} partitions")

    def split_by_size(self, input_files, output_path, partition_size):
        """Splits input files into a variable number of partitions
        by chunking a fixed number of lines from inputs into each
        output file.

        When self.header is True, the header line of the first input file is
        replicated at the top of every partition, and the header lines of
        subsequent input files are skipped.

        Args:
            input_files (List[str]): list of file paths
            output_path (str): directory path, where to write the partitions
            partition_size (int): how many lines per partition
        """
        current_partition_size = 0
        current_partition_index = 0
        self.logger.info(f"Creating partition {current_partition_index}")

        header_line = None # there can be only one header line
        for input_file in input_files:
            self.logger.info(f"Opening input file {input_file}")
            with open(input_file, "r", encoding="utf-8") as input_handler:
                for line_index, line in enumerate(input_handler):
                    if self.header and header_line is None:
                        # FIX: capture the header from the first line of the
                        # first file and do NOT fall through (it used to be
                        # written twice into the first partition and counted
                        # as a data line)
                        header_line = line
                        continue
                    elif self.header and line_index == 0:
                        # FIX: skip header lines of 2nd... input files
                        # (mirrors split_by_count behavior)
                        continue

                    if partition_size > 0 and current_partition_size >= partition_size:
                        # current partition is full, roll over to a new one
                        current_partition_index += 1
                        current_partition_size = 0
                        self.logger.info(f"Creating partition {current_partition_index}")

                    with open(os.path.join(output_path, "part_{:06d}".format(current_partition_index)), 'a', encoding="utf-8") as output_handler:
                        if self.header and current_partition_size == 0:
                            # put header before anything else
                            output_handler.write(header_line)
                        output_handler.write(line)

                    current_partition_size += 1

        self.logger.info(f"Created {current_partition_index+1} partitions")

    def split_by_count(self, input_files, output_path, partition_count):
        """Splits input files into a fixed number of partitions by round-robin
        shuffling of the lines of input files.

        Args:
            input_files (List[str]): list of file paths
            output_path (str): directory path, where to write the partitions
            partition_count (int): how many partitions to create
        """
        self.logger.info(f"Creating {partition_count} partitions using round robin.")
        partition_files = [open(os.path.join(output_path, "part_{:06d}".format(i)), "w", encoding="utf-8") for i in range(partition_count)]

        current_index = 0
        header_line = None # there can be only one header line
        for input_file in input_files:
            self.logger.info(f"Opening input file {input_file}")
            with open(input_file, "r", encoding="utf-8") as input_handler:
                for line_index, line in enumerate(input_handler):
                    if self.header and header_line is None:
                        # header from the first line of the first file
                        # is replicated at the top of every partition
                        header_line = line
                        for partition_file in partition_files:
                            partition_file.write(header_line)
                        continue
                    elif self.header and line_index == 0:
                        # if first line of 2nd... input file, just pass
                        continue
                    partition_files[current_index % partition_count].write(line)
                    current_index += 1

        for handler in partition_files:
            handler.close()

        self.logger.info(f"Created {partition_count} partitions")

    def run(self, input_path, output_path):
        """Runs the partition based on provided arguments.

        Args:
            input_path (str): path to input file(s)
            output_path (str): path to store output partitions
        """
        # Retrieve all input files
        if os.path.isfile(input_path):
            self.logger.info("Input is one unique file")
            file_names = [os.path.basename(input_path)]
            input_files = [input_path]
        else:
            self.logger.info("Input is a directory, listing all of them for processing")
            file_names = os.listdir(input_path)
            input_files = [os.path.join(input_path, file) for file in file_names]

        self.logger.info("Found {} files in {}".format(len(input_files), input_path))

        # dispatch on the requested partitioning mode (self.number semantics vary)
        if self.mode == "chunk":
            self.split_by_size(input_files, output_path, self.number)
        elif self.mode == "roundrobin":
            self.split_by_count(input_files, output_path, self.number)
        elif self.mode == "append":
            self.split_by_append(input_files, output_path, self.number)
        else:
            raise NotImplementedError(f"Mode {self.mode} not implemented.")
|
microsoft/lightgbm-benchmark | tests/scripts/test_lightgbm_python.py | <filename>tests/scripts/test_lightgbm_python.py
"""
Executes the series of scripts end-to-end
to test LightGBM (python) manual benchmark
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch
from scripts.training.lightgbm_python import train
from scripts.inferencing.lightgbm_python import score
from common.distributed import mpi_config_class
# IMPORTANT: see conftest.py for fixtures
@patch('common.distributed.MPIHandler')
def test_lightgbm_python_train(mpi_handler_mock, temporary_dir, regression_train_sample, regression_test_sample):
    """Tests src/scripts/training/lightgbm_python/train.py

    Mocks MPIHandler so the script believes it runs as a single node,
    then runs train.main() against the regression data samples.
    """
    model_dir = os.path.join(temporary_dir, "model")

    # create test arguments for the script (small model so the test stays fast)
    objective_argument = "regression"
    script_args = [
        "train.py",
        "--train", regression_train_sample,
        "--test", regression_test_sample,
        "--export_model", model_dir,
        "--objective", objective_argument,
        "--boosting_type", "gbdt",
        "--tree_learner", "serial",
        "--metric", "rmse",
        "--num_trees", "5",
        "--num_leaves", "10",
        "--min_data_in_leaf", "1",
        "--learning_rate", "0.3",
        "--max_bin", "16",
        "--feature_fraction", "0.15",
        "--device_type", "cpu"
    ]

    # fake mpi initialization + config (single node, main node)
    mpi_handler_mock().mpi_config.return_value = mpi_config_class(
        1, # world_size
        0, # world_rank
        False, # mpi_available
        True, # main_node
    )

    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        train.main()

    # test expected outputs
    assert os.path.isfile(
        os.path.join(model_dir, "model.txt")
    ), "Script train.py should generate a model.txt output file but did not"
def test_lightgbm_python_score(temporary_dir, regression_model_sample, regression_inference_sample):
    """Tests src/scripts/inferencing/lightgbm_python/score.py"""
    output_dir = os.path.join(temporary_dir, "predictions")

    # assemble a fake command line for the script
    cli_args = ["score.py"]
    cli_args += ["--data", regression_inference_sample]
    cli_args += ["--model", regression_model_sample]
    cli_args += ["--output", output_dir]

    # run the script's main() as if invoked with those arguments
    with patch.object(sys, "argv", cli_args):
        score.main()

    # the script is expected to write its predictions file in the output folder
    assert os.path.isfile(os.path.join(output_dir, "predictions.txt"))
|
microsoft/lightgbm-benchmark | tests/scripts/test_lightgbm_inferencing_ray.py | """
Test LightGBM Inferencing using Ray
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch
from scripts.inferencing.lightgbm_ray import score
# IMPORTANT: see conftest.py for fixtures
def test_lightgbm_ray_score(temporary_dir, regression_model_sample, regression_inference_sample):
    """Tests src/scripts/inferencing/lightgbm_ray/score.py"""
    predictions_dir = os.path.join(temporary_dir, "predictions")

    # create test arguments for the script
    script_args = [
        "score.py",
        "--data", regression_inference_sample,
        "--model", regression_model_sample,
        "--output", predictions_dir
    ]

    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        score.main()

    # the script is expected to write its predictions file in the output folder
    assert os.path.isfile(os.path.join(predictions_dir, "predictions.txt"))
|
microsoft/lightgbm-benchmark | src/common/sweep.py | <reponame>microsoft/lightgbm-benchmark
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Parses Sweep parameters from text arguments (cli or yaml)
"""
import re
import argparse
import logging
from azureml.core import Workspace, Datastore, Dataset
from azureml.train.hyperdrive import (
choice,
quniform,
qloguniform,
qnormal,
qlognormal,
uniform,
loguniform,
normal,
lognormal
)
class SweepParameterParser():
    """Parses sweep (hyperdrive) parameter expressions given as strings
    (cli arguments or yaml values) and sorts them into tunable vs fixed params."""

    # which sweep expression types each sampling algorithm accepts
    ALLOWED_DISTRIBUTIONS = {
        "random": ["choice", "uniform", "loguniform", "normal", "lognormal", "quniform"],
        "grid": ["choice"],
        "bayesian": ["choice", "quniform", "uniform"]
    }

    def __init__(self, tunable_parameters, cli_prefix, parameter_sampling):
        """
        Args:
            tunable_parameters (List[str]): names of the parameters that may be swept
            cli_prefix (str): prefix prepended to each parameter on the cli (ex: "--")
            parameter_sampling (str): sampling algorithm, a key of ALLOWED_DISTRIBUTIONS

        Raises:
            Exception: if parameter_sampling is not a supported sampling algorithm
        """
        self.tunable_parameters = tunable_parameters
        self.cli_prefix = cli_prefix
        self.parameter_sampling = parameter_sampling
        self.parser = None
        self.args = None
        self.unknown_args = None
        self.tunable_params = {}
        self.fixed_params = {}
        self.logger = logging.getLogger(__name__)

        if self.parameter_sampling not in SweepParameterParser.ALLOWED_DISTRIBUTIONS:
            # FIX: corrected typo "distributiond" in the message below
            raise Exception(f"Sampling distribution {self.parameter_sampling} is not in the list of allowed distributions {SweepParameterParser.ALLOWED_DISTRIBUTIONS}")

    def get_arg_parser(self, parser=None):
        """Adds component/module arguments to a given argument parser.

        Args:
            parser (argparse.ArgumentParser): an argument parser instance

        Returns:
            ArgumentParser: the argument parser instance

        Notes:
            if parser is None, creates a new parser instance
        """
        # add arguments that are specific to the module
        if parser is None:
            parser = argparse.ArgumentParser(__doc__)

        for key in self.tunable_parameters:
            parser.add_argument(
                self.cli_prefix + key,
                type=str,
                required=False,
                default=None,
                # FIX: help used to be a "TODO" placeholder
                help=f"either a fixed value or a sweep expression (ex: 'choice(...)') for {key}"
            )

        self.parser = parser
        return self.parser

    def parse_from_dict(self, parameter_dict):
        """Parses parameters provided in a dictionary to check if they are sweepable.

        Args:
            parameter_dict (dict)

        Returns:
            tunable_params (dict): all sweep parameters from parameter_dict, constructed as sweep sdk objects
            fixed_params (dict): all fixed / constant parameters from parameter_dict
        """
        self.logger.debug(f"parsing sweep params from input dict {parameter_dict}")
        self.tunable_params = {}
        self.fixed_params = {}

        # we're building a loop to test every compatible parsing format
        tunable_parsing_methods = {}
        for sweep_parameter_key in SweepParameterParser.ALLOWED_DISTRIBUTIONS[self.parameter_sampling]:
            parsing_method_key = f"_parse_{sweep_parameter_key}"
            if hasattr(self, parsing_method_key):
                tunable_parsing_methods[sweep_parameter_key] = getattr(self, parsing_method_key)
            else:
                raise ValueError(f"sweep parameter type {sweep_parameter_key} from SweepParameterParser.ALLOWED_DISTRIBUTIONS[{self.parameter_sampling}] unknown, cannot find parsing method {parsing_method_key}")
        self.logger.debug(f"prepared parsing methods {tunable_parsing_methods}")

        # for any key in tunable parameters
        for param_key in self.tunable_parameters:
            # let's get the value from the parsed args
            if param_key in parameter_dict:
                param_value = parameter_dict[param_key]
            else:
                continue

            if not isinstance(param_value, str):
                # if we hit a default value in the argparser, let's continue
                continue

            # if that value matches any of the parsing methods
            for sweep_param_key in tunable_parsing_methods:
                if param_value.lower().startswith(sweep_param_key):
                    # run the parsing method for this parameter
                    # and add it to the actually tunable params
                    self.tunable_params[param_key] = tunable_parsing_methods[sweep_param_key](param_value)
                    break
            else:
                # if nothing matches, let's consider this a fixed param
                self.fixed_params[param_key] = self._parse_number(param_value)

        self.logger.debug(f"found tunable/sweep params: {self.tunable_params}")
        self.logger.debug(f"found fixed/const params: {self.fixed_params}")
        return self.tunable_params, self.fixed_params

    def parse_from_argparse(self, args):
        """Parses parameters provided as an argparse namespace.

        Args:
            args (argparse.Namespace)

        Returns:
            tunable_params (dict): all sweep parameters from parameter_dict, constructed as sweep sdk objects
            fixed_params (dict): all fixed / constant parameters from parameter_dict
        """
        self.logger.debug(f"parsing sweep params from argparse namespace {args}")
        return self.parse_from_dict(vars(args))

    def get_tunable_params(self):
        """ Returns sweep params parsed from last call to parse function. """
        return self.tunable_params

    def get_fixed_params(self):
        """ Returns fixed params parsed from last call to parse function. """
        return self.fixed_params

    # BELOW: PARSING ALL SWEEP EXPRESSIONS
    # see https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
    # choice, quniform, qloguniform, qnormal, qlognormal,
    # uniform, loguniform, normal, lognormal

    @classmethod
    def _parse_number(cls, number_as_str):
        """Guesses the type of the argument: int first, then float,
        else returns the stripped string unchanged."""
        number_as_str = number_as_str.strip(" ") # remove space at beginning and end
        for type_class in [int, float]:
            try:
                return type_class(number_as_str)
            except (ValueError, TypeError): # FIX: was a bare except
                pass
        return number_as_str

    @classmethod
    def _parse_choice(cls, parameter_as_str):
        """Parses a "choice(...)" expression into the hyperdrive choice() object."""
        choice_pattern = r"choice\(([0-9\. ,]+)\)"
        matched_choice = re.match(choice_pattern, parameter_as_str)
        if not matched_choice:
            raise Exception(f"Could not match required format {choice_pattern} in expression {parameter_as_str}")

        choice_entry_pattern = r"[0-9\. ]+"
        choice_list = re.findall(choice_entry_pattern, matched_choice.group(1))
        if not choice_list:
            raise Exception(f"Could not match required list of choices with pattern {choice_entry_pattern} in expression {matched_choice.group(1)}")

        return choice(
            *[cls._parse_number(group) for group in choice_list]
        )

    @classmethod
    def _parse_quniform(cls, parameter_as_str):
        return cls._parse_match(
            parameter_as_str,
            r"quniform\(([0-9\. ]+),([0-9\. ]+),([0-9\. ]+)\)",
            quniform
        )

    @classmethod
    def _parse_qloguniform(cls, parameter_as_str):
        return cls._parse_match(
            parameter_as_str,
            r"qloguniform\(([0-9\. ]+),([0-9\. ]+),([0-9\. ]+)\)",
            qloguniform
        )

    @classmethod
    def _parse_qnormal(cls, parameter_as_str):
        return cls._parse_match(
            parameter_as_str,
            r"qnormal\(([0-9\. ]+),([0-9\. ]+),([0-9\. ]+)\)",
            qnormal
        )

    @classmethod
    def _parse_qlognormal(cls, parameter_as_str):
        return cls._parse_match(
            parameter_as_str,
            r"qlognormal\(([0-9\. ]+),([0-9\. ]+),([0-9\. ]+)\)",
            qlognormal
        )

    @classmethod
    def _parse_uniform(cls, parameter_as_str):
        return cls._parse_match(
            parameter_as_str,
            r"uniform\(([0-9\. ]+),([0-9\. ]+)\)",
            uniform
        )

    @classmethod
    def _parse_loguniform(cls, parameter_as_str):
        return cls._parse_match(
            parameter_as_str,
            r"loguniform\(([0-9\. ]+),([0-9\. ]+)\)",
            loguniform
        )

    @classmethod
    def _parse_normal(cls, parameter_as_str):
        return cls._parse_match(
            parameter_as_str,
            r"normal\(([0-9\. ]+),([0-9\. ]+)\)",
            normal
        )

    @classmethod
    def _parse_lognormal(cls, parameter_as_str):
        return cls._parse_match(
            parameter_as_str,
            r"lognormal\(([0-9\. ]+),([0-9\. ]+)\)",
            lognormal
        )

    @classmethod
    def _parse_match(cls, parameter_as_str, pattern, matched_class):
        """Matches parameter_as_str against pattern and feeds the parsed
        numeric groups to matched_class (one of the hyperdrive sdk functions)."""
        matched_param = re.match(pattern, parameter_as_str)
        if matched_param:
            return matched_class(
                *[cls._parse_number(group) for group in matched_param.groups()]
            )
        else:
            raise Exception(f"Could not match required format {pattern} in expression {parameter_as_str}")
|
microsoft/lightgbm-benchmark | src/common/data.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Helper code to handle/process data
"""
import numpy as np
# derived from https://github.com/scikit-learn/scikit-learn/blob/7e1e6d09bcc2eaeba98f7e737aac2ac782f0e5f1/sklearn/datasets/_samples_generator.py#L506
# for generating data in batches
class RegressionDataGenerator():
    """Generates synthetic regression batches: gaussian features and a linear
    response from a sparse ground-truth vector, plus optional gaussian noise."""
    def __init__(self,
                 batch_size: int,
                 n_features: int,
                 n_informative: int,
                 bias: float,
                 noise: float,
                 seed: int):
        """Initializes the generator (see class docstring for the data model)."""
        self.batch_size = batch_size
        self.n_features = n_features
        self.n_informative = min(n_features, n_informative)
        self.bias = bias
        self.noise = noise
        self.generator = np.random.RandomState(seed)

        # sparse ground-truth coefficients: only the first n_informative rows
        # are non-zero, drawn uniformly in [0, 100)
        self.ground_truth = np.zeros((self.n_features, 1))
        self.ground_truth[:self.n_informative, :] = 100 * self.generator.rand(self.n_informative, 1)

    def generate(self, partition_count=0):
        """Generate one batch of data.

        Args:
            partition_count (int): unused here; kept for interface parity
                with the other generators.

        Returns:
            X (numpy.ndarray): features, shape (batch_size, n_features)
            y (numpy.ndarray): targets, shape (batch_size,)
        """
        # well conditioned gaussian features
        features = self.generator.randn(self.batch_size, self.n_features)
        targets = features @ self.ground_truth + self.bias

        # optional gaussian noise on the response
        if self.noise > 0.0:
            targets = targets + self.generator.normal(scale=self.noise, size=targets.shape)

        return features, np.squeeze(targets)
class ClassificationDataGenerator():
    """Generator for classification data (gaussian features + integer labels
    derived from a linear response)."""
    def __init__(self,
                 n_label_classes: int,
                 batch_size: int,
                 n_features: int,
                 n_informative: int,
                 bias: float,
                 noise: float,
                 seed: int):
        """Initializes the generator

        Args:
            n_label_classes: number of distinct integer labels to produce
            batch_size: number of samples per generated batch
            n_features: total number of feature columns
            n_informative: features actually driving the label (clamped to n_features)
            bias: constant added to the linear response
            noise: stddev of gaussian noise added to the response (0 = no noise)
            seed: RandomState seed for reproducibility
        """
        self.n_label_classes = n_label_classes
        self.batch_size = batch_size
        self.n_features = n_features
        self.n_informative = min(n_features, n_informative)
        self.bias = bias
        self.noise = noise
        self.generator = np.random.RandomState(seed)

        # Generate a ground truth model with only n_informative features being non-zeros
        self.ground_truth = np.zeros((self.n_features, 1))
        self.ground_truth[:n_informative, :] = 100 * self.generator.rand(self.n_informative, 1)

    def generate(self, partition_count=0):
        """Generate one batch of data.

        Note: partition_count is unused here; kept for interface parity with
        RankingDataGenerator.

        Returns:
            X (numpy.ndarray)
            y (numpy.ndarray)
        """
        # Randomly generate a well conditioned input set
        X = self.generator.randn(self.batch_size, self.n_features)
        y = np.dot(X, self.ground_truth) + self.bias

        # Add noise
        if self.noise > 0.0:
            y += self.generator.normal(scale=self.noise, size=y.shape)

        # create n_label_classes ranking labels
        # NOTE(review): the maximum response maps exactly to n_label_classes, so
        # one sample can get a label outside the 0..n_label_classes-1 range —
        # confirm whether labels should be clipped to n_label_classes-1.
        # NOTE(review): builtin min/max is applied to a (batch, 1) array here;
        # np.min/np.max would be the explicit form.
        y = ((y - min(y))/(max(y)-min(y))*self.n_label_classes).astype(int)
        y = np.squeeze(y)
        return X, y
class RankingDataGenerator():
    """Generator for ranking (lambdarank) data.

    Like the classification generator, but prepends a query-id column to X
    (docs_per_query consecutive rows share one query id).
    """
    def __init__(self,
                 docs_per_query: int,
                 n_label_classes: int,
                 batch_size: int,
                 n_features: int,
                 n_informative: int,
                 bias: float,
                 noise: float,
                 seed: int):
        """Initializes the generator"""
        self.docs_per_query = docs_per_query
        self.n_label_classes = n_label_classes
        self.batch_size = batch_size
        self.n_features = n_features
        # cannot have more informative features than total features
        self.n_informative = min(n_features, n_informative)
        self.bias = bias
        self.noise = noise
        self.generator = np.random.RandomState(seed)
        # Generate a ground truth model with only n_informative features being non-zeros
        self.ground_truth = np.zeros((self.n_features, 1))
        self.ground_truth[:n_informative, :] = 100 * self.generator.rand(self.n_informative, 1)
    def generate(self, partition_count=0):
        """Generate one batch of data.

        Args:
            partition_count (int): offsets the query ids so that distinct
                partitions get distinct query ids
        Returns:
            X (numpy.ndarray): query id column + features, shape (batch_size, n_features+1)
            y (numpy.ndarray): integer relevance labels in [0, n_label_classes-1], shape (batch_size,)
        """
        # Randomly generate a well conditioned input set
        X = self.generator.randn(self.batch_size, self.n_features)
        y = np.dot(X, self.ground_truth) + self.bias
        # query ids start at a partition-specific base so partitions don't collide
        base_count = max(self.docs_per_query, 1000) * (partition_count + 1)
        # add query column (docs_per_query consecutive docs share a query id)
        query_col = [[(i // self.docs_per_query) + base_count] for i in range(self.batch_size)]
        X = np.hstack((query_col, X))
        # Add noise
        if self.noise > 0.0:
            y += self.generator.normal(scale=self.noise, size=y.shape)
        # bin continuous targets into n_label_classes relevance labels;
        # clip so the max target maps to class n_label_classes-1 (the plain
        # floor formula would otherwise create an extra class for the max value)
        span = y.max() - y.min()
        if span > 0:
            y = ((y - y.min()) / span * self.n_label_classes).astype(int)
            y = np.clip(y, 0, self.n_label_classes - 1)
        else:
            # degenerate case: all targets identical, avoid division by zero
            y = np.zeros(y.shape, dtype=int)
        y = np.squeeze(y)
        return X, y
microsoft/lightgbm-benchmark | src/scripts/analysis/analyze.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
TreeLite/Python inferencing script
"""
import os
import sys
import re
import json
import argparse
import logging
from distutils.util import strtobool
from jinja2 import Template
import mlflow
import pandas as pd
import numpy as np
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
if COMMON_ROOT not in sys.path:
logging.debug(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.metrics import MetricsLogger
from common.io import input_file_path
from shrike.pipeline.aml_connect import add_cli_args, azureml_connect_cli
def get_arg_parser(parser=None):
    """Adds component/module arguments to a given argument parser.

    Args:
        parser (argparse.ArgumentParser): an argument parser instance
    Returns:
        ArgumentParser: the argument parser instance
    Notes:
        if parser is None, creates a new parser instance
    """
    # add arguments that are specific to the script
    if parser is None:
        parser = argparse.ArgumentParser(__doc__)
    group_exp = parser.add_argument_group("MLFlow Experiment")
    group_exp.add_argument("--experiment-id", dest="experiment_id",
        required=True, type=str)
    group_exp.add_argument("--benchmark-id", dest="benchmark_id",
        required=True, type=str)
    # single top-level argument (not an argument group)
    parser.add_argument("--mlflow-target", dest="mlflow_target", required=False, type=str, choices=['azureml', 'local'], default='local')
    group_data = parser.add_argument_group("Data operations")
    # NOTE: help strings previously swapped between --data-load and --data-save
    group_data.add_argument("--data-load", dest="data_load",
        required=False, default=None, type=str, help="path to file to read benchmark data from (skips fetching)"
    )
    group_data.add_argument("--data-save", dest="data_save",
        required=False, default=None, type=str, help="path to file to export fetched benchmark data"
    )
    group_analysis = parser.add_argument_group("Analysis parameters")
    group_analysis.add_argument(
        "--template",
        required=True,
        type=str,
        choices=['inferencing'],
        help="which analysis template to use"
    )
    group_analysis.add_argument(
        "--output",
        required=False,
        default=None,
        type=str,
        help="Path to write report in markdown"
    )
    group_analysis.add_argument(
        "--verbose",
        required=False,
        default=False,
        type=strtobool, # use this for bool args, do not use action_store=True
        help="set True to show DEBUG logs",
    )
    group_aml = parser.add_argument_group("AzureML Connect (if using --mlflow-target azureml)")
    add_cli_args(group_aml)
    return parser
class AnalysisEngine():
    """
    Class to run the analysis of multiple AzureML runs
    and generate a benchmark report.

    Typical usage: fetch_benchmark_data() (or load_benchmark_data()),
    optionally save_benchmark_data(), then report_inferencing().
    """
    def __init__(self):
        """ Constructor """
        # pandas DataFrame of run data obtained from AzureML (filled by fetch/load)
        self.benchmark_data = None
        self.variants = None
        self.models = None
        self.datasets = None
        # location of the jinja templates to generate reports
        self.templates_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "templates"))
        self.logger = logging.getLogger(__name__)
    def load_benchmark_data(self, file_path):
        """ Loads the previously saved benchmark data (and skip fetching)

        Args:
            file_path (str): path to a json file written by save_benchmark_data()
        """
        # read the dataframe back from json
        self.benchmark_data = pd.read_json(file_path)
    def save_benchmark_data(self, file_path):
        """ Saves the fetched benchmark data into a file

        Args:
            file_path (str): destination path; parent directories are created if needed
        """
        # create output directory (guard against a bare filename with no directory)
        if os.path.dirname(file_path):
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
        # write dataframe as json
        self.benchmark_data.to_json(file_path)
    def fetch_benchmark_data(self, experiment_id, filter_string):
        """ Gets the data from fetching AzureML runs with a given set of filters

        Args:
            experiment_id (str): mlflow experiment to search
            filter_string (str): mlflow search_runs filter expression
        Returns:
            pandas.DataFrame: the fetched (and enriched) benchmark data
        """
        self.logger.info("Fetching Experiment")
        mlflow.set_experiment(experiment_id)
        self.logger.info("Fetching Benchmark Runs")
        # NOTE: returns a pandas dataframe
        self.benchmark_data = mlflow.search_runs(
            filter_string=filter_string
        )
        # extract all model information if present
        if 'tags.benchmark_model' in self.benchmark_data.columns:
            models = self.benchmark_data[['tags.benchmark_model']].drop_duplicates()
            # parse model tag of the form model-<origin>-<task>-<N>cols-<N>trees-<N>leaves
            model_info = models['tags.benchmark_model'].str.extract(r"model-([a-zA-Z0-9]+)-([a-zA-Z0-9]+)-([0-9]+)cols-([0-9]+)trees-([0-9]+)leaves")
            model_info.columns = ['model_origin', 'model_task', 'model_columns', 'model_trees', 'model_leaves']
            models = models.join(model_info)
            self.benchmark_data = pd.merge(self.benchmark_data, models, how='left', on='tags.benchmark_model')
        # extract all dataset information if present
        if 'tags.benchmark_dataset' in self.benchmark_data.columns:
            datasets = self.benchmark_data[['tags.benchmark_dataset']].drop_duplicates()
            # parse dataset tag of the form data-<origin>-<task>-<N>cols-<N>samples-<task>
            dataset_info = datasets['tags.benchmark_dataset'].str.extract(r"data-([a-zA-Z0-9]+)-([a-zA-Z0-9]+)-([0-9]+)cols-([0-9]+)samples-([a-zA-Z0-9]+)")
            dataset_info.columns = ['dataset_origin', 'dataset_task', 'dataset_columns', 'dataset_samples', 'dataset_benchmark_task']
            datasets = datasets.join(dataset_info)
            self.benchmark_data = pd.merge(self.benchmark_data, datasets, how='left', on='tags.benchmark_dataset')
        return self.benchmark_data
    def report_inferencing(self, output_path):
        """ Uses fetched or load data to produce a reporting for inferencing tasks.

        Args:
            output_path (str): where to write the markdown report (prints to stdout if None)
        """
        # create variant readable id
        self.benchmark_data['variant_id'] = self.benchmark_data['tags.framework'] + "#" + self.benchmark_data['tags.variant_index']
        # extract variants
        variants = self.benchmark_data[
            [
                # select variant specific columns/tags
                'variant_id',
                'tags.variant_index',
                'tags.framework',
                'tags.framework_version',
                'tags.framework_build',
                'tags.cpu_count',
                'params.num_threads',
                'tags.machine',
                'tags.system'
            ]
        ].drop_duplicates().set_index('variant_id').sort_values(by='tags.variant_index')
        # get a list of variant_id ordered by tags.variant_index
        variant_indices = (
            self.benchmark_data[['tags.variant_index', 'variant_id']]
            .drop_duplicates()
            .set_index('tags.variant_index')
            .to_dict()
        )['variant_id']
        variant_indices_sorted_keys = sorted(list(variant_indices.keys()))
        variant_indices_sorted = [ variant_indices[k] for k in variant_indices_sorted_keys ]
        variants.columns = ['index', 'framework', 'version', 'build', 'cpu count', 'num threads', 'machine', 'system']
        # reduce time_inferencing to predict time per request, in micro seconds
        self.benchmark_data['avg_predict_time_usecs'] = self.benchmark_data['metrics.time_inferencing'].astype(float) / self.benchmark_data['dataset_samples'].astype(int) * 1000000
        # create a readable name for each task configuration
        self.benchmark_data['inferencing task config'] = (
            self.benchmark_data['model_trees'] + " trees<br/>"
            + self.benchmark_data['model_leaves'] + " leaves<br/>"
            + self.benchmark_data['model_columns'] + " cols"
        )
        # pivot metrics table
        metrics = self.benchmark_data.pivot(
            index=['inferencing task config'],
            columns=['variant_id'],
            values=['avg_predict_time_usecs']
        )
        # rename columns to have only variant_id
        metrics.columns = [ col[1] for col in metrics.columns ]
        metrics = metrics[variant_indices_sorted] # order columns by increasing tags.variant_index
        percentile_metrics_reports = []
        for variant_id in variant_indices_sorted:
            percentile_metrics_values = (
                self.benchmark_data.loc[self.benchmark_data['variant_id'] == variant_id][[
                    'inferencing task config',
                    'variant_id',
                    'metrics.batch_time_inferencing_p50_usecs',
                    'metrics.batch_time_inferencing_p90_usecs',
                    'metrics.batch_time_inferencing_p99_usecs'
                ]]
            ).dropna()
            if len(percentile_metrics_values) == 0:
                continue
            percentile_metrics = (
                percentile_metrics_values.pivot(
                    index=['inferencing task config'],
                    columns=['variant_id'],
                    values=['metrics.batch_time_inferencing_p50_usecs', 'metrics.batch_time_inferencing_p90_usecs', 'metrics.batch_time_inferencing_p99_usecs']
                )
            )
            # FIX: previous code used str.lstrip(prefix), which strips a *character set*,
            # not a prefix, and only produced the right result by accident.
            metric_prefix = "metrics.batch_time_inferencing_"
            percentile_metrics.columns = [
                col[0][len(metric_prefix):] if col[0].startswith(metric_prefix) else col[0]
                for col in percentile_metrics.columns
            ]
            percentile_metrics_reports.append(
                {
                    'variant_id' : variant_id,
                    'report' : percentile_metrics.to_markdown()
                }
            )
        # load the jinja template from local files
        with open(os.path.join(self.templates_dir, "inferencing.md"), "r") as i_file:
            template_str = i_file.read()
        # use jinja Template
        template_obj = Template(template_str)
        # render the template
        rendered_report = template_obj.render(
            variants_table=variants.to_markdown(),
            metrics_table=metrics.to_markdown(),
            percentile_metrics_reports=percentile_metrics_reports
        )
        # save or print
        if output_path:
            with open(output_path, "w") as o_file:
                o_file.write(rendered_report)
        else:
            print(rendered_report)
def run(args, unknown_args=[]):
    """Run script with arguments (the core of the component)

    Args:
        args (argparse.namespace): command line arguments provided to script
        unknown_args (list[str]): list of arguments not known
    """
    # get logger for general outputs
    logger = logging.getLogger()
    analysis_engine = AnalysisEngine()
    # guard clause: only the inferencing template exists so far
    if args.template != 'inferencing':
        raise NotImplementedError(f"Analysis template {args.template} does not exist (yet?)")
    if args.data_load:
        # reuse previously exported benchmark data, skip fetching
        analysis_engine.load_benchmark_data(args.data_load)
    else:
        if args.mlflow_target == 'azureml':
            # use helper to connect to AzureML
            print("Connecting to AzureML...")
            ws = azureml_connect_cli(args)
            mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
        elif args.mlflow_target == 'local':
            pass # nothing to do here
        else:
            raise NotImplementedError(f"--mlflow-target {args.mlflow_target} is not implemented (yet)")
        # querying runs for specific filters
        analysis_engine.fetch_benchmark_data(
            experiment_id=args.experiment_id,
            filter_string=f"tags.task = 'score' and tags.benchmark_name = '{args.benchmark_id}'"
        )
        if args.data_save:
            analysis_engine.save_benchmark_data(args.data_save)
    analysis_engine.report_inferencing(args.output)
def main(cli_args=None):
    """Component main function, parses arguments and executes run() function.

    Args:
        cli_args (List[str], optional): list of args to feed script, useful for debugging. Defaults to None.
    """
    # set up a console handler with a readable format on the root logger
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s'))
    root_logger.addHandler(handler)
    # parse CLI arguments (unknown ones are passed through to run())
    args, unknown_args = get_arg_parser().parse_known_args(cli_args)
    # bump verbosity if requested
    root_logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    # run the actual thing
    run(args, unknown_args)
if __name__ == "__main__":
main()
|
microsoft/lightgbm-benchmark | tests/common/test_lightgbm_utils.py | """Tests src/common/io.py"""
import os
import pytest
from unittest.mock import call, Mock, patch
from common.lightgbm_utils import LightGBMCallbackHandler
from lightgbm.callback import CallbackEnv
def test_lightgbm_callback_handler():
    """Unit test for LightGBMCallbackHandler.callback()."""
    mock_metrics_logger = Mock()
    handler = LightGBMCallbackHandler(
        mock_metrics_logger, metrics_prefix=None, metrics_suffix=None
    )
    # evaluation results as lightgbm provides them: (dataset, metric, value, _)
    eval_results = [
        ("valid_0", "rmse", 12345.0, None),
        ("valid_0", "l2", 3456.0, None),
    ]
    # namedtuple, positional fields
    # see https://lightgbm.readthedocs.io/en/latest/_modules/lightgbm/callback.html
    env = CallbackEnv(
        None,                  # model
        {"foo_param": 0.32},   # params
        3,                     # iteration
        0,                     # begin_iteration
        5,                     # end_iteration
        eval_results,          # evaluation result list
    )
    handler.callback(env)
    # each evaluation result should be logged as "<dataset>.<metric>" at the current iteration
    mock_metrics_logger.log_metric.assert_has_calls(
        [
            call(key="valid_0.rmse", value=12345.0, step=3),
            call(key="valid_0.l2", value=3456.0, step=3),
        ],
        any_order=True,
    )
|
microsoft/lightgbm-benchmark | tests/scripts/test_generate_data.py | <filename>tests/scripts/test_generate_data.py
"""
Executes the series of scripts end-to-end
to test LightGBM (python) manual benchmark
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch
from scripts.data_processing.generate_data import generate
def test_generate_regression_data(temporary_dir):
    """Tests src/scripts/data_processing/generate_data/generate.py (regression).

    Checks that exactly the requested number of partition files is produced
    for each output, plus the header file.
    """
    task_type = "regression"
    output_train = os.path.join(temporary_dir, task_type, "train")
    output_test = os.path.join(temporary_dir, task_type, "test")
    output_inference = os.path.join(temporary_dir, task_type, "inference")
    output_header = os.path.join(temporary_dir, task_type, "header")
    # create test arguments for the script
    script_args = [
        "generate.py",
        "--train_samples", "100",
        "--train_partitions", "4",
        "--test_samples", "10",
        "--test_partitions", "2",
        "--inferencing_samples", "100",
        "--inferencing_partitions", "1",
        "--n_features", "40",
        "--n_informative", "10",
        "--random_state", "5",
        "--output_train", output_train,
        "--output_test", output_test,
        "--output_inference", output_inference,
        "--output_header", output_header,
        "--type", task_type,
    ]
    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        generate.main()
    # test expected outputs: exactly 4 train, 2 test and 1 inference partitions
    expected_outputs = [
        (output_train, "train", 4),
        (output_test, "test", 2),
        (output_inference, "inference", 1),
    ]
    for output_dir, prefix, partitions in expected_outputs:
        for index in range(partitions):
            assert os.path.isfile(
                os.path.join(output_dir, f"{prefix}_{index}.txt")
            ), f"Script generate.py should generate {prefix}_{index}.txt under --output dir but did not"
        # no extra partition beyond the requested count
        assert not os.path.isfile(
            os.path.join(output_dir, f"{prefix}_{partitions}.txt")
        ), f"Script generate.py should NOT generate {prefix}_{partitions}.txt under --output dir but DID"
    assert os.path.isfile(
        os.path.join(output_header, "header.txt")
    ), "Script generate.py should generate header.txt under --output dir but did not"
def test_generate_classification_data(temporary_dir):
    """Tests src/scripts/data_processing/generate_data/generate.py (classification).

    Single (default) partition per output; verifies no extra partition files.
    """
    task_type = "classification"
    output_train = os.path.join(temporary_dir, task_type, "train")
    output_test = os.path.join(temporary_dir, task_type, "test")
    output_inference = os.path.join(temporary_dir, task_type, "inference")
    output_header = os.path.join(temporary_dir, task_type, "header")
    # create test arguments for the script
    # (--n_redundant only applies to the classification task)
    script_args = [
        "generate.py",
        "--train_samples", "100",
        "--test_samples", "10",
        "--inferencing_samples", "100",
        "--n_features", "40",
        "--n_informative", "10",
        "--random_state", "5",
        "--output_train", output_train,
        "--output_test", output_test,
        "--output_inference", output_inference,
        "--output_header", output_header,
        "--type", task_type,
        "--n_redundant", "5",
    ]
    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        generate.main()
    # test expected outputs: exactly one partition per output
    for output_dir, prefix in [
        (output_train, "train"),
        (output_test, "test"),
        (output_inference, "inference"),
    ]:
        assert os.path.isfile(
            os.path.join(output_dir, f"{prefix}_0.txt")
        ), f"Script generate.py should generate {prefix}_0.txt under --output dir but did not"
        assert not os.path.isfile(
            os.path.join(output_dir, f"{prefix}_1.txt")
        ), f"Script generate.py should NOT generate {prefix}_1.txt under --output dir but DID"
    assert os.path.isfile(
        os.path.join(output_header, "header.txt")
    ), "Script generate.py should generate header.txt under --output dir but did not"
def test_generate_ranking_data(temporary_dir):
    """Tests src/scripts/data_processing/generate_data/generate.py (lambdarank).

    Checks that exactly the requested number of partition files is produced
    for each output, plus the header file.
    """
    task_type = "lambdarank"
    output_train = os.path.join(temporary_dir, task_type, "train")
    output_test = os.path.join(temporary_dir, task_type, "test")
    output_inference = os.path.join(temporary_dir, task_type, "inference")
    output_header = os.path.join(temporary_dir, task_type, "header")
    # create test arguments for the script
    # (--docs_per_query / --n_label_classes only apply to the lambdarank task)
    script_args = [
        "generate.py",
        "--train_samples", "100",
        "--train_partitions", "4",
        "--test_samples", "10",
        "--test_partitions", "2",
        "--inferencing_samples", "100",
        "--inferencing_partitions", "1",
        "--n_features", "40",
        "--n_informative", "10",
        "--random_state", "5",
        "--output_train", output_train,
        "--output_test", output_test,
        "--output_inference", output_inference,
        "--output_header", output_header,
        "--type", task_type,
        "--docs_per_query", "5",
        "--n_label_classes", "3",
    ]
    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        generate.main()
    # test expected outputs: exactly 4 train, 2 test and 1 inference partitions
    expected_outputs = [
        (output_train, "train", 4),
        (output_test, "test", 2),
        (output_inference, "inference", 1),
    ]
    for output_dir, prefix, partitions in expected_outputs:
        for index in range(partitions):
            assert os.path.isfile(
                os.path.join(output_dir, f"{prefix}_{index}.txt")
            ), f"Script generate.py should generate {prefix}_{index}.txt under --output dir but did not"
        # no extra partition beyond the requested count
        assert not os.path.isfile(
            os.path.join(output_dir, f"{prefix}_{partitions}.txt")
        ), f"Script generate.py should NOT generate {prefix}_{partitions}.txt under --output dir but DID"
    assert os.path.isfile(
        os.path.join(output_header, "header.txt")
    ), "Script generate.py should generate header.txt under --output dir but did not"
microsoft/lightgbm-benchmark | src/scripts/data_processing/lightgbm_data2bin/data2bin.py | <filename>src/scripts/data_processing/lightgbm_data2bin/data2bin.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
LightGBM/Python dataset saving to binary
"""
import os
import sys
import argparse
import logging
import traceback
import json
from distutils.util import strtobool
import lightgbm
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import input_file_path, get_all_files
class LightGBMData2BinScript(RunnableScript):
    """Runnable script converting LightGBM text datasets (train + test) into
    LightGBM binary format, logging sizes and timings as metrics."""
    def __init__(self):
        # identify this task/framework to the RunnableScript base class
        super().__init__(
            task="data2bin",
            framework="lightgbm_python",
            framework_version=lightgbm.__version__
        )
    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.
        Args:
            parser (argparse.ArgumentParser): an argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)
        group_i = parser.add_argument_group("Input Data")
        group_i.add_argument("--train",
            required=True, type=input_file_path, help="Training data location (file path or dir path with unique file)")
        group_i.add_argument("--test",
            required=True, type=str, help="Testing data location (file or dir path with multiple files)")
        group_i.add_argument("--header", required=False, default=False, type=strtobool)
        group_i.add_argument("--label_column", required=False, default="0", type=str)
        group_i.add_argument("--group_column", required=False, default=None, type=str)
        group_o = parser.add_argument_group("Outputs")
        group_o.add_argument("--output_train",
            required=False, type=str, help="export binary train data (folder)")
        group_o.add_argument("--output_test",
            required=False, type=str, help="export binary test data (folder)")
        # learner params
        group_lgbm = parser.add_argument_group("LightGBM Dataset parameters")
        group_lgbm.add_argument("--max_bin", required=True, type=int)
        group_lgbm.add_argument("--custom_params", required=False, type=str, default=None)
        return parser
    def load_lgbm_params_from_cli(self, args):
        """Gets the right LightGBM parameters from argparse + mpi config
        Args:
            args (argparse.Namespace)
        Returns:
            lgbm_params (dict)
        """
        # copy all parameters from argparse
        cli_params = dict(vars(args))
        # removing arguments that are purely CLI
        # NOTE(review): 'verbose' and 'custom_properties' are presumably added by
        # RunnableScript.get_arg_parser (del raises KeyError otherwise) — confirm
        for key in ['verbose', 'custom_properties', 'output_train', 'output_test', 'test', 'train', 'custom_params']:
            del cli_params[key]
        # doing some fixes and hardcoded values
        lgbm_params = cli_params
        lgbm_params['verbose'] = 2
        lgbm_params['header'] = bool(args.header) # strtobool returns 0 or 1, lightgbm needs actual bool
        # process custom params: a json string whose keys override the CLI-derived values
        if args.custom_params:
            custom_params = json.loads(args.custom_params)
            lgbm_params.update(custom_params)
        return lgbm_params
    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)
        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # figure out the lgbm params from cli args
        lgbm_params = self.load_lgbm_params_from_cli(args)
        # make sure the output argument exists
        os.makedirs(args.output_train, exist_ok=True)
        # output_train becomes the full path of the binary train file
        args.output_train = os.path.join(args.output_train, "train.bin")
        os.makedirs(args.output_test, exist_ok=True)
        # log lgbm parameters
        logger.info(f"LGBM Params: {lgbm_params}")
        metrics_logger.log_parameters(**lgbm_params)
        # register logger for lightgbm logs
        lightgbm.register_logger(logger)
        logger.info(f"Loading data for training")
        with metrics_logger.log_time_block("time_data_loading.train"):
            # construct dataset
            train_data = lightgbm.Dataset(args.train, params=lgbm_params).construct()
            # capture data shape in metrics
            metrics_logger.log_metric(key="train_data.length", value=train_data.num_data())
            metrics_logger.log_metric(key="train_data.width", value=train_data.num_feature())
        with metrics_logger.log_time_block("time_data_saving.train"):
            # save train dataset in lightgbm binary format
            train_data.save_binary(args.output_train)
        with metrics_logger.log_time_block("time_data_loading.test"):
            # each file under --test becomes a validation set sharing train's bin mappers
            test_data_paths = get_all_files(args.test)
            val_datasets = [
                train_data.create_valid(test_data_path).construct() for test_data_path in test_data_paths
            ]
            # capture data shape in metrics
            for index, valid_data in enumerate(val_datasets):
                metrics_logger.log_metric(key=f"test_data.valid_{index}.length", value=valid_data.num_data())
                metrics_logger.log_metric(key=f"test_data.valid_{index}.width", value=valid_data.num_feature())
        with metrics_logger.log_time_block("time_data_saving.test"):
            # save each validation dataset in binary format
            for index, valid_data in enumerate(val_datasets):
                valid_data.save_binary(os.path.join(args.output_test, f"test_{index}.bin"))
def get_arg_parser(parser=None):
    """Module-level entry point required by shrike unit tests; delegates to the script class."""
    return LightGBMData2BinScript.get_arg_parser(parser)
def main(cli_args=None):
    """Module-level entry point required by shrike unit tests; delegates to the script class."""
    # the class entry point handles arg parsing, metrics and run()
    LightGBMData2BinScript.main(cli_args)
if __name__ == "__main__":
main()
|
microsoft/lightgbm-benchmark | src/pipelines/azureml/data_generation.py | <filename>src/pipelines/azureml/data_generation.py
"""
Generates synthetic data with multiple parameters.
See config file /conf/experiments/data-generation.yaml
to execute:
> python src/pipelines/azureml/data_generation.py --exp-config conf/experiments/data-generation.yaml
"""
# pylint: disable=no-member
# NOTE: because it raises 'dict' has no 'outputs' member in dsl.pipeline construction
import os
import sys
import json
import logging
import argparse
# config management
from dataclasses import dataclass
from omegaconf import OmegaConf, MISSING
from typing import Optional, List
# AzureML
from azure.ml.component import Component
from azure.ml.component import dsl
# when running this script directly, needed to import common
LIGHTGBM_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SCRIPTS_SOURCES_ROOT = os.path.join(LIGHTGBM_REPO_ROOT, 'src')
if SCRIPTS_SOURCES_ROOT not in sys.path:
logging.info(f"Adding {SCRIPTS_SOURCES_ROOT} to path")
sys.path.append(str(SCRIPTS_SOURCES_ROOT))
from common.tasks import data_generation_task
from common.pipelines import (
parse_pipeline_config,
azureml_connect,
pipeline_submit,
COMPONENTS_ROOT
)
### CONFIG DATACLASS ###
# Step 1 : to configure your pipeline, add all your fields inside a
# properly defined dataclass, pipeline_cli_main will figure out how
# to read that config from a given yaml file + hydra override commands
@dataclass
class data_generation_config: # pylint: disable=invalid-name
    """ Config object constructed as a dataclass.
    The name of this class will be used as namespace in your config yaml file.
    (the lowercase name is intentional: it maps directly to the yaml namespace)
    """
    # NOTE: all those values are REQUIRED in your yaml config file
    # name of the benchmark, attached to runs as a custom property
    benchmark_name: str = MISSING
    # one data generation task config per dataset to generate
    tasks: List[data_generation_task] = MISSING
    # OUTPUT REGISTRATION
    # if True, register generated train/test/inference outputs as AzureML datasets
    register_outputs: bool = False
    # name prefix used when registering output datasets
    register_outputs_prefix: str = "synthetic"
### PIPELINE COMPONENTS ###
# Step 2 : your pipeline consists in assembling components
# load those components from local yaml specifications
# use COMPONENTS_ROOT as base folder
# load the generate_data component from its local yaml specification (relative to COMPONENTS_ROOT)
generate_data_component = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "generate_data", "spec.yaml"))
### DATA GENERATION PIPELINE ###
# Step 3: your pipeline consists in creating a python function
# decorated with @dsl.pipeline.
# You can create as many subgraphs as you want,
# but `pipeline_cli_main` will need one pipeline function
# taking a single config argument, not a pipeline parameter.
@dsl.pipeline(
    name="generate_all_datasets", # pythonic name
    non_pipeline_parameters=["config"] # required to use config object
)
def data_generation_main_pipeline_function(config):
    """Pipeline's main building function.

    Builds one generate_data step per task listed in
    config.data_generation_config.tasks, and optionally registers the
    train/test/inference outputs as AzureML datasets.

    Args:
        config (DictObject): the pipeline configuration object containing pipeline config dataclass
            This is not an actual pipeline parameter
    Returns:
        None
    """
    # custom properties attached to each run, used later for benchmark analysis
    benchmark_custom_properties = json.dumps({
        'benchmark_name' : config.data_generation_config.benchmark_name
    })
    # for each task provided in the general config
    for generation_task in config.data_generation_config.tasks:
        # run a generation step with the right parameters
        generate_data_step = generate_data_component(
            learning_task = generation_task.task,
            train_samples = generation_task.train_samples,
            train_partitions = generation_task.train_partitions,
            test_samples = generation_task.test_samples,
            test_partitions = generation_task.test_partitions,
            inferencing_samples = generation_task.inferencing_samples,
            inferencing_partitions = generation_task.inferencing_partitions,
            n_features = generation_task.n_features,
            n_informative = generation_task.n_informative,
            n_label_classes = generation_task.n_label_classes,
            docs_per_query = generation_task.docs_per_query,
            delimiter = generation_task.delimiter,
            random_state = 5,
            verbose = False,
            custom_properties = benchmark_custom_properties
        )
        # run it on the right compute target
        generate_data_step.runsettings.configure(target=config.compute.linux_cpu)
        # if config asks to register the outputs automatically...
        if config.data_generation_config.register_outputs:
            # create a prefix for the dataset, e.g. "synthetic-regression-40cols"
            dataset_prefix = "{prefix}-{task}-{cols}cols".format(
                prefix=config.data_generation_config.register_outputs_prefix,
                task=generation_task.task,
                cols=generation_task.n_features
            )
            # register each output (train, test, inference)
            generate_data_step.outputs.output_train.register_as(
                name=f"{dataset_prefix}-{generation_task.train_samples}samples-train",
                create_new_version=True,
                tags={ # add tags that will show up in AzureML
                    'type':'train',
                    'task':generation_task.task,
                    'origin':'synthetic',
                    'samples':generation_task.train_samples,
                    'features':generation_task.n_features,
                    'informative':generation_task.n_informative
                }
            )
            generate_data_step.outputs.output_test.register_as(
                name=f"{dataset_prefix}-{generation_task.test_samples}samples-test",
                create_new_version=True,
                tags={ # add tags that will show up in AzureML
                    'type':'test',
                    'task':generation_task.task,
                    'origin':'synthetic',
                    'samples':generation_task.test_samples,
                    'features':generation_task.n_features,
                    'informative':generation_task.n_informative
                }
            )
            generate_data_step.outputs.output_inference.register_as(
                name=f"{dataset_prefix}-{generation_task.inferencing_samples}samples-inference",
                create_new_version=True,
                tags={ # add tags that will show up in AzureML
                    'type':'inference',
                    'task':generation_task.task,
                    'origin':'synthetic',
                    'samples':generation_task.inferencing_samples,
                    'features':generation_task.n_features,
                    'informative':generation_task.n_informative
                }
            )
### MAIN BLOCK ###
# Step 4: implement main block using helper functions
def main():
    """Parse CLI config, connect to AzureML and submit the data generation pipeline."""
    # parse arguments from the command line into a config object
    pipeline_config = parse_pipeline_config(data_generation_config)
    # connect to the AzureML workspace
    workspace = azureml_connect(pipeline_config)
    # build the pipeline instance from the parsed config
    pipeline_instance = data_generation_main_pipeline_function(pipeline_config)
    # assemble a markdown description embedding the config as a yaml block
    config_as_yaml = OmegaConf.to_yaml(pipeline_config.data_generation_config)
    experiment_description = (
        "Generating synthetic datasets (see yaml below).\n"
        "```yaml\n"
        "data_generation_config:\n"
        f"{config_as_yaml}\n"
        "```"
    )
    # validate/submit the pipeline (if run.submit=True)
    pipeline_submit(
        workspace,
        pipeline_config,
        pipeline_instance,
        experiment_description=experiment_description
    )

if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | tests/common/test_metrics.py | """Tests src/common/metrics.py"""
import os
import pytest
from unittest.mock import call, Mock, patch
import time
import platform
import psutil
from common.metrics import MetricsLogger
@patch('mlflow.end_run')
@patch('mlflow.start_run')
def test_unique_mlflow_parallel_initialization(mlflow_start_run_mock, mlflow_end_run_mock):
    """ Tests MetricsLogger() unique initialization of mlflow.

    Two loggers with overlapping open()/close() lifetimes must share a single
    mlflow run: only the first open() starts it, only the last close() ends it.
    """
    # if open is called twice, we initialize mlflow only once
    metrics_logger = MetricsLogger()
    metrics_logger_2 = MetricsLogger()
    metrics_logger.open()
    metrics_logger_2.open()
    metrics_logger.close()
    metrics_logger_2.close()
    # start_run()/end_run() must each fire exactly once despite two loggers
    mlflow_start_run_mock.assert_called_once()
    mlflow_end_run_mock.assert_called_once()
@patch('mlflow.end_run')
@patch('mlflow.start_run')
def test_unique_mlflow_sequence_initialization(mlflow_start_run_mock, mlflow_end_run_mock):
    """ Tests MetricsLogger() unique initialization of mlflow.

    Unlike the parallel case, fully closing a session and opening a new one
    must start a fresh mlflow run each time.
    """
    # when open/close multiple times in a sequence, we initialize mlflow each time
    metrics_logger = MetricsLogger()
    metrics_logger.open()
    metrics_logger.close()
    # first session: one start, one end
    assert mlflow_start_run_mock.call_count == 1
    assert mlflow_end_run_mock.call_count == 1
    metrics_logger_2 = MetricsLogger()
    metrics_logger_2.open()
    metrics_logger_2.close()
    # second session: counters increment again
    assert mlflow_start_run_mock.call_count == 2
    assert mlflow_end_run_mock.call_count == 2
@patch('mlflow.log_metric')
def test_metrics_logger_log_metric(mlflow_log_metric_mock):
    """ Tests MetricsLogger().log_metric() """
    # without a prefix, key/value/step are forwarded to mlflow unchanged
    logger_under_test = MetricsLogger()
    logger_under_test.open()
    logger_under_test.log_metric("foo", "bar", step=16)
    logger_under_test.close()
    mlflow_log_metric_mock.assert_called_with("foo", "bar", step=16)
@patch('mlflow.log_metric')
def test_metrics_logger_log_metric_with_prefix(mlflow_log_metric_mock):
    """ Tests MetricsLogger().log_metric() """
    # a metrics_prefix must be prepended to the metric key before reaching mlflow
    prefixed_logger = MetricsLogger(metrics_prefix="foo/")
    prefixed_logger.open()
    prefixed_logger.log_metric("foo", "bar", step=16)
    prefixed_logger.close()
    mlflow_log_metric_mock.assert_called_with("foo/foo", "bar", step=16)
@patch('mlflow.log_metric')
def test_mlflow_metrics_logger_log_metric_with_prefix_2sessions(mlflow_log_metric_mock):
    """ Tests MetricsLogger().log_metric() with two concurrent loggers.

    Each logger instance must keep its own prefix, even while both are open.
    """
    # when initializing multiple times, each logger has its own prefix
    metrics_logger = MetricsLogger(metrics_prefix="foo/")
    metrics_logger.open()
    metrics_logger_2 = MetricsLogger()
    metrics_logger_2.open()
    # first logger applies its "foo/" prefix to the key
    metrics_logger.log_metric("foo", "bar", step=16)
    mlflow_log_metric_mock.assert_called_with(
        "foo/foo", "bar", step=16
    )
    # second logger has no prefix, so its key passes through unchanged
    metrics_logger_2.log_metric("foo2", "bar2", step=12)
    mlflow_log_metric_mock.assert_called_with(
        "foo2", "bar2", step=12
    )
    metrics_logger.close()
    metrics_logger_2.close()
@patch('mlflow.log_metric')
def test_metrics_logger_log_metric_too_long(mlflow_log_metric_mock):
    """ Tests MetricsLogger().log_metric() truncates over-long metric keys."""
    metrics_logger = MetricsLogger()
    metrics_logger.open()
    metric_key = "x" * 250
    # BUGFIX: `assert len(metric_key), 250` was always true — the 250 was
    # parsed as the assert *message*, not a comparison. Compare explicitly.
    assert len(metric_key) == 250
    short_metric_key = "x" * 50
    assert len(short_metric_key) == 50
    metrics_logger.log_metric(
        metric_key, "bar", step=15
    )
    metrics_logger.close()
    # the 250-char key must be truncated to the 50-char limit before mlflow
    mlflow_log_metric_mock.assert_called_with(
        short_metric_key, "bar", step=15
    )
def test_metrics_logger_log_metric_non_allowed_chars():
    """ Tests MetricsLogger()._remove_non_allowed_chars() sanitization."""
    # (input, expected) pairs: special chars stripped, clean key untouched,
    # and a realistic lightgbm metric name with '@' removed
    cases = [
        ("a!@$b%^&c_-/d", "abc_-/d"),
        ("abcd", "abcd"),
        ("node_0/valid_0.ndcg@1", "node_0/valid_0.ndcg1"),
    ]
    for raw_key, sanitized_key in cases:
        assert MetricsLogger._remove_non_allowed_chars(raw_key) == sanitized_key
@patch('mlflow.set_tags')
def test_metrics_logger_set_properties(mlflow_set_tags_mock):
    """ Tests MetricsLogger().set_properties() """
    metrics_logger = MetricsLogger()
    metrics_logger.open()
    metrics_logger.set_properties(
        key1 = "foo",
        key2 = 0.45
    )
    metrics_logger.close()
    # kwargs are forwarded as a single dict to mlflow.set_tags(),
    # values are passed through with their original types
    mlflow_set_tags_mock.assert_called_with(
        { 'key1' : "foo", 'key2' : 0.45 }
    )
@patch('mlflow.set_tags')
def test_metrics_logger_set_platform_properties(mlflow_set_tags_mock):
    """ Tests MetricsLogger().set_platform_properties() """
    metrics_logger = MetricsLogger()
    metrics_logger.open()
    # expected tags, computed here with the same platform/psutil/os calls
    # the implementation is expected to make (values are host-dependent)
    platform_properties = {
        "machine":platform.machine(),
        "processor":platform.processor(),
        "system":platform.system(),
        "system_version":platform.version(),
        "cpu_count":os.cpu_count(),
        "architecture":"-".join(platform.architecture()),
        "platform":platform.platform(),
        "cpu_frequency":round(psutil.cpu_freq().current),
        # total RAM in GiB
        "system_memory":round((psutil.virtual_memory().total) / (1024*1024*1024))
    }
    metrics_logger.set_platform_properties()
    metrics_logger.close()
    mlflow_set_tags_mock.assert_called_with(
        platform_properties
    )
@patch('mlflow.set_tags')
def test_metrics_logger_set_properties_from_json(mlflow_set_tags_mock):
    """ Tests MetricsLogger().set_properties_from_json() """
    metrics_logger = MetricsLogger()
    metrics_logger.open()
    metrics_logger.set_properties_from_json(
        "{ \"key1\" : \"foo\", \"key2\" : 0.45 }"
    )
    # NOTE: values parsed from json are stringified ('0.45', not 0.45)
    mlflow_set_tags_mock.assert_called_with(
        { 'key1' : "foo", 'key2' : '0.45' }
    )
    # test failure during json parsing (single quotes / bare token is invalid JSON)
    with pytest.raises(ValueError) as exc_info:
        metrics_logger.set_properties_from_json(
            "{ 'foo': NOTHING }"
        )
    # making sure it's the right exception
    assert str(exc_info.value).startswith("During parsing of JSON properties")
    # test failure if dict is not provided (valid JSON, but a list)
    with pytest.raises(ValueError) as exc_info:
        metrics_logger.set_properties_from_json(
            "[\"bla\", \"foo\"]"
        )
    # making sure it's the right exception
    assert str(exc_info.value).startswith("Provided JSON properties should be a dict")
    metrics_logger.close()
@patch('mlflow.log_param')
def test_metrics_logger_log_parameters(mlflow_log_param_mock):
    """ Tests MetricsLogger().log_parameters() """
    metrics_logger = MetricsLogger()
    metrics_logger.open()
    metrics_logger.log_parameters(
        key1 = "foo",
        key2 = 0.45,
        # deliberately exceeds mlflow's param value length limit;
        # its handling is not asserted below, only that valid params get through
        str_way_too_long = ("*" * 1024)
    )
    metrics_logger.close()
    # each kwarg becomes its own mlflow.log_param() call
    mlflow_log_param_mock.assert_has_calls(
        [
            call("key1", "foo"),
            call("key2", 0.45),
        ],
        any_order=True
    )
@patch('mlflow.log_metric')
def test_metrics_logger_log_time_block(mlflow_log_metric_mock):
    """ Tests MetricsLogger().log_time_block() """
    metrics_logger = MetricsLogger(metrics_prefix="foo_time_block/")
    metrics_logger.open()
    # timing context manager: logs one metric on exit
    with metrics_logger.log_time_block("foo_metric", step=2):
        time.sleep(0.01)
    metrics_logger.close()
    # there should be only one call in this case
    metric_calls = mlflow_log_metric_mock.call_args_list
    assert mlflow_log_metric_mock.call_count == 1
    assert len(metric_calls) == 1
    # test metric key argument (prefix applied) and step kwarg pass-through;
    # the elapsed-time value itself is not asserted (non-deterministic)
    assert (metric_calls[0].args[0] == "foo_time_block/foo_metric")
    assert (metric_calls[0].kwargs["step"] == 2)
@patch('mlflow.log_figure')
@patch('mlflow.log_metric')
def test_log_inferencing_latencies(mlflow_log_metric_mock, mlflow_log_figure_mock):
    """ Tests MetricsLogger().log_inferencing_latencies() """
    metrics_logger = MetricsLogger()
    metrics_logger.open()
    # 8 batches: seven of size 1 plus one batch of 5 queries taking 5.0s
    test_latencies = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 5.0]
    test_batch_sizes = [1, 1, 1, 1, 1, 1, 1, 5]
    # factor_to_usecs=1e6 converts seconds to microseconds in the logged metrics
    metrics_logger.log_inferencing_latencies(test_latencies, batch_length=test_batch_sizes, factor_to_usecs=1000000.0)
    metrics_logger.close()
    #assert mlflow_log_metric_mock.call_args_list == []
    mlflow_log_metric_mock.assert_has_calls(
        [
            call("prediction_batches", 8, step=None), # len(test_latencies)
            call("prediction_queries", 12, step=None), # sum(test_batch_sizes)
            # reference values based on test_latencies above
            call('prediction_latency_avg', 650000.0, step=None),
            call('batch_latency_p50_usecs', 450000.0, step=None),
            call('batch_latency_p75_usecs', 625000.0, step=None),
            call('batch_latency_p90_usecs', 1989999.9999999993, step=None),
            call('batch_latency_p95_usecs', 3494999.9999999977, step=None),
            call('batch_latency_p99_usecs', 4698999.999999999, step=None),
            call('prediction_latency_p50_usecs', 450000.0, step=None),
            call('prediction_latency_p75_usecs', 625000.0, step=None),
            call('prediction_latency_p90_usecs', 790000.0, step=None),
            call('prediction_latency_p95_usecs', 894999.9999999999, step=None),
            call('prediction_latency_p99_usecs', 978999.9999999999, step=None),
        ],
        any_order=True
    )
    # testing logging figures: one histogram per latency family
    figure_calls = mlflow_log_figure_mock.call_args_list
    assert mlflow_log_figure_mock.call_count == 2
    assert len(figure_calls) == 2
    # second argument of each call is file name
    assert (figure_calls[0].args[1] == "batch_latency_log_histogram.png")
    assert (figure_calls[1].args[1] == "prediction_latency_log_histogram.png")
|
microsoft/lightgbm-benchmark | tests/common/test_math.py | """Tests src/common/math.py"""
import os
import pytest
import numpy as np
from common.math import bootstrap_ci
def test_bootstrap_ci_fixed_seed():
    """Testing the bootstrap_ci method, but we can't have a non-deterministic test here.

    Fixing numpy's global seed makes the bootstrap resampling deterministic,
    so exact expected values can be asserted.
    """
    sample_data = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 5.0])
    # one aggregation operator per key; each yields its own CI tuple
    operators={
        'mean':np.mean,
        'p90': (lambda x : np.percentile(x, 90)),
        'p99': (lambda x : np.percentile(x, 99)),
    }
    np.random.seed(404) # fixed const
    # because we're fixing the seed, we can actually go deeper
    # each tuple is (ci_left, mean, ci_right) — see test_bootstrap_ci_no_seed
    expected_values = {
        'mean': (0.30000000000000004, 0.99395, 2.1624999999999996),
        'p90': (0.5, 2.36413, 5.0),
        'p99': (0.593, 3.469213, 5.0),
    }
    returned_values = bootstrap_ci(
        sample_data,
        iterations=1000,
        operators=operators,
        confidence_level=0.95
    )
    assert returned_values == expected_values
def test_bootstrap_ci_no_seed():
    """Testing the bootstrap_ci method, but we can't have a non-deterministic test here.

    With no seed, only structural invariants of the returned confidence
    intervals are asserted, not exact values.
    """
    np.random.seed(None) # not const
    sample_data = np.random.rand(100)
    operators={
        'mean':np.mean,
        'p90': (lambda x : np.percentile(x, 90)),
        'p99': (lambda x : np.percentile(x, 99)),
    }
    returned_values = bootstrap_ci(
        sample_data,
        iterations=1000,
        operators=operators,
        confidence_level=0.95
    )
    for key in operators:
        # check type: one (left, mean, right) tuple per operator
        assert key in returned_values
        assert isinstance(returned_values[key], tuple)
        assert len(returned_values[key]) == 3
        # basic interval ordering
        ci_left, ci_mean, ci_right = returned_values[key]
        assert ci_left <= ci_mean
        assert ci_mean <= ci_right
        # because it's a bootstrap (resampling with replacement), bounds
        # cannot fall outside the observed sample range
        assert min(sample_data) <= ci_left
        assert ci_right <= max(sample_data)
    # tests that are specific to the operators
    assert returned_values['p90'][0] <= returned_values['p99'][0] # p90 < p99 so left CI also
    assert returned_values['p90'][1] <= returned_values['p99'][1] # p90 < p99 so mean also
    assert returned_values['p90'][2] <= returned_values['p99'][2] # p90 < p99 so right CI also
|
microsoft/lightgbm-benchmark | tests/scripts/test_lightgbm_data2bin.py | <filename>tests/scripts/test_lightgbm_data2bin.py
"""
test src/scripts/partition_data/partition.py
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch
from scripts.data_processing.lightgbm_data2bin import data2bin
# IMPORTANT: see conftest.py for fixtures
def test_lightgbm_data2bin(temporary_dir, regression_train_sample, regression_test_sample):
    """Tests src/scripts/data_processing/lightgbm_data2bin/data2bin.py"""
    train_output_dir = os.path.join(temporary_dir, "binary_train_data")
    test_output_dir = os.path.join(temporary_dir, "binary_test_data")
    # simulate the script's command line
    script_args = [
        "data2bin.py",
        "--train", regression_train_sample,
        "--test", regression_test_sample,
        "--output_train", train_output_dir,
        "--output_test", test_output_dir,
        "--header", "False",
        "--label_column", "0",
        "--max_bin", "255",
    ]
    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        data2bin.main()
    # the script must have produced one binary file per dataset
    expected_files = [
        os.path.join(train_output_dir, "train.bin"),
        os.path.join(test_output_dir, "test_0.bin"),
    ]
    for expected_file in expected_files:
        assert os.path.isfile(expected_file)
|
microsoft/lightgbm-benchmark | src/scripts/inferencing/treelite_python/score.py | <gh_stars>10-100
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
TreeLite/Python inferencing script
"""
import os
import sys
import argparse
import logging
import numpy
from distutils.util import strtobool
import pandas as pd
import treelite, treelite_runtime
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import input_file_path
class TreeLightInferencingScript(RunnableScript):
    """Benchmark script running inferencing with a treelite-compiled model (.so)."""
    def __init__(self):
        super().__init__(
            task = 'score',
            framework = 'treelite_python',
            framework_version = treelite.__version__
        )

    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.
        Args:
            parser (argparse.ArgumentParser): an argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)
        group_i = parser.add_argument_group("Input Data")
        group_i.add_argument("--data",
            required=True, type=input_file_path, help="Inferencing data location (file path)")
        group_i.add_argument("--so_path",
            required=False, default = "./mymodel.so" , help="full path to model so")
        group_i.add_argument("--output",
            required=False, default=None, type=str, help="Inferencing output location (file path)")
        group_params = parser.add_argument_group("Scoring parameters")
        group_params.add_argument("--num_threads",
            required=False, default=1, type=int, help="number of threads")
        return parser

    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)
        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # record relevant parameters
        metrics_logger.log_parameters(
            num_threads=args.num_threads
        )
        if args.output:
            # make sure the output argument exists
            os.makedirs(args.output, exist_ok=True)
            # and create your own file inside the output
            args.output = os.path.join(args.output, "predictions.txt")
        logger.info("Loading data for inferencing")
        with metrics_logger.log_time_block("time_data_loading"):
            my_data = pd.read_csv(args.data).to_numpy()
            predictor = treelite_runtime.Predictor(
                args.so_path,
                verbose=True,
                nthread=args.num_threads
            )
            dmat = treelite_runtime.DMatrix(my_data)
        logger.info("Running .predict()")
        with metrics_logger.log_time_block("time_inferencing"):
            predictions = predictor.predict(dmat)
        # BUGFIX: the predictions.txt path was prepared above but never written;
        # persist the predictions when an output location was provided
        # (done outside the timing block so it doesn't skew time_inferencing)
        if args.output:
            numpy.savetxt(args.output, predictions)
# module-level entry points kept for compatibility with shrike unit tests
def get_arg_parser(parser=None):
    """ To ensure compatibility with shrike unit tests """
    return TreeLightInferencingScript.get_arg_parser(parser)

def main(cli_args=None):
    """ To ensure compatibility with shrike unit tests """
    TreeLightInferencingScript.main(cli_args)

if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | tests/common/test_component.py | """Tests src/common/io.py"""
import os
import pytest
from unittest.mock import call, Mock, patch
import time
import json
from common.components import SingleNodeScript
from common.metrics import MetricsLogger
def assert_runnable_script_properties(script_instance: SingleNodeScript, benchmark_name: str, mlflow_set_tags_mock: Mock):
    """Tests properties recorded by a SingleNodeScript class.

    Expects exactly three mlflow.set_tags() calls, in order:
    benchmark properties, custom properties, platform properties.
    """
    tags_calls = mlflow_set_tags_mock.call_args_list
    assert len(tags_calls) == 3
    # benchmark common properties
    assert (tags_calls[0].args[0] == {
        "task": script_instance.task,
        "framework": script_instance.framework,
        "framework_version": script_instance.framework_version
    }), "first call to set_tags() is supposed to be for benchmark properties"
    # custom properties
    assert (tags_calls[1].args[0] == {"benchmark_name": benchmark_name}), "2nd call to set_tags() is for custom properties parsed from json argument"
    # test all platform properties (presence only — values are host-dependent)
    platform_property_keys = [
        "machine",
        "processor",
        "system",
        "system_version",
        "cpu_count",
        "architecture",
        "platform",
        "cpu_frequency",
        "system_memory"
    ]
    for key in platform_property_keys:
        assert key in tags_calls[2].args[0], f"platform property {key} is expected in the 3nd call to set_tags()"
def assert_runnable_script_metrics(script_instance: SingleNodeScript, user_metrics: list, mlflow_log_metric_mock: Mock):
    """Tests metrics recorded by a SingleNodeScript class.

    Args:
        script_instance (SingleNodeScript): the script instance returned by main()
        user_metrics (list[dict]): expected user metrics in logging order; each
            dict may provide 'key', 'value' and/or 'step' to assert on
        mlflow_log_metric_mock (Mock): patched mlflow.log_metric
    """
    # now let's test all metrics
    metrics_calls = mlflow_log_metric_mock.call_args_list
    # N user metrics + 11 performance metrics
    assert len(metrics_calls) == (11 + len(user_metrics))
    # user metric testing
    assert isinstance(user_metrics, list)
    # BUGFIX: previously every entry was compared against metrics_calls[0],
    # so only the first user metric was actually verified; align each expected
    # entry with its own recorded call instead.
    for index, entry in enumerate(user_metrics):
        assert isinstance(entry, dict)
        if 'key' in entry:
            assert metrics_calls[index].args[0] == entry['key']
        assert isinstance(metrics_calls[index].args[1], float)
        if 'value' in entry:
            assert metrics_calls[index].args[1] == entry['value']
        assert "step" in metrics_calls[index].kwargs
        if 'step' in entry:
            assert metrics_calls[index].kwargs["step"] == entry['step']
    # perf metrics
    perf_metrics_call_args = [
        "max_t_(cpu_pct_per_cpu_avg)",
        "max_t_(cpu_pct_per_cpu_min)",
        "max_t_(cpu_pct_per_cpu_max)",
        "max_t_(mem_percent)",
        "max_t_(disk_usage_percent)",
        "max_t_(disk_io_read_mb)",
        "max_t_(disk_io_write_mb)",
        "max_t_(net_io_lo_sent_mb)",
        "max_t_(net_io_ext_sent_mb)",
        "max_t_(net_io_lo_recv_mb)",
        "max_t_(net_io_ext_recv_mb)",
    ]
    # perf metrics are logged after the user metrics, hence the offset
    # (BUGFIX: the offset was hardcoded to +1, assuming exactly one user metric)
    offset = len(user_metrics)
    for index, metric_key in enumerate(perf_metrics_call_args):
        assert metrics_calls[index + offset].args[0] == MetricsLogger._remove_non_allowed_chars(metric_key)
        assert "step" in metrics_calls[index + offset].kwargs
        assert metrics_calls[index + offset].kwargs["step"] == 0 # using node id as step
class FakeSingleNodeScript(SingleNodeScript):
    """Minimal SingleNodeScript used to exercise main()/metrics plumbing in tests."""
    def __init__(self):
        super().__init__(
            task="unittest",
            framework="pytest",
            framework_version=pytest.__version__
        )

    def run(self, args, logger, metrics_logger, unknown_args):
        # don't do anything, just log one timed block so the test
        # can assert on a single user metric
        with metrics_logger.log_time_block("fake_time_block", step=1):
            time.sleep(1)
@patch('mlflow.end_run')
@patch('mlflow.log_metric')
@patch('mlflow.set_tags')
@patch('mlflow.start_run')
def test_single_node_script_metrics(mlflow_start_run_mock, mlflow_set_tags_mock, mlflow_log_metric_mock, mlflow_end_run_mock):
    """End-to-end run of FakeSingleNodeScript.main() checking all mlflow calls.

    NOTE: @patch decorators apply bottom-up, so mock arguments arrive in
    the order start_run, set_tags, log_metric, end_run.
    """
    # just run main
    test_component = FakeSingleNodeScript.main(
        [
            "foo.py",
            "--verbose", "True",
            "--custom_properties", json.dumps({'benchmark_name':'unittest'})
        ]
    )
    # mlflow initialization: exactly one run started and ended
    mlflow_start_run_mock.assert_called_once()
    mlflow_end_run_mock.assert_called_once()
    assert_runnable_script_properties(
        test_component,
        "unittest",
        mlflow_set_tags_mock
    )
    assert_runnable_script_metrics(
        test_component,
        [{'key':'fake_time_block', 'step':1}], # user_metrics
        mlflow_log_metric_mock
    )
class FailingSingleNodeScript(SingleNodeScript):
    """SingleNodeScript whose run() always raises, to test failure propagation."""
    def __init__(self):
        super().__init__(
            task="failure",
            framework="pytest",
            framework_version=pytest.__version__
        )

    def run(self, args, logger, metrics_logger, unknown_args):
        # log one timed block, then fail deliberately
        with metrics_logger.log_time_block("fake_time_block", step=1):
            time.sleep(1)
        raise Exception("Some fake issue occured during code!")
def test_failure_single_node_script_metrics():
    """A failing run() must propagate its exception out of main()."""
    # just run main and expect the exception raised by run() to escape
    with pytest.raises(Exception) as exc_info:
        FailingSingleNodeScript.main(
            [
                "foo.py",
                "--verbose", "True",
                "--custom_properties", json.dumps({'benchmark_name':'unittest'})
            ]
        )
    # IMPROVED: the captured exception info was previously unused; verify it
    # really is the exception raised inside FailingSingleNodeScript.run()
    assert "Some fake issue occured during code!" in str(exc_info.value)
|
microsoft/lightgbm-benchmark | src/scripts/inferencing/custom_win_cli/score.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Custom Binaries inferencing script
"""
import os
import sys
import argparse
import logging
from distutils.util import strtobool
from subprocess import PIPE
from subprocess import run as subprocess_run
from subprocess import TimeoutExpired
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import input_file_path
# STEP 1 : provide the name of your binary executable
# (copy it inside the static_binaries subfolder)
BINARY_FILE_NAME = "lightgbm.exe" # <<< rename to fit your binary
BINARIES_FOLDER = os.path.join(os.path.dirname(__file__), "static_binaries")
BINARY_FILE_PATH = os.path.join(os.path.dirname(__file__), "static_binaries", BINARY_FILE_NAME)
class CustomCLIInferencingScript(RunnableScript):
    """Benchmark inferencing script wrapping a custom CLI binary via subprocess."""
    def __init__(self):
        # STEP 2 : feel free to update those to reflect your custom binary framework/version
        super().__init__(
            task="score",
            framework="custom_bin",
            framework_version=BINARY_FILE_NAME
        )

    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.
        Args:
            parser (argparse.ArgumentParser): an argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)
        # STEP 3 : below are the arguments that will be passed
        # by the inferencing benchmark pipeline, if you want to add more arguments
        # you will have to modify the pipeline itself
        # alternatively, you can hardcode values in the custom_cli_command list below (see STEP 4)
        group_i = parser.add_argument_group("Input Data")
        group_i.add_argument("--data",
            required=True, type=input_file_path, help="Inferencing data location (file path)")
        group_i.add_argument("--model",
            required=True, type=input_file_path, help="Exported model location")
        group_i.add_argument("--output",
            required=False, default=None, type=str, help="Inferencing output location (file path)")
        group_params = parser.add_argument_group("Scoring parameters")
        group_params.add_argument("--num_threads",
            required=False, default=1, type=int, help="number of threads")
        return parser

    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)
        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # record relevant parameters
        metrics_logger.log_parameters(
            num_threads=args.num_threads
        )
        if args.output:
            # make sure the output argument exists
            os.makedirs(args.output, exist_ok=True)
            # and create your own file inside the output
            args.output = os.path.join(args.output, "predictions.txt")
        # STEP 4: write the command for your custom cli as a list
        # the example below corresponds to commands for lightgbm_cli.exe
        # see https://lightgbm.readthedocs.io/en/latest/Parameters.html
        custom_cli_command = [
            BINARY_FILE_PATH,
            "task=prediction",
            f"model={args.model}",
            f"data={args.data}",
            "verbosity=2",
            f"num_threads={args.num_threads}",
            #f"predict_disable_shape_check=True"
        ]
        # STEP 5 : if you need to add an output
        # the example below corresponds to commands for lightgbm_cli.exe
        if args.output:
            # BUGFIX: there was a stray space before '=' ("output_result ="),
            # producing a malformed key=value argument for the LightGBM CLI
            custom_cli_command.append(f"output_result={args.output}")
        logger.info(f"Running custom command: {custom_cli_command}")
        with metrics_logger.log_time_block(metric_name="time_inferencing"):
            custom_cli_call = subprocess_run(
                custom_cli_command,
                stdout=PIPE,
                stderr=PIPE,
                universal_newlines=True,
                check=False, # will not raise an exception if subprocess fails (so we capture with .returncode)
                timeout=None
            )
        # logging happens outside the timed block so it doesn't skew the metric
        logger.info(f"RETURN CODE: {custom_cli_call.returncode}")
        logger.info(f"STDOUT: {custom_cli_call.stdout}")
        logger.info(f"STDERR: {custom_cli_call.stderr}")
        # OPTIONAL: apply any post processing on logs here (ex: extract metrics)
# module-level entry points kept for compatibility with shrike unit tests
def get_arg_parser(parser=None):
    """ To ensure compatibility with shrike unit tests """
    return CustomCLIInferencingScript.get_arg_parser(parser)

def main(cli_args=None):
    """ To ensure compatibility with shrike unit tests """
    CustomCLIInferencingScript.main(cli_args)

if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | src/scripts/data_processing/partition_data/partition.py | """
Partitions input data (text/lines) into chunks for parallel processing.
NOTE: current script assumes all records are independent.
"""
import os
import sys
import argparse
import logging
from distutils.util import strtobool
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.io import PartitioningEngine
from common.components import RunnableScript
class PartitionDataScript(RunnableScript):
    """Benchmark script partitioning a text dataset into multiple files."""
    def __init__(self):
        super().__init__(
            task = "partition",
            framework = "python",
            framework_version = "n/a"
        )

    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.
        Args:
            parser (argparse.ArgumentParser): an argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)
        # add arguments that are specific to the script
        group = parser.add_argument_group('Partitioning arguments')
        group.add_argument("--input", dest="input", type=str, required=True, help="file/directory to split")
        group.add_argument("--output", dest="output", type=str, help="location to store partitioned files", required=True)
        # valid modes are defined by the partitioning engine itself
        group.add_argument("--mode", type=str, choices=PartitioningEngine.PARTITION_MODES, required=True, help="Partitioning mode")
        group.add_argument("--number", type=int, required=True, help="If roundrobin number of partition, if chunk number of records per partition")
        group.add_argument("--header", type=strtobool, required=False, default=False, help="Should we preserve firstline into each partition?")
        return parser

    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)
        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # Create output folder
        os.makedirs(args.output, exist_ok=True)
        # create instance of partitioner
        partition_engine = PartitioningEngine(
            mode = args.mode,
            number = args.number,
            header = args.header,
            logger=logger
        )
        # simply run, timing the whole partitioning as one metric
        logger.info(f"Running partitioning...")
        with metrics_logger.log_time_block("time_partitioning"):
            partition_engine.run(args.input, args.output)
# module-level entry points kept for compatibility with shrike unit tests
def get_arg_parser(parser=None):
    """ To ensure compatibility with shrike unit tests """
    return PartitionDataScript.get_arg_parser(parser)

def main(cli_args=None):
    """ To ensure compatibility with shrike unit tests """
    PartitionDataScript.main(cli_args)

if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | src/scripts/sample/sample.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
This script is a tutorial sample script to explain how all the benchmark
scripts are structured and standardized using the `RunnableScript` helper class.
We've numbered the steps you need to modify and adapt this sample script
to your own needs.
Follow each STEP below, and their associated TODO.
"""
import os
import sys
import argparse
import logging
import lightgbm
import numpy
from distutils.util import strtobool
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.io import input_file_path
from common.components import RunnableScript
class SampleScript(RunnableScript):
    """
    STEP 1 : Package your script as a class.
    This class inherits from RunnableScript, that factors
    duplicate code to achieve usual routines
    of every script in the lightgbm-benchmark repo.
    It has a standard main() function (see below)
    that you should not need to modify except for edge cases.
    See `src/common/components.py` for details on that class.
    TODO: name your class specifically for this script
    """
    def __init__(self):
        """
        STEP 3 : Define your benchmark "task" in the constructor.
        This is the specific constructor for this SampleScript class. It has no
        arguments, as it will be called from the helper `main()` method.
        In your custom script class, you need to call the super constructor
        with the parameters below.
        TODO: pick your task (score, train, generate, compile, ...)
        TODO: name your framework and version
        """
        super().__init__(
            task="sample_task", # str
            framework="sample_framework", # str
            framework_version="0.0.1" # str
        )

    @classmethod
    def get_arg_parser(cls, parser=None):
        """
        STEP 4 : Define your arguments
        This method will be called by the main() function
        to add your script custom arguments to argparse,
        on top of standard arguments of the benchmark.
        TODO: align this section with your requirements.
        Args:
            parser (argparse.ArgumentParser): an existing argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        """
        # IMPORTANT: call this to add generic benchmark arguments
        parser = RunnableScript.get_arg_parser(parser)
        # add arguments that are specific to your script
        # here's a couple examples
        group_i = parser.add_argument_group("I/O Arguments")
        group_i.add_argument(
            "--data",
            required=True,
            type=input_file_path, # use this helper type for a directory containing a single file
            help="Some input location (directory containing a unique file)",
        )
        group_i.add_argument(
            "--model",
            required=True,
            type=input_file_path, # use this helper type for a directory containing a single file
            help="Some input location (directory containing a unique file)",
        )
        group_i.add_argument(
            "--output",
            required=True,
            default=None,
            type=str,
            help="Some output location (directory)",
        )
        # make sure to return parser
        return parser

    def run(self, args, logger, metrics_logger, unknown_args):
        """
        STEP 5 : Define your run function.
        This is the core function of your script.
        You are required to override this method with your own implementation.
        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.logger): a logger initialized for this script
            metrics_logger (common.metrics.MetricLogger): to report metrics for this script, already initialized for MLFlow
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # make sure the output argument exists
        os.makedirs(args.output, exist_ok=True)
        # and create your own file inside the output
        args.output = os.path.join(args.output, "predictions.txt")
        # CUSTOM CODE STARTS HERE
        # below this line is user code
        logger.info(f"Loading model from {args.model}")
        booster = lightgbm.Booster(model_file=args.model)
        # to log executing time of a code block, use log_time_block()
        logger.info(f"Loading data for inferencing")
        with metrics_logger.log_time_block(metric_name="time_data_loading"):
            inference_data = lightgbm.Dataset(args.data, free_raw_data=False).construct()
            inference_raw_data = inference_data.get_data()
        # optional: add data shape as property
        metrics_logger.set_properties(
            inference_data_length=inference_data.num_data(),
            inference_data_width=inference_data.num_feature(),
        )
        # to log executing time of a code block, use log_time_block()
        logger.info(f"Running .predict()")
        with metrics_logger.log_time_block(metric_name="time_inferencing"):
            booster.predict(data=inference_raw_data)
        # CUSTOM CODE ENDS HERE
def get_arg_parser(parser=None):
    """
    STEP 2: main function block
    Module-level parser builder required by shrike unit tests.

    Delegates to the script class so the module-level function stays in
    sync with the class implementation (arguments parsing mainly).

    Args:
        parser (argparse.ArgumentParser): optional parser to extend

    Returns:
        argparse.ArgumentParser: the configured parser
    """
    script_class = SampleScript
    return script_class.get_arg_parser(parser)
def main(cli_args=None):
    """Module-level entry point, kept for compatibility with shrike unit tests.

    Args:
        cli_args (list[str]): optional CLI arguments (defaults to sys.argv)
    """
    SampleScript.main(cli_args)
if __name__ == "__main__":
    # Script entry point.
    # The main function is defined in src/common/components.py
    # and is standard to all scripts.
    main()
|
microsoft/lightgbm-benchmark | tests/scripts/test_lightgbm_inferencing_c_api.py | <filename>tests/scripts/test_lightgbm_inferencing_c_api.py<gh_stars>10-100
"""
Executes the series of scripts end-to-end
to test LightGBM (python) manual benchmark
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch, Mock, call
from scripts.inferencing.lightgbm_c_api import score
# IMPORTANT: see conftest.py for fixtures
# NOTE: decorators apply bottom-up, so the innermost patch
# (locate_lightgbm_benchmark_binaries) is the FIRST positional mock argument.
@patch('mlflow.log_metric')
@patch('mlflow.set_tags')
@patch('scripts.inferencing.lightgbm_c_api.score.subprocess_run')
@patch('scripts.inferencing.lightgbm_c_api.score.locate_lightgbm_benchmark_binaries')
def test_lightgbm_c_api_score(locate_binaries_mock, subprocess_run_mock, mlflow_set_tags_mock, mlflow_log_metric_mock, temporary_dir, regression_model_sample, regression_inference_sample):
    """Tests src/scripts/inferencing/lightgbm_c_api/score.py"""
    predictions_dir = os.path.join(temporary_dir, "predictions")
    locate_binaries_mock.return_value = "fake_cli.exe"
    # create a first mock for the return of subprocess
    subprocess_call_handle_mock = Mock()
    subprocess_call_handle_mock.returncode = 0
    subprocess_call_handle_mock.stderr = "# empty logs"
    # NOTE(review): "PROPRETY" below looks like a typo for "PROPERTY" -- confirm
    # against the C API binary's actual log format before changing the fixture.
    subprocess_call_handle_mock.stdout = """
# fake logs for parsing metrics from C API binaries (cli)
ROW line=0 label=0.42 null_elem=3 prediction=0.45 time_usecs=45.2
ROW line=1 label=0.42 null_elem=3 prediction=0.45 time_usecs=45.3
ROW line=3 label=0.42 null_elem=3 prediction=0.45 time_usecs=45.4
METRIC foo=342.0
PROPRETY foo2=bar2
"""
    # feed that mock into a subprocess.run() mock
    subprocess_run_mock.return_value = subprocess_call_handle_mock
    # create test arguments for the script
    script_args = [
        "score.py",
        "--data", regression_inference_sample,
        "--model", regression_model_sample,
        "--output", predictions_dir
    ]
    # replaces sys.argv with test arguments and run main
    with patch.object(sys, "argv", script_args):
        score.main()
    # test arguments
    assert isinstance(subprocess_run_mock.call_args.args[0], list), "first argument of subprocess.run() should be a list"
    assert "fake_cli.exe" in subprocess_run_mock.call_args.args[0][0], "first element in subprocess.run() command should contain return value of locate_lightgbm_benchmark_binaries()"
    # test expected outputs
    assert os.path.isfile(os.path.join(predictions_dir, "predictions.txt"))
    metric_calls = mlflow_log_metric_mock.call_args_list
    # 25 = 11 perf metrics + 13 inference metrics + 1 custom (foo)
    assert mlflow_log_metric_mock.call_count == 25
    assert len(metric_calls) == 25
|
microsoft/lightgbm-benchmark | tests/common/test_perf.py | """Tests src/common/metrics.py"""
import os
import pytest
from unittest.mock import call, Mock, patch
import time
from common.perf import PerformanceReportingThread, PerformanceMetricsCollector
def verify_all_perf_report_keys(perf_report):
    """Asserts that a perf report dict contains every expected metric key.

    Checks the report is a dict, carries a timestamp, has all the
    cpu/mem/disk/net keys with float values, and nothing spurious leaked in.
    """
    assert isinstance(perf_report, dict)
    assert "timestamp" in perf_report, "perf report should have a timestamp key"
    required_keys = (
        "cpu_pct_per_cpu_avg",
        "cpu_pct_per_cpu_min",
        "cpu_pct_per_cpu_max",
        "mem_percent",
        "disk_usage_percent",
        "disk_io_read_mb",
        "disk_io_write_mb",
        "net_io_lo_sent_mb",
        "net_io_ext_sent_mb",
        "net_io_lo_recv_mb",
        "net_io_ext_recv_mb",
    )
    for key in required_keys:
        assert key in perf_report, f"key {key} should be in the perf report, but instead we find: {list(perf_report.keys())}"
        # every metric value is a float so far
        assert isinstance(perf_report[key], float)
    # sanity check: a key that was never emitted must not appear
    assert "not_in_perf_report" not in perf_report
def test_perf_report_run_as_thread():
    """ Tests PerformanceReportingThread() as a thread """
    # creating a mock to provide as callback
    call_on_loop_method = Mock()
    call_on_exit_method = Mock()
    perf_report_thread = PerformanceReportingThread(
        initial_time_increment=2.0,
        callback_on_loop=call_on_loop_method,
        callback_on_exit=call_on_exit_method
    )
    perf_report_thread.start() # will engage in first loop and sleep 2.0
    time.sleep(0.5) # will wait to be in the middle of that loop
    perf_report_thread.finalize()
    # finalize() joins the thread, so by now the exit callback
    # must have fired exactly once
    call_on_exit_method.assert_called_once()
    # get all mock calls
    callback_call_args = call_on_loop_method.call_args_list
    assert len(callback_call_args) == 1 # just called once
    assert len(callback_call_args[0].args) == 1 # only 1 argument
    perf_report = callback_call_args[0].args[0]
    verify_all_perf_report_keys(perf_report)
def test_perf_report_collector_run_as_thread():
    """ Tests PerformanceMetricsCollector() """
    # creating a mock to provide as callback
    test_max_length = 10
    perf_collector = PerformanceMetricsCollector(max_length=test_max_length)
    # hack internal values to make the test faster
    perf_collector.report_thread.cpu_interval = 0.01
    perf_collector.report_thread.time_increment = 0.02
    # fake the loop in the internal thread
    # if we run the exact times we can hold
    for i in range(test_max_length):
        perf_collector.report_thread._run_loop()
    # we expect to have internal list full
    assert len(perf_collector.perf_reports) > 0
    assert len(perf_collector.perf_reports) == test_max_length
    # 1 more time... (this exceeds max_length and triggers the trim)
    perf_collector.report_thread._run_loop()
    # and length should be half
    assert len(perf_collector.perf_reports) == (test_max_length // 2)
    # and frequency increased
    assert perf_collector.perf_reports_freqs == 2
    # then every other report should be skipped (odd counters are dropped)
    for i in range(4):
        perf_collector.report_thread._run_loop()
    # and length should be half + 2 new values (2 of the 4 were skipped)
    assert len(perf_collector.perf_reports) == (test_max_length // 2) + 2
    for report in perf_collector.perf_reports:
        verify_all_perf_report_keys(report)
|
microsoft/lightgbm-benchmark | src/common/lightgbm_utils.py | <gh_stars>10-100
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
This classes provide help to integrate lightgbm
"""
import lightgbm
import logging
class LightGBMCallbackHandler():
    """Collects evaluation metrics emitted by LightGBM during training.

    Keeps every iteration's evaluation results in memory and forwards each
    one to an MLFlow-backed metrics logger, with optional key prefix/suffix.
    """
    def __init__(self, metrics_logger, metrics_prefix=None, metrics_suffix=None):
        """
        Args:
            metrics_logger (common.metrics.MetricsLogger): class to log metrics using MLFlow
            metrics_prefix (str): optional prefix prepended to every metric key
            metrics_suffix (str): optional suffix appended to every metric key
        """
        self.metrics = {}  # iteration -> evaluation_result_list
        self.metrics_logger = metrics_logger
        self.metrics_prefix = metrics_prefix
        self.metrics_suffix = metrics_suffix
        self.logger = logging.getLogger(__name__)

    def _format_metric_key(self, data_name, eval_name):
        """Builds a metric key with prefix and suffix"""
        prefix = self.metrics_prefix or ""
        suffix = self.metrics_suffix or ""
        return f"{prefix}{data_name}.{eval_name}{suffix}"

    def callback(self, env: lightgbm.callback.CallbackEnv) -> None:
        """Callback method to collect metrics produced by LightGBM.
        See https://lightgbm.readthedocs.io/en/latest/_modules/lightgbm/callback.html
        """
        # keep this iteration's results for future use
        self.metrics[env.iteration] = env.evaluation_result_list
        # each evaluation result is a tuple; log each as a distinct metric
        for evaluation_entry in env.evaluation_result_list:
            data_name, eval_name, result = evaluation_entry[0], evaluation_entry[1], evaluation_entry[2]
            self.metrics_logger.log_metric(
                key=self._format_metric_key(data_name, eval_name),
                value=result,
                step=env.iteration,  # provide iteration as step in mlflow
            )
|
microsoft/lightgbm-benchmark | src/common/metrics.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
These classes provide some tools to automate wall time compute and logging.
"""
import os
import time
import re
from functools import wraps
import mlflow
import platform
import psutil
import json
import traceback
import logging
class MetricType():
    """Enumeration of metric categories understood by MetricsLogger.log_metric()."""
    # a metric your script generates once (per node), example: training time
    ONETIME_METRIC = 1
    # a metric generated multiple times, once per "step" or iteration, example: rmse
    ITERATION_METRIC = 2
    # a perf metric generated at regular intervals
    # BUGFIX: was 2, colliding with ITERATION_METRIC, which made
    # MetricsLogger.log_metric() silently drop iteration metrics as well
    PERF_INTERVAL_METRIC = 3
class MetricsLogger():
    """
    Class for handling metrics logging in MLFlow.

    NOTE: the MLFlow run state is shared across ALL instances via the
    class-level flag _initialized -- only one run is opened per process.
    """
    # class-level flag: True once mlflow.start_run() has been called
    _initialized = False

    def __init__(self, session_name=None, metrics_prefix=None):
        # prefix prepended to every metric key (see log_metric)
        self._metrics_prefix = metrics_prefix
        # free-text session name, used only in log messages
        self._session_name = session_name
        self._logger = logging.getLogger(__name__)

    def open(self):
        """Opens the MLFlow session."""
        # no-op if any instance already opened the run
        if not MetricsLogger._initialized:
            self._logger.info(f"Initializing MLFLOW [session='{self._session_name}', metrics_prefix={self._metrics_prefix}]")
            mlflow.start_run()
            MetricsLogger._initialized = True

    def close(self):
        """Close the MLFlow session."""
        if MetricsLogger._initialized:
            self._logger.info(f"Finalizing MLFLOW [session='{self._session_name}']")
            mlflow.end_run()
            MetricsLogger._initialized = False
        else:
            # closing without opening is tolerated, but flagged
            self._logger.warning(f"Call to finalize MLFLOW [session='{self._session_name}'] that was never initialized.")

    @classmethod
    def _remove_non_allowed_chars(cls, name_string):
        """ Removes chars not allowed for metric keys in mlflow """
        return re.sub(r'[^a-zA-Z0-9_\-\.\ \/]', '', name_string)

    def log_metric(self, key, value, step=None, type=MetricType.ONETIME_METRIC):
        """Logs a metric key/value pair.
        Args:
            key (str): metric key
            value (str): metric value
            step (int): which step to log this metric? (see mlflow.log_metric())
            type (int): type of the metric (see MetricType)
        """
        if self._metrics_prefix:
            key = self._metrics_prefix + key
        key = self._remove_non_allowed_chars(key)
        self._logger.debug(f"mlflow[session={self._session_name}].log_metric({key},{value})")
        # NOTE: there's a limit to the name of a metric; truncate to 50 chars
        if len(key) > 50:
            key = key[:50]
        if type == MetricType.PERF_INTERVAL_METRIC:
            pass # for now, do not process those
        else:
            try:
                mlflow.log_metric(key, value, step=step)
            except mlflow.exceptions.MlflowException:
                # never fail the run because of metric logging issues
                self._logger.critical(f"Could not log metric using MLFLOW due to exception:\n{traceback.format_exc()}")

    def log_figure(self, figure, artifact_file):
        """Logs a figure using mlflow
        Args:
            figure (Union[matplotlib.figure.Figure, plotly.graph_objects.Figure]): figure to log
            artifact_file (str): name of file to record
        """
        try:
            mlflow.log_figure(figure, artifact_file)
        except mlflow.exceptions.MlflowException:
            # never fail the run because of artifact logging issues
            self._logger.critical(f"Could not log figure using MLFLOW due to exception:\n{traceback.format_exc()}")

    def set_properties(self, **kwargs):
        """Set properties/tags for the session.
        Args:
            kwargs (dict): any keyword argument will be passed as tags to MLFLow
        """
        self._logger.debug(f"mlflow[session={self._session_name}].set_tags({kwargs})")
        mlflow.set_tags(kwargs)

    def set_platform_properties(self):
        """ Capture platform sysinfo and record as properties. """
        self.set_properties(
            machine=platform.machine(),
            processor=platform.processor(),
            architecture="-".join(platform.architecture()),
            platform=platform.platform(),
            system=platform.system(),
            system_version=platform.version(),
            cpu_count=os.cpu_count(),
            cpu_frequency=round(psutil.cpu_freq().current),
            system_memory=round((psutil.virtual_memory().total) / (1024*1024*1024))
        )

    def set_properties_from_json(self, json_string):
        """ Set properties/tags for the session from a json_string.
        Args:
            json_string (str): a string parsable as json, contains a dict.
        Raises:
            ValueError: if json_string is not valid JSON or not a dict.
        """
        try:
            json_dict = json.loads(json_string)
        except:
            raise ValueError(f"During parsing of JSON properties '{json_string}', an exception occured: {traceback.format_exc()}")
        if not isinstance(json_dict, dict):
            raise ValueError(f"Provided JSON properties should be a dict, instead it was {str(type(json_dict))}: {json_string}")
        # coerce all values to strings before tagging
        properties_dict = dict(
            [
                (k, str(v)) # transform whatever as a string
                for k,v in json_dict.items()
            ]
        )
        self.set_properties(**properties_dict)

    def log_parameters(self, **kwargs):
        """ Logs parameters to MLFlow.
        Args:
            kwargs (dict): any keyword arguments will be passed as parameters to MLFlow
        """
        self._logger.debug(f"mlflow[session={self._session_name}].log_params({kwargs})")
        # NOTE: to avoid mlflow exception when value length is too long (ex: label_gain)
        for key,value in kwargs.items():
            if isinstance(value, str) and len(value) > 255:
                self._logger.warning(f"parameter {key} (str) could not be logged, value length {len(value)} > 255")
            else:
                mlflow.log_param(key,value)

    def log_time_block(self, metric_name, step=None):
        """ [Proxy] Use in a `with` statement to measure execution time of a code block.
        Uses LogTimeBlock.
        Example
        -------
        ```python
        with LogTimeBlock("my_perf_metric_name"):
            print("(((sleeping for 1 second)))")
            time.sleep(1)
        ```
        """
        # see class below with proper __enter__ and __exit__
        return LogTimeBlock(metric_name, step=step, metrics_logger=self)

    def log_inferencing_latencies(self, time_per_batch, batch_length=1, factor_to_usecs=1000000.0):
        """Logs prediction latencies (for inferencing) with lots of fancy metrics and plots.
        Args:
            time_per_batch_list (List[float]): time per inferencing batch
            batch_lengths (Union[List[int],int]): length of each batch (List or constant)
            factor_to_usecs (float): factor to apply to time_per_batch to convert to microseconds
        """
        # total number of individual predictions across all batches
        if isinstance(batch_length, list):
            sum_batch_lengths = sum(batch_length)
        else:
            sum_batch_lengths = batch_length*len(time_per_batch)
        # log metadata
        self.log_metric("prediction_batches", len(time_per_batch))
        self.log_metric("prediction_queries", sum_batch_lengths)
        if len(time_per_batch) > 0:
            self.log_metric("prediction_latency_avg", (sum(time_per_batch) * factor_to_usecs)/sum_batch_lengths) # usecs
        # if there's more than 1 batch, compute percentiles
        if len(time_per_batch) > 1:
            # local imports: plotting deps only needed on this path
            import numpy as np
            import matplotlib.pyplot as plt
            plt.switch_backend('agg')
            # latency per batch
            batch_run_times = np.array(time_per_batch) * factor_to_usecs
            self.log_metric("batch_latency_p50_usecs", np.percentile(batch_run_times, 50))
            self.log_metric("batch_latency_p75_usecs", np.percentile(batch_run_times, 75))
            self.log_metric("batch_latency_p90_usecs", np.percentile(batch_run_times, 90))
            self.log_metric("batch_latency_p95_usecs", np.percentile(batch_run_times, 95))
            self.log_metric("batch_latency_p99_usecs", np.percentile(batch_run_times, 99))
            # show the distribution prediction latencies
            fig, ax = plt.subplots(1)
            ax.hist(batch_run_times, bins=100)
            ax.set_title("Latency-per-batch histogram (log scale)")
            plt.xlabel("usecs")
            plt.ylabel("occurence")
            plt.yscale('log')
            # record in mlflow
            self.log_figure(fig, "batch_latency_log_histogram.png")
            # latency per query
            if isinstance(batch_length, list):
                prediction_latencies = np.array(time_per_batch) * factor_to_usecs / np.array(batch_length)
            else:
                prediction_latencies = np.array(time_per_batch) * factor_to_usecs / batch_length
            self.log_metric("prediction_latency_p50_usecs", np.percentile(prediction_latencies, 50))
            self.log_metric("prediction_latency_p75_usecs", np.percentile(prediction_latencies, 75))
            self.log_metric("prediction_latency_p90_usecs", np.percentile(prediction_latencies, 90))
            self.log_metric("prediction_latency_p95_usecs", np.percentile(prediction_latencies, 95))
            self.log_metric("prediction_latency_p99_usecs", np.percentile(prediction_latencies, 99))
            # show the distribution prediction latencies
            fig, ax = plt.subplots(1)
            ax.hist(prediction_latencies, bins=100)
            ax.set_title("Latency-per-prediction histogram (log scale)")
            plt.xlabel("usecs")
            plt.ylabel("occurence")
            plt.yscale('log')
            # record in mlflow
            self.log_figure(fig, "prediction_latency_log_histogram.png")
########################
### CODE BLOCK TIMER ###
########################
class LogTimeBlock(object):
    """ This class should be used to time a code block.
    The time diff is computed from __enter__ to __exit__.
    Example
    -------
    ```python
    with LogTimeBlock("my_perf_metric_name"):
        print("(((sleeping for 1 second)))")
        time.sleep(1)
    ```
    """

    def __init__(self, name, **kwargs):
        """
        Constructs the LogTimeBlock.
        Args:
            name (str): key for the time difference (for storing as metric)
            kwargs (dict): recognized keywords are tags (dict), step (int)
                and metrics_logger (MetricsLogger); others are ignored.
        """
        # kwargs
        self.tags = kwargs.get('tags', None)
        self.step = kwargs.get('step', None)
        self.metrics_logger = kwargs.get('metrics_logger', None)
        # internal variables
        self.name = name
        self.start_time = None
        self._logger = logging.getLogger(__name__)

    def __enter__(self):
        """ Starts the timer, gets triggered at beginning of code block """
        self.start_time = time.time() # starts "timer"
        # BUGFIX: return self so `with LogTimeBlock(...) as timer:` binds the instance
        return self

    def __exit__(self, exc_type, value, traceback):
        """ Stops the timer and stores accordingly
        gets triggered at the end of the code block.
        Note:
            arguments are by design for with statements.
        """
        run_time = time.time() - self.start_time # stops "timer"
        # BUGFIX: format spec was ':2f' (minimum width 2), intended ':.2f' (2 decimals)
        self._logger.info(f"--- time elapsed: {self.name} = {run_time:.2f} s" + (f" [tags: {self.tags}]" if self.tags else ""))
        if self.metrics_logger:
            self.metrics_logger.log_metric(self.name, run_time, step=self.step)
        else:
            # fall back to a fresh session-less logger
            MetricsLogger().log_metric(self.name, run_time, step=self.step)
|
microsoft/lightgbm-benchmark | src/scripts/inferencing/lightgbm_python/score.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
LightGBM/Python inferencing script
"""
import os
import sys
import argparse
import logging
import time
import numpy as np
from distutils.util import strtobool
import lightgbm
# Add the right path to PYTHONPATH
# so that you can import from common.*
# Resolve the repository source root (three levels up from this script)
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
    # append (not prepend) so installed packages keep priority
    logging.info(f"Adding {COMMON_ROOT} to PYTHONPATH")
    sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import input_file_path
class LightGBMPythonInferecingScript(RunnableScript):
    """Runnable script benchmarking LightGBM Python API inferencing.

    NOTE(review): class name contains a typo ("Inferecing") but is kept as-is
    because module-level get_arg_parser()/main() reference it by name.
    """
    def __init__(self):
        super().__init__(
            task = "score",
            framework = "lightgbm",
            framework_version = "PYTHON_API."+str(lightgbm.__version__)
        )

    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.
        Args:
            parser (argparse.ArgumentParser): an argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)
        group_i = parser.add_argument_group("Input Data")
        group_i.add_argument("--data",
            required=True, type=input_file_path, help="Inferencing data location (file path)")
        group_i.add_argument("--model",
            required=False, type=input_file_path, help="Exported model location (file path)")
        group_i.add_argument("--output",
            required=False, default=None, type=str, help="Inferencing output location (file path)")
        group_params = parser.add_argument_group("Scoring parameters")
        group_params.add_argument("--num_threads",
            required=False, default=1, type=int, help="number of threads")
        group_params.add_argument("--predict_disable_shape_check",
            required=False, default=False, type=strtobool, help="See LightGBM documentation")
        return parser

    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)
        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # record relevant parameters
        metrics_logger.log_parameters(
            num_threads=args.num_threads
        )
        # register logger for lightgbm logs
        lightgbm.register_logger(logger)
        # make sure the output argument exists (output is optional)
        if args.output:
            os.makedirs(args.output, exist_ok=True)
            args.output = os.path.join(args.output, "predictions.txt")
        logger.info(f"Loading model from {args.model}")
        booster = lightgbm.Booster(model_file=args.model)
        logger.info(f"Loading data for inferencing")
        with metrics_logger.log_time_block("time_data_loading"):
            # NOTE: this is bad, but allows for libsvm format (not just numpy)
            inference_data = lightgbm.Dataset(args.data, free_raw_data=False).construct()
            inference_raw_data = inference_data.get_data()
        # capture data shape as property
        metrics_logger.set_properties(
            inference_data_length = inference_data.num_data(),
            inference_data_width = inference_data.num_feature()
        )
        logger.info(f"Running .predict()")
        # time the whole predict() call as one batch
        batch_start_time = time.monotonic()
        predictions_array = booster.predict(
            data=inference_raw_data,
            num_threads=args.num_threads,
            predict_disable_shape_check=bool(args.predict_disable_shape_check)
        )
        prediction_time = (time.monotonic() - batch_start_time)
        metrics_logger.log_metric("time_inferencing", prediction_time)
        # use helper to log latency with the right metric names
        metrics_logger.log_inferencing_latencies(
            [prediction_time], # only one big batch
            batch_length=inference_data.num_data(),
            factor_to_usecs=1000000.0 # values are in seconds
        )
        # write predictions to disk only when an output location was given
        if args.output:
            np.savetxt(
                args.output,
                predictions_array,
                fmt='%f',
                delimiter=',',
                newline='\n',
                header='',
                footer='',
                comments='# ',
                encoding=None
            )
def get_arg_parser(parser=None):
    """Module-level parser builder, required for shrike unit tests.

    Args:
        parser (argparse.ArgumentParser): optional parser to extend

    Returns:
        argparse.ArgumentParser: the configured parser
    """
    script_class = LightGBMPythonInferecingScript
    return script_class.get_arg_parser(parser)
def main(cli_args=None):
    """Module-level entry point, required for shrike unit tests.

    Args:
        cli_args (list[str]): optional CLI arguments (defaults to sys.argv)
    """
    LightGBMPythonInferecingScript.main(cli_args)
if __name__ == "__main__":
    # script entry point
    main()
|
microsoft/lightgbm-benchmark | src/common/perf.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Helps with reporting performance metrics (cpu/mem utilization).
Needs to be implemented in the rest of the code.
"""
import logging
import threading
import time
import psutil
class PerformanceReportingThread(threading.Thread):
    """Thread to report performance (cpu/mem/net).

    Every `time_increment` seconds, assembles a dict of perf readings
    (cpu/mem/disk/network) and hands it to `callback_on_loop`.
    """
    def __init__(self,
                 initial_time_increment=1.0,
                 cpu_interval=1.0,
                 callback_on_loop=None,
                 callback_on_exit=None):
        """Constructor
        Args:
            initial_time_increment (float): how much time to sleep between perf readings
            cpu_interval (float): interval to capture cpu utilization
            callback_on_loop (func): function to call when a perf reading is issued
            callback_on_exit (func): function to call when thread is finalized
        """
        threading.Thread.__init__(self)
        self.killed = False # flag, set to True to kill from the inside
        self.logger = logging.getLogger(__name__)
        # time between perf reports
        self.time_increment = initial_time_increment
        self.cpu_interval = cpu_interval
        # set callbacks
        self.callback_on_loop = callback_on_loop
        self.callback_on_exit = callback_on_exit

    #####################
    ### RUN FUNCTIONS ###
    #####################

    def run(self):
        """Run function of the thread, while(True)"""
        while not(self.killed):
            if self.time_increment >= self.cpu_interval:
                # psutil.cpu_percent(interval=cpu_interval) in _run_loop already
                # blocks for cpu_interval seconds, so only sleep the remainder
                time.sleep(self.time_increment - self.cpu_interval)
            self._run_loop()
        if self.callback_on_exit:
            self.callback_on_exit()

    def _run_loop(self):
        """What to run within the while(not(killed))"""
        perf_report = {}
        # CPU UTILIZATION (blocks for cpu_interval seconds)
        cpu_utilization = psutil.cpu_percent(interval=self.cpu_interval, percpu=True)
        perf_report["cpu_pct_per_cpu_avg"] = sum(cpu_utilization) / len(cpu_utilization)
        perf_report["cpu_pct_per_cpu_min"] = min(cpu_utilization)
        perf_report["cpu_pct_per_cpu_max"] = max(cpu_utilization)
        # MEM UTILIZATION
        perf_report["mem_percent"] = psutil.virtual_memory().percent
        # DISK UTILIZATION
        # single snapshot so read/write figures come from the same reading
        disk_io = psutil.disk_io_counters(perdisk=False)
        perf_report["disk_usage_percent"] = psutil.disk_usage('/').percent
        perf_report["disk_io_read_mb"] = (disk_io.read_bytes / (1024 * 1024))
        # BUGFIX: previously read .write_count (an operation counter);
        # .write_bytes is the byte counter matching the _mb unit
        perf_report["disk_io_write_mb"] = (disk_io.write_bytes / (1024 * 1024))
        # NET I/O SEND/RECV, split between loopback and external interfaces
        net_io_counters = psutil.net_io_counters(pernic=True)
        net_io_lo_identifiers = []
        net_io_ext_identifiers = []
        for key in net_io_counters:
            if 'loopback' in key.lower():
                net_io_lo_identifiers.append(key)
            elif key.lower() == 'lo':
                net_io_lo_identifiers.append(key)
            else:
                net_io_ext_identifiers.append(key)
        lo_sent_mb = sum(
            [
                net_io_counters.get(key).bytes_sent
                for key in net_io_lo_identifiers
            ]
        ) / (1024 * 1024)
        ext_sent_mb = sum(
            [
                net_io_counters.get(key).bytes_sent
                for key in net_io_ext_identifiers
            ]
        ) / (1024 * 1024)
        lo_recv_mb = sum(
            [
                net_io_counters.get(key).bytes_recv
                for key in net_io_lo_identifiers
            ]
        ) / (1024 * 1024)
        ext_recv_mb = sum(
            [
                net_io_counters.get(key).bytes_recv
                for key in net_io_ext_identifiers
            ]
        ) / (1024 * 1024)
        perf_report["net_io_lo_sent_mb"] = lo_sent_mb
        perf_report["net_io_ext_sent_mb"] = ext_sent_mb
        perf_report["net_io_lo_recv_mb"] = lo_recv_mb
        perf_report["net_io_ext_recv_mb"] = ext_recv_mb
        # add a timestamp
        perf_report["timestamp"] = time.time()
        # END OF REPORT
        if self.callback_on_loop:
            self.callback_on_loop(perf_report)

    def finalize(self):
        """Ask the thread to finalize (clean)"""
        self.killed = True
        self.join()
class PerformanceMetricsCollector():
    """Collects performance metrics from PerformanceReportingThread
    Limits all values to a maximum length"""
    def __init__(self, max_length=1000):
        """Constructor
        Args:
            max_length (int): maximum number of perf reports to keep
        """
        self.logger = logging.getLogger(__name__)
        # create a thread to generate reports regularly
        self.report_thread = PerformanceReportingThread(
            initial_time_increment=1.0,
            cpu_interval=1.0,
            callback_on_loop=self.append_perf_metrics
        )
        self.perf_reports = [] # internal storage
        self.perf_reports_freqs = 1 # frequency to skip reports from thread
        self.perf_reports_counter = 0 # how many reports we had so far
        # round max_length UP to the nearest even number so halving is exact
        self.max_length = (max_length//2 + max_length%2) * 2 # has to be dividable by 2

    def start(self):
        """Start collector perf metrics (start internal thread)"""
        self.logger.info(f"Starting perf metric collector (max_length={self.max_length})")
        self.report_thread.start()

    def finalize(self):
        """Stop collector perf metrics (stop internal thread)"""
        self.logger.info(f"Finalizing perf metric collector (length={len(self.perf_reports)})")
        self.report_thread.finalize()

    def append_perf_metrics(self, perf_metrics):
        """Add a perf metric report to the internal storage"""
        self.perf_reports_counter += 1
        # skip this report unless the counter is a multiple of the current frequency
        if (self.perf_reports_counter % self.perf_reports_freqs):
            # if we've decided to skip this one
            return
        self.perf_reports.append(perf_metrics)
        if len(self.perf_reports) > self.max_length:
            # trim the report by half (keep every other stored report)
            self.perf_reports = [
                self.perf_reports[i]
                for i in range(0, self.max_length, 2)
            ]
            self.perf_reports_freqs *= 2 # we'll start accepting reports only 1 out of 2
            self.logger.warning(f"Perf report store reached max, increasing freq to {self.perf_reports_freqs}")
class PerfReportPlotter():
    """Aggregates perf reports collected from all nodes and logs summary metrics.

    Once every node's reports have been added via add_perf_reports(), call
    report_nodes_perf() to emit, per node, the max over time of each metric.
    """
    # per-report metric keys, each logged per node as "max_t_(<key>)"
    PERF_REPORT_KEYS = [
        "cpu_pct_per_cpu_avg",
        "cpu_pct_per_cpu_min",
        "cpu_pct_per_cpu_max",
        "mem_percent",
        "disk_usage_percent",
        "disk_io_read_mb",
        "disk_io_write_mb",
        "net_io_lo_sent_mb",
        "net_io_ext_sent_mb",
        "net_io_lo_recv_mb",
        "net_io_ext_recv_mb",
    ]

    def __init__(self, metrics_logger):
        """
        Args:
            metrics_logger: logger (MetricsLogger-like) used to emit aggregated metrics
        """
        self.all_reports = {}  # node -> list of perf report dicts
        self.metrics_logger = metrics_logger

    def add_perf_reports(self, perf_reports, node):
        """Add a set of reports from a given node"""
        self.all_reports[node] = perf_reports

    def report_nodes_perf(self):
        """Logs, for each node, the max over time of every perf metric.

        Refactored from 11 copy-pasted log_metric stanzas into one data-driven
        loop; emits exactly the same metric keys, values and steps as before.
        """
        # Currently reporting one metric per node (node index used as step)
        for node, reports in self.all_reports.items():
            for key in self.PERF_REPORT_KEYS:
                self.metrics_logger.log_metric(
                    f"max_t_({key})",
                    max(report[key] for report in reports),
                    step=node
                )
|
microsoft/lightgbm-benchmark | src/common/paths.py | <filename>src/common/paths.py
import os
# Root of the lightgbm-benchmark repository (two levels up from this file)
LIGHTGBM_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
# Python sources root
SCRIPTS_SOURCES_ROOT = os.path.join(LIGHTGBM_REPO_ROOT, 'src')
# Location of runnable component scripts
COMPONENTS_ROOT = os.path.join(LIGHTGBM_REPO_ROOT, 'src', 'scripts')
# Configuration directory (Hydra/OmegaConf configs)
CONFIG_PATH = os.path.join(LIGHTGBM_REPO_ROOT, 'conf')
|
microsoft/lightgbm-benchmark | src/common/aml.py | <reponame>microsoft/lightgbm-benchmark
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
This script contains methods to handle connection to AzureML,
such as registering Datasets or obtaining a Dataset handler from a given workspace.
"""
import logging
from azureml.core import Datastore, Dataset
def dataset_from_dstore_path(workspace, datastore, datastore_path, validate=True):
    """ Obtains a local reference for a given datastore and path
    Args:
        workspace (azureml.core.Workspace): connector to an AzureML workspace
        datastore (str): name of the AzureML datastore
        datastore_path (str): path in datastore to register as Dataset
        validate (bool): validate files exist or not
    Returns:
        azureml.core.Dataset: registered Dataset object
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Connecting to Datastore {datastore}...")
    # resolve the datastore name into a Datastore handle
    datastore = Datastore.get(workspace, datastore)
    logger.info(f"Reading path {datastore_path}...")
    remote_ds_path = [(datastore, datastore_path)]
    logger.info(f"Registering as dataset...")
    remote_dataset = Dataset.File.from_files(path=remote_ds_path, validate=validate)
    return remote_dataset
def load_dataset_from_data_input_spec(workspace, data_input_spec):
    """ Loads a dataset based on config object data_input_spec (see tasks.py data_input_spec)
    Args:
        workspace (azureml.core.Workspace): connector to an AzureML workspace
        data_input_spec (OmegaConf.DictConfig): config Hydra dataclass data_input_spec (see tasks.py)
    Returns:
        azureml.core.Dataset: registered Dataset object
    Raises:
        ValueError: if the spec provides neither a name, nor a uuid, nor a datastore+path
    """
    logger = logging.getLogger(__name__)
    if data_input_spec.name:
        # lookup by registered dataset name (and optional version)
        logger.info(f"Reading dataset from name={data_input_spec.name} version={data_input_spec.version}")
        loaded_dataset = Dataset.get_by_name(workspace, name=data_input_spec.name, version=data_input_spec.version)
    elif data_input_spec.uuid:
        # lookup by dataset id
        logger.info(f"Reading dataset from uuid")
        loaded_dataset = Dataset.get_by_id(workspace, id=data_input_spec.uuid)
    elif data_input_spec.datastore and data_input_spec.path:
        # build a file dataset directly from a datastore path
        logger.info(f"Connecting to Datastore {data_input_spec.datastore}...")
        datastore = Datastore.get(workspace, data_input_spec.datastore)
        logger.info(f"Reading path {data_input_spec.path}...")
        remote_ds_path = [(datastore, data_input_spec.path)]
        logger.info(f"Registering as dataset...")
        loaded_dataset = Dataset.File.from_files(path=remote_ds_path, validate=data_input_spec.validate)
    else:
        # BUGFIX: message was missing the f prefix, so {data_input_spec} was never interpolated
        raise ValueError(f"To load a dataset using data_input_spec, you need to provide either a name, a uuid or a datastore+path (provided config = {data_input_spec})")
    return loaded_dataset
def apply_sweep_settings(step, sweep_settings_config):
    """Applies the settings to a sweep step based on a config dataclass.
    Args:
        step (PipelineStep): the instance of the step
        sweep_settings_config (OmegaConf.DictConfig): schema specified in src.common.tasks.sweep_runsettings
    Raises:
        ValueError: if primary_metric/goal or algorithm settings are missing
        NotImplementedError: if early_termination.policy_type is not a known policy
    """
    # objective (primary_metric + goal) is mandatory
    if (not sweep_settings_config.primary_metric) or (not sweep_settings_config.goal):
        raise ValueError("in sweep settings, you need to provide a primary_metric and a goal settings.")
    else:
        step.runsettings.sweep.objective.configure(
            primary_metric = sweep_settings_config.primary_metric,
            goal = sweep_settings_config.goal,
        )
    # search algorithm is mandatory
    if not sweep_settings_config.algorithm:
        raise ValueError("in sweep settings, you need to provide an algorithm setting.")
    else:
        step.runsettings.sweep.algorithm = sweep_settings_config.algorithm
    # trial limits are optional
    if sweep_settings_config.limits:
        step.runsettings.sweep.limits.configure(
            max_total_trials = sweep_settings_config.limits.max_total_trials,
            max_concurrent_trials = sweep_settings_config.limits.max_concurrent_trials,
            timeout_minutes = sweep_settings_config.limits.timeout_minutes,
        )
    # early termination policy is optional
    if sweep_settings_config.early_termination:
        if sweep_settings_config.early_termination.policy_type == "median_stopping":
            step.runsettings.sweep.early_termination.configure(
                policy_type="median_stopping",
                evaluation_interval=sweep_settings_config.early_termination.evaluation_interval,
                delay_evaluation=sweep_settings_config.early_termination.delay_evaluation
            )
        elif sweep_settings_config.early_termination.policy_type == "bandit":
            step.runsettings.sweep.early_termination.configure(
                policy_type="bandit",
                slack_factor=sweep_settings_config.early_termination.slack_factor,
                evaluation_interval=sweep_settings_config.early_termination.evaluation_interval,
                delay_evaluation=sweep_settings_config.early_termination.delay_evaluation
            )
        elif sweep_settings_config.early_termination.policy_type == "truncation_selection":
            step.runsettings.sweep.early_termination.configure(
                # FIX: was wrongly configured as policy_type="bandit"
                policy_type="truncation_selection",
                truncation_percentage=sweep_settings_config.early_termination.truncation_percentage,
                evaluation_interval=sweep_settings_config.early_termination.evaluation_interval,
                delay_evaluation=sweep_settings_config.early_termination.delay_evaluation
            )
        elif sweep_settings_config.early_termination.policy_type == "default":
            pass
        elif sweep_settings_config.early_termination.policy_type is None:
            pass
        else:
            raise NotImplementedError(f"sweep settings early_termination policy_type={sweep_settings_config.early_termination.policy_type} is not implemented.")
|
microsoft/lightgbm-benchmark | src/scripts/inferencing/lightgbm_c_api/score.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
LightGBM inferencing script using an executable lightgbm_predict to run C API predictions.
"""
import os
import sys
import argparse
import logging
import re
import lightgbm
import numpy as np
import matplotlib.pyplot as plt
from distutils.util import strtobool
from subprocess import PIPE
from subprocess import run as subprocess_run
from subprocess import TimeoutExpired
# Add the right path to PYTHONPATH
# so that you can import from common.*
# (COMMON_ROOT is three directory levels up from this script, i.e. the src/ folder)
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
    logging.info(f"Adding {COMMON_ROOT} to PYTHONPATH")
    sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import input_file_path
def locate_lightgbm_lib(lightgbm_lib_path=None):
    """Locates the lightgbm library installed in the python site packages.
    An explicitly provided path (from argparse) takes precedence; otherwise the
    site-packages directories are scanned for a lightgbm/ folder.
    Returns None when nothing is found."""
    import site
    if lightgbm_lib_path:
        # explicit path provided (from argparse): trust it as-is
        return lightgbm_lib_path
    # scan every site-packages directory for an installed lightgbm/ folder
    for site_dir in site.getsitepackages():
        candidate = os.path.join(site_dir, "lightgbm")
        if os.path.isdir(candidate):
            logging.info(f"Found lightgbm/ at {candidate}")
            return candidate
    return None
def locate_lightgbm_benchmark_binaries(binaries_path=None):
    """Locates the lightgbm benchmark binaries (lightgbm_predict executable).
    Search order: explicit argument, LIGHTGBM_BENCHMARK_BINARIES_PATH env var,
    then the directory containing this script. Raises FileNotFoundError when
    the executable cannot be found at the selected location."""
    executable_name = "lightgbm_predict.exe" if sys.platform == "win32" else "lightgbm_predict"
    # 1. explicit path from command line arguments
    if binaries_path:
        if os.path.isfile(binaries_path):
            # direct file path to the executable
            return binaries_path
        candidate = os.path.join(binaries_path, executable_name)
        if os.path.isfile(candidate):
            # directory containing the executable
            return candidate
        raise FileNotFoundError(f"Could not find executable '{executable_name}' at path provided from command line arguments {binaries_path}")
    # 2. environment variable (directory expected)
    if "LIGHTGBM_BENCHMARK_BINARIES_PATH" in os.environ:
        candidate = os.path.join(os.environ["LIGHTGBM_BENCHMARK_BINARIES_PATH"], executable_name)
        if os.path.isfile(candidate):
            return candidate
        raise FileNotFoundError(f"Could not find executable '{executable_name}' at path provided from environment variable LIGHTGBM_BENCHMARK_BINARIES_PATH={os.environ['LIGHTGBM_BENCHMARK_BINARIES_PATH']}")
    # 3. fallback: next to this script (component directory)
    candidate = os.path.join(os.path.abspath(os.path.dirname(__file__)), executable_name)
    if os.path.isfile(candidate):
        return candidate
    raise FileNotFoundError(f"Could not find executable '{executable_name}' at the location of this script.")
class LightGBMCAPIInferecingScript(RunnableScript):
    """Benchmark script that scores data through the lightgbm_predict C API executable.
    Locates the executable and the lightgbm shared library, launches the executable as
    a subprocess on a (model, data) pair, parses its stdout for per-row latencies,
    metrics and properties, and optionally writes predictions to a file.
    NOTE(review): the class name keeps the original 'Inferecing' spelling since the
    module-level wrappers reference it by that name."""
    def __init__(self):
        # report framework version as "C_API.<installed python lightgbm version>"
        super().__init__(
            task = "score",
            framework = "lightgbm",
            framework_version = "C_API."+str(lightgbm.__version__)
        )
    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.
        Args:
            parser (argparse.ArgumentParser): an argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)
        group_i = parser.add_argument_group("Input Data")
        group_i.add_argument("--lightgbm_lib_path",
            required=False, type=str, default=None, help="Path to lightgbm library (file path)")
        group_i.add_argument("--binaries_path",
            required=False, type=str, default=None, help="Path to lightgbm_predict (file path)")
        group_i.add_argument("--data",
            required=True, type=input_file_path, help="Inferencing data location (file path)")
        group_i.add_argument("--model",
            required=False, type=input_file_path, help="Exported model location (file path)")
        group_i.add_argument("--output",
            required=False, default=None, type=str, help="Inferencing output location (file path)")
        group_params = parser.add_argument_group("Scoring parameters")
        group_params.add_argument("--num_threads",
            required=False, default=1, type=int, help="number of threads")
        group_params.add_argument("--predict_disable_shape_check",
            required=False, default=False, type=strtobool, help="See LightGBM documentation")
        return parser
    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)
        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        # record relevant parameters
        metrics_logger.log_parameters(
            num_threads=args.num_threads
        )
        if args.output:
            # make sure the output argument exists
            os.makedirs(args.output, exist_ok=True)
            # and create your own file inside the output
            args.output = os.path.join(args.output, "predictions.txt")
        lightgbm_predict_path = locate_lightgbm_benchmark_binaries(args.binaries_path)
        # assemble a command for lightgbm cli
        # NOTE(review): interpolates Python's "True"/"False" capitalization for the
        # shape-check flag — confirm the executable parses these values
        lightgbm_predict_command = [
            lightgbm_predict_path,
            f"{args.model}",
            f"{args.data}",
            "verbosity=2",
            f"num_threads={args.num_threads}",
            f"predict_disable_shape_check={bool(args.predict_disable_shape_check)}"
        ]
        # create custom environment variables for the exec
        custom_env = os.environ.copy()
        # try to locate the library
        args.lightgbm_lib_path = locate_lightgbm_lib(args.lightgbm_lib_path)
        if args.lightgbm_lib_path:
            # prepend the lightgbm library folder so the executable can load the shared lib
            logger.info(f"Adding to PATH: {args.lightgbm_lib_path}")
            if sys.platform == "win32":
                custom_env["PATH"] = os.path.abspath(args.lightgbm_lib_path) + ";" + custom_env["PATH"]
            else:
                custom_env["PATH"] = os.path.abspath(args.lightgbm_lib_path) + ":" + custom_env["PATH"]
        logger.info("Running command {}".format(" ".join(lightgbm_predict_command)))
        lightgbm_predict_call = subprocess_run(
            lightgbm_predict_command,
            stdout=PIPE,
            stderr=PIPE,
            universal_newlines=True,
            check=False, # will not raise an exception if subprocess fails (so we capture with .returncode)
            timeout=None,
            env=custom_env
        )
        logger.info(f"stdout: {lightgbm_predict_call.stdout}")
        logger.info(f"stderr: {lightgbm_predict_call.stderr}")
        logger.info(f"return code: {lightgbm_predict_call.returncode}")
        if lightgbm_predict_call.returncode != 0:
            raise Exception("Return code != 0, see stderr above.")
        # now parsing executable logs for prediction per query time in ms
        # the executable emits one "ROW ..." line per prediction plus "METRIC ..." and
        # "PROPERTY ..." summary lines; unmatched lines are logged as warnings
        time_inferencing_per_query = []
        predictions_array = []
        for line in lightgbm_predict_call.stdout.split("\n"):
            if line.startswith("ROW"):
                row_pattern = r"ROW line=([0-9\.]+) label=([0-9\.e\-]+) null_elem=([0-9\.]+) prediction=([0-9\.e\-]+) time_usecs=([0-9\.e\-]+)"
                row_matched = re.match(row_pattern, line.strip())
                if row_matched:
                    # group 5 = per-row latency in microseconds, group 4 = prediction value
                    time_inferencing_per_query.append(float(row_matched.group(5)))
                    predictions_array.append(float(row_matched.group(4)))
                else:
                    logger.warning(f"log row {line} does not match expected pattern {row_pattern}")
            elif line.startswith("METRIC"):
                row_pattern = r"METRIC ([a-zA-Z0-9_]+)=([a-zA-Z0-9\.e\-]+)"
                row_matched = re.match(row_pattern, line.strip())
                if row_matched:
                    metrics_logger.log_metric(row_matched.group(1), float(row_matched.group(2)))
                else:
                    logger.warning(f"log metric {line} does not match expected pattern {row_pattern}")
            elif line.startswith("PROPERTY"):
                row_pattern = r"PROPERTY ([a-zA-Z0-9_]+)=([a-zA-Z0-9\.e\-]+)"
                row_matched = re.match(row_pattern, line.strip())
                if row_matched:
                    metrics_logger.set_properties(**{row_matched.group(1): row_matched.group(2)})
                else:
                    logger.warning(f"log metric {line} does not match expected pattern {row_pattern}")
        # use helper to log latency with the right metric names
        metrics_logger.log_inferencing_latencies(
            time_inferencing_per_query,
            batch_length=1, # in this exec, each row is just 1 prediction call
            factor_to_usecs=1.0 # values are already in usecs
        )
        if args.output:
            # write one prediction per line
            np.savetxt(
                args.output,
                predictions_array,
                fmt='%f',
                delimiter=',',
                newline='\n',
                header='',
                footer='',
                comments='# ',
                encoding=None
            )
def get_arg_parser(parser=None):
    """ To ensure compatibility with shrike unit tests """
    # module-level alias so test harnesses can discover the parser without the class
    return LightGBMCAPIInferecingScript.get_arg_parser(parser)
def main(cli_args=None):
    """ To ensure compatibility with shrike unit tests """
    # module-level entry point delegating to the script class
    LightGBMCAPIInferecingScript.main(cli_args)
if __name__ == "__main__":
    main()
|
microsoft/lightgbm-benchmark | src/common/ray.py | <reponame>microsoft/lightgbm-benchmark
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Helper code to run Ray distributed scripts [EXPERIMENTAL]
"""
import os
import logging
import traceback
from .components import RunnableScript
from dataclasses import dataclass
from distutils.util import strtobool
import subprocess
import ray
import time
class RayScript(RunnableScript):
    """Base class for runnable scripts that need a Ray cluster.
    Handles head/worker node setup when launched via MPI on AzureML (head address
    and rank discovered from environment variables), or connects to a remote/local
    ray instance otherwise."""
    def __init__(self, task, framework, framework_version, metrics_prefix=None):
        """ Generic initialization for all script classes.
        Args:
            task (str): name of task in the pipeline/benchmark (ex: train, score)
            framework (str): name of ML framework
            framework_version (str): a version of this framework
            metrics_prefix (str): any prefix to add to this scripts metrics
        """
        # just use the regular init
        super().__init__(
            task = task,
            framework = framework,
            framework_version = framework_version,
            metrics_prefix = metrics_prefix
        )
        # ray init settings
        self.self_is_head = True    # whether this process is the head node
        self.head_address = None    # ip of the head node (workers connect to it)
        self.head_port = 6379
        self.redis_password = None
    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.
        Args:
            parser (argparse.ArgumentParser): an argument parser instance
        Returns:
            ArgumentParser: the argument parser instance
        Notes:
            if parser is None, creates a new parser instance
        """
        # add generic arguments
        parser = RunnableScript.get_arg_parser(parser)
        # add generic arguments here
        group_general = parser.add_argument_group("Ray parameters")
        group_general.add_argument(
            "--ray_head",
            required=False,
            default=None,
            type=str,
            help="address of ray cluster (if running this script locally)",
        )
        group_general.add_argument(
            "--ray_head_port",
            required=False,
            default=6379,
            type=int,
            help="port of ray cluster (if running this script locally)",
        )
        group_general.add_argument(
            "--ray_redis_password",
            required=False,
            default=None,
            type=str,
            help="redis password of ray cluster (if running this script locally)",
        )
        group_general.add_argument(
            "--ray_on_aml",
            required=False,
            default=False,
            type=strtobool,
            help="if running this script within an AzureML run (head/port will be discovered)",
        )
        return parser
    def initialize_run(self, args):
        """Initialize the component run, opens/setups what needs to be"""
        self.logger.info("Initializing Ray component script...")
        if args.ray_on_aml:
            # if running on AzureML, get context of cluster from env variables
            self.head_address = os.environ.get("AZ_BATCHAI_JOB_MASTER_NODE_IP")
            # NOTE(review): the fallback value below is a redacted placeholder from the
            # original source — confirm the intended default password
            self.redis_password = os.environ.get("AZUREML_RUN_TOKEN_RAND", "<PASSWORD>")
            self.self_is_head = (os.environ.get("OMPI_COMM_WORLD_RANK", "0") == "0")
            self.available_nodes = int(os.environ.get("OMPI_COMM_WORLD_SIZE", "1"))
            if self.self_is_head: # if we're on the first node of this job
                if self.available_nodes > 1: # and if number of nodes if more than one
                    # then initialize head node to listen to cluster nodes
                    self.logger.info(f"Available nodes = {self.available_nodes}, initializing ray for HEAD node.")
                    self.setup_head_node()
                    # then run ray init
                    ray_init_ret_val = ray.init(address="auto", _redis_password=self.redis_password)
                    self.logger.info(f"Ray init returned: {ray_init_ret_val}")
                    self.logger.info("Ray resources: {}".format(ray.available_resources()))
                    # and wait for cluster nodes to be initialized as well
                    for i in range(60):
                        self.logger.info(f"Waiting for ray cluster to reach available nodes size... [{len(ray.nodes())}/{self.available_nodes}]")
                        if (len(ray.nodes()) >= self.available_nodes):
                            break
                        time.sleep(1)
                    else:
                        # for/else: loop completed without break, cluster never reached full size
                        raise Exception("Could not reach maximum number of nodes before 60 seconds.")
                else:
                    # if just one node, nothing to do here
                    self.logger.info(f"Available nodes = {self.available_nodes}, running ray.init() as for a single node...")
                    ray.init()
            else:
                self.setup_cluster_node()
        else:
            # considering this one as head
            self.self_is_head = True
            # if not running this script in AzureML...
            if args.ray_head:
                # initialize ray for remote ray cluster
                # FIX: ray.init() has no 'redis_addr' keyword; use 'address'
                # (consistent with the address="auto" call above)
                ray.init(
                    address=f"{args.ray_head}:{args.ray_head_port}",
                    _redis_password=args.ray_redis_password
                )
            else:
                # initialize ray locally
                ray.init()
        # open mlflow
        self.metrics_logger.open()
    def finalize_run(self, args):
        """Finalize the run, close what needs to be"""
        self.logger.info("Finalizing Ray component script...")
        # clean ray exit on HEAD node only
        if self.self_is_head:
            self.logger.info(f"At finalization, number of nodes is [nodes={len(ray.nodes())}]")
            ray.shutdown()
        # close mlflow
        self.metrics_logger.close()
    #####################
    ### SETUP METHODS ###
    #####################
    def run_ray_cli(self, ray_cli_command, timeout=60):
        """Runs subprocess for ray setup command
        Args:
            ray_cli_command (list[str]): argv for the `ray` CLI
            timeout (int): seconds before giving up (None = wait forever)
        Returns:
            int: the subprocess return code (always 0; raises otherwise)
        Raises:
            RuntimeError: if the ray CLI exits with a non-zero code
        """
        self.logger.info(f"Launching ray cli with command: {ray_cli_command}")
        ray_cli_command_call = subprocess.run(
            ray_cli_command,
            universal_newlines=True,
            check=False, # will not raise an exception if subprocess fails (so we capture with .returncode)
            timeout=timeout, # TODO: more than a minute would be weird?
        )
        self.logger.info(f"return code: {ray_cli_command_call.returncode}")
        if ray_cli_command_call.returncode != 0:
            raise RuntimeError("Ray cli command returned code != 0")
        return ray_cli_command_call.returncode
    def setup_head_node(self):
        """Setup to run only on head node"""
        self.logger.info("Setting up Ray for HEAD node.")
        # run ray cli
        ray_setup_command = [
            "ray",
            "start",
            "--head",
            f"--port={self.head_port}",
            # FIX: restore reference broken by a redaction placeholder in the original
            f"--redis-password={self.redis_password}"
        ]
        self.run_ray_cli(ray_setup_command)
    def setup_cluster_node(self):
        """Setup to run only on non-head cluster nodes"""
        self.logger.info("Setting up Ray for CLUSTER node.")
        # run ray cli
        ray_setup_command = [
            "ray",
            "start",
            f"--address={self.head_address}:{self.head_port}",
            # FIX: restore reference broken by a redaction placeholder in the original
            f"--redis-password={self.redis_password}",
            #"--block" # should remain in subprocess forever
        ]
        self.run_ray_cli(ray_setup_command, timeout=None)
    ############################
    ### SPECIFIC MAIN METHOD ###
    ############################
    @classmethod
    def main(cls, cli_args=None):
        """ Component main function, it is not recommended to override this method.
        It parses arguments and executes run() with the right arguments.
        Args:
            cli_args (List[str], optional): list of args to feed script, useful for debugging. Defaults to None.
        Returns:
            RayScript: the script instance (for unit tests)
        """
        # initialize root logger
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        console_handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        # construct arg parser
        parser = cls.get_arg_parser()
        # if argument parsing fails, or if unknown arguments, will except
        args, unknown_args = parser.parse_known_args(cli_args)
        logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
        # create script instance, initialize mlflow
        script_instance = cls()
        script_instance.initialize_run(args)
        # catch run function exceptions to properly finalize run (kill/join threads)
        try:
            # run the actual run method ONLY ON HEAD
            if script_instance.self_is_head:
                script_instance.run(args, script_instance.logger, script_instance.metrics_logger, unknown_args)
            else:
                script_instance.logger.warning("This is not HEAD node, exiting script now")
        except BaseException as e:
            logging.critical(f"Exception occured during run():\n{traceback.format_exc()}")
            script_instance.finalize_run(args)
            raise e
        # close mlflow
        script_instance.finalize_run(args)
        # return for unit tests
        return script_instance
|
microsoft/lightgbm-benchmark | src/common/pipelines.py | """
Replacement-code for Shrike
"""
# pylint: disable=no-member
# NOTE: because it raises 'dict' has no 'outputs' member in dsl.pipeline construction
import os
import sys
import json
import logging
import argparse
# config management
from dataclasses import dataclass
from omegaconf import MISSING
from typing import Any, Optional
import hydra
from hydra.core.config_store import ConfigStore
from omegaconf import DictConfig, OmegaConf
from azureml.core import Workspace
from azureml.pipeline.core import Pipeline
from shrike.pipeline.aml_connect import azureml_connect as shrike_azureml_connect
# when running this script directly, needed to import common
from .paths import COMPONENTS_ROOT, CONFIG_PATH
@dataclass
class aml_connection_config: # pylint: disable=invalid-name
    """AML connection configuration (workspace identification + authentication)"""
    subscription_id: str = MISSING  # Azure subscription id (required)
    resource_group: str = MISSING   # resource group of the AzureML workspace (required)
    workspace_name: str = MISSING   # AzureML workspace name (required)
    tenant: Optional[str] = None    # AAD tenant id (optional)
    auth: str = "interactive"       # authentication mode passed to shrike's azureml_connect
    force: bool = False             # force re-authentication
@dataclass
class compute_config: # pylint: disable=invalid-name
    """AML workspace compute targets (names of clusters in the workspace)"""
    default_compute_target: str = "cpu-cluster"  # fallback target for pipeline steps
    linux_cpu: str = MISSING    # linux CPU cluster name (required)
    linux_gpu: str = MISSING    # linux GPU cluster name (required)
    windows_cpu: str = MISSING  # windows CPU cluster name (required)
@dataclass
class run_config: # pylint: disable=invalid-name
    """Pipeline config for command line parameters"""
    regenerate_outputs: bool = False   # force re-running steps even if outputs are cached
    continue_on_failure: bool = False  # keep running remaining steps when one fails
    submit: bool = False               # actually submit the pipeline (False = dry run)
    validate: bool = True              # run client-side pipeline validation before submit
@dataclass
class experiment_config: # pylint: disable=invalid-name
    """Pipeline config for experiment parameters"""
    name: str = MISSING                  # AzureML experiment name (required)
    description: Optional[str] = None    # free-text description (truncated to 5000 chars at submit)
    display_name: Optional[str] = None   # display name shown in the AzureML portal
    tags: Optional[Any] = None           # dict of tags attached to the run
# set by parse_pipeline_config() via its hydra_main closure (hydra cannot return values)
_GLOBAL_CONFIG = None
def parse_pipeline_config(pipeline_config_dataclass: dataclass, cli_args: list=None):
    """Standard helper function to submit a pipeline to AzureML.
    This is a lightweight version of what Shrike does (https://github.com/Azure/shrike).
    Args:
        pipeline_config_dataclass (dataclass): class for hosting the config of pipeline_func
        cli_args (List): command line arguments (if None, use sys.argv)
    Returns:
        DictConfig: a copy of the fully-resolved hydra/omegaconf configuration
    """
    # create an argument parser just to catch --exp-conf
    arg_parser = argparse.ArgumentParser(add_help=False)
    arg_parser.add_argument("--exp-config", dest="exp_conf", required=False, default=None)
    # all remaining arguments will be passed to hydra
    args, unknown_args = arg_parser.parse_known_args(cli_args)
    # resolve config_dir and config_name from --exp-conf
    if args.exp_conf:
        config_abspath = os.path.abspath(args.exp_conf)
        config_relpath = os.path.relpath(config_abspath, start=CONFIG_PATH) # relative path from config folder to specified config yaml
        # hydra expects forward slashes even on Windows
        config_name = os.path.dirname(config_relpath).replace("\\", "/") + "/" + os.path.basename(config_relpath)
        logging.getLogger(__name__).info(f"Using config_name={config_name} and config_dir={CONFIG_PATH}")
    else:
        # NOTE(review): hydra will be invoked with --config-name None in this case — confirm intended
        config_name = None
    # create config with pipeline dataclass
    # store it in hydra default
    config_store = ConfigStore.instance()
    config_dict = {
        "aml": aml_connection_config,
        "compute": compute_config,
        "run": run_config,
        "experiment": experiment_config
    }
    # the pipeline-specific config node is keyed by the dataclass name
    config_dict[pipeline_config_dataclass.__name__] = pipeline_config_dataclass
    config_store.store(name="default", node=config_dict)
    # override argv with only hydra args (TODO: do better)
    sys.argv = (
        [sys.argv[0]]
        + unknown_args
        + [
            "--config-dir", CONFIG_PATH,
            "--config-name", config_name,
        ]
    )
    # create a hydra main function to get overrides
    @hydra.main(config_name="default")
    def hydra_main(cfg : DictConfig) -> None:
        # hydra does not return values; stash the merged config in a module-level global
        global _GLOBAL_CONFIG
        cfg = OmegaConf.merge(config_dict, cfg)
        #logging.getLogger(__name__).info(OmegaConf.to_yaml(cfg))
        _GLOBAL_CONFIG = cfg
    # call the hydra main function
    hydra_main()
    return _GLOBAL_CONFIG.copy()
def azureml_connect(config: DictConfig):
    """Connects to AzureML using the aml section of the config.
    Args:
        config (DictConfig): containing aml_connection_config under key `aml`
    Returns:
        workspace (azure.ml.core.Workspace)
    """
    # map our config fields onto shrike's connection arguments
    connection_kwargs = {
        "aml_subscription_id": config.aml.subscription_id,
        "aml_resource_group": config.aml.resource_group,
        "aml_workspace_name": config.aml.workspace_name,
        "aml_auth": config.aml.auth,
        "aml_tenant": config.aml.tenant,
    }
    return shrike_azureml_connect(**connection_kwargs)
def pipeline_submit(workspace: Workspace,
                    pipeline_config: DictConfig,
                    pipeline_instance: Pipeline,
                    experiment_name: str=None,
                    experiment_description: str=None,
                    display_name: str=None,
                    tags: dict=None):
    """Standard helper function to submit a pipeline to AzureML.
    Args:
        workspace (azure.ml.core.Workspace): AzureML workspace (see azureml_connect())
        pipeline_config (DictConfig): class for hosting the config of pipeline_func
        pipeline_instance (Pipeline): pipeline object
        experiment_name (str): override config.experiment.name at runtime
        experiment_description (str): override config.experiment.description at runtime
        display_name (str): override config.experiment.display_name at runtime
        tags (dict): override config.experiment.tags at runtime
    Returns:
        pipeline (azure.ml.core.PipelineRun), or None when run.submit is false (dry run)
    """
    # optional client-side validation before submitting
    if pipeline_config.run.validate:
        pipeline_instance.validate(workspace=workspace)
    # runtime override takes precedence over config
    experiment_description = (experiment_description or pipeline_config.experiment.description)
    # AzureML caps experiment descriptions at 5000 chars; truncate with an explicit marker
    if experiment_description and len(experiment_description) > 5000:
        experiment_description = experiment_description[:5000-50] + "\n<<<TRUNCATED DUE TO SIZE LIMIT>>>"
    if pipeline_config.run.submit:
        pipeline_run = pipeline_instance.submit(
            workspace=workspace,
            experiment_name=(experiment_name or pipeline_config.experiment.name),
            description=experiment_description,
            display_name=(display_name or pipeline_config.experiment.display_name),
            tags=(tags or pipeline_config.experiment.tags),
            default_compute_target=pipeline_config.compute.default_compute_target,
            regenerate_outputs=pipeline_config.run.regenerate_outputs,
            continue_on_step_failure=pipeline_config.run.continue_on_failure,
        )
        # surface the portal link prominently in the logs
        logging.info(
            f"""
#################################
#################################
#################################
Follow link below to access your pipeline run directly:
-------------------------------------------------------
{pipeline_run.get_portal_url()}
#################################
#################################
#################################
"""
        )
        return pipeline_run
    else:
        # dry run: nothing submitted, returns None
        logging.warning("Pipeline was not submitted, to submit it please add +run.submit=true to your command.")
|
Bill-Park/rpi-iot-gcamp | serial_communication/serial_read.py | <reponame>Bill-Park/rpi-iot-gcamp
import serial

# Echo bytes arriving from the Arduino on /dev/ttyACM0 until interrupted.
ser = serial.Serial('/dev/ttyACM0', 9600)
try:
    while True:
        r = ser.read()
        print(r)
except KeyboardInterrupt:
    # FIX: the original ser.close() placed after `while True` was unreachable;
    # stop cleanly on Ctrl+C and always release the port via finally
    pass
finally:
    ser.close()
|
Bill-Park/rpi-iot-gcamp | api_series/dust_2.py | <reponame>Bill-Park/rpi-iot-gcamp
import requests
import json
# NOTE(review): the service key below was redacted ("<KEY>" placeholders) — supply a
# real data.go.kr / AirKorea service key before running
serviceKey = "<KEY>2F6kdqe2%2F<KEY>yQjLlQH%2B0ZD%2Fg%3D%3D"
# NOTE(review): mojibake of a Korean city name (presumably "Seoul") — restore proper UTF-8
sido = "μμΈ"
# AirKorea per-province daily measurements endpoint, requesting a JSON response
url = "http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnMesureSidoLIst?sidoName={sido}&searchCondition=DAILY&pageNo=1&numOfRows=10&ServiceKey={key}&_returnType=json".format(sido=sido, key=serviceKey)
headers = {'content-type' : 'application/json;charset=utf-8'}
res = requests.get(url, headers = headers)
#print(res.text)
res_json = json.loads(res.text)
# print city name, PM10 and PM2.5 values for each returned record
for data in res_json["list"] :
    print(data["cityName"], data["pm10Value"], data["pm25Value"])
|
Bill-Park/rpi-iot-gcamp | snowboy/example/Python3/sample1.py | import snowboydecoder
import sys
import signal
import make_sound_file
import os
from naver_api import stt, tts
import weather_api
import serial
# NOTE(review): several Korean string literals below were mangled by encoding and are
# split across physical lines — this file does not parse as-is; restore the original
# UTF-8 keyword literals (greeting / weather / light commands) before running.
interrupted = False
# serial link to the Arduino controlling the LED; protocol is "[led on\r\n" / "[led off\r\n"
ser = serial.Serial("/dev/ttyACM0", 9600)
led_on_str = "[led on\r\n"
led_off_str = "[led off\r\n"
def test() :
    """Hotword callback: records a command, transcribes it via Naver STT, then
    answers (TTS greeting / weather report) or toggles the LED over serial."""
    print("hi")
    snowboydecoder.play_audio_file()
    detector.terminate()
    file_name = make_sound_file.main()
    return_text = stt.main(file_name)
    print(return_text)
    if return_text.find("μλ
") != -1 :
        file_name = tts.main("μλ
νμΈμ")
        os.system("mpg123 {}".format(file_name))
    elif return_text.find("λ μ¨") >= 0 :
        temp, humi = weather_api.main()
        weather_text = "μ¨λλ {}λ, μ΅λλ {}% μ
λλ€.".format(temp, humi)
        file_name = tts.main(weather_text)
        os.system("mpg123 {}".format(file_name))
    elif return_text.find("μ‘°λͺ
") != -1 :
        if return_text.find("λ°κ²") != -1 :
            ser.write(led_on_str.encode())
        elif return_text.find("μ΄λ‘κ²") != -1 :
            ser.write(led_off_str.encode())
    # re-arm the hotword detector after handling the command
    detector.start(detected_callback=test,
                   interrupt_check=interrupt_callback,
                   sleep_time=0.03)
def signal_handler(signal, frame):
    # Ctrl+C handler: flag the detector loop to stop
    global interrupted
    interrupted = True
def interrupt_callback():
    # polled by the detector to know when to exit its loop
    global interrupted
    return interrupted
if len(sys.argv) == 1:
    print("Error: need to specify model name")
    print("Usage: python demo.py your.model")
    sys.exit(-1)
model = sys.argv[1]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.7)
print('Listening... Press Ctrl+C to exit')
# main loop
detector.start(detected_callback=test,
               interrupt_check=interrupt_callback,
               sleep_time=0.03)
detector.terminate()
|
Bill-Park/rpi-iot-gcamp | snowboy/example/Python3/make_sound_file.py | <reponame>Bill-Park/rpi-iot-gcamp
import pyaudio
import wave
import audioop
import math
import os
CHUNK = 1024                     # frames per buffer read
FORMAT = pyaudio.paInt16         # 16-bit samples
CHANNELS = 2
RATE = 44100                     # sample rate (Hz)
RECORD_SECONDS = 5               # NOTE(review): unused — recording stops on silence instead
WAVE_OUTPUT_FILENAME = "output.mp3"  # NOTE(review): contents are WAV despite the .mp3 extension
def main(file_name = "output.mp3") :
    """Records from the default audio input until the signal stays quiet
    (35 consecutive chunks below ~61 dB), then writes the capture to file_name.
    Args:
        file_name (str): output path; data is WAV-encoded despite the .mp3 default name
    Returns:
        str: the path the recording was written to
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    print("* recording")
    frames = []
    volume_count = 0
    # keep recording until 35 consecutive quiet chunks are observed
    while volume_count < 35 :
        data = stream.read(CHUNK)
        frames.append(data)
        rms = audioop.rms(data, 2)
        # FIX: guard against rms == 0 (pure silence), which made math.log10 raise
        volume = 20 * math.log10(rms) if rms > 0 else 0
        print(volume)
        if volume < 61 :
            volume_count += 1
        else :
            volume_count = 0
    print("* done recording")
    stream.stop_stream()
    stream.close()
    p.terminate()
    # FIX: honor the file_name argument (was hard-coded to WAVE_OUTPUT_FILENAME,
    # silently ignoring any caller-provided path)
    wf = wave.open(file_name, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    return file_name
if __name__ == "__main__" :
    file_name = main()
    # quick manual check: play back the recording (WAV data, so aplay works)
    os.system("aplay {}".format(file_name))
|
Bill-Park/rpi-iot-gcamp | api_series/dust_1.py | <reponame>Bill-Park/rpi-iot-gcamp<gh_stars>0
import requests
from xml.etree import ElementTree
# NOTE(review): hard-coded service key checked into source — move to config/env var
serviceKey = "Z9vuWBX1vbycgKOhb8zaQ%2FFAdiYe1IiQTxPL04nRhLPoLakrbl%2F6kdqe2%2F1EpeK1qPlQkdBu2yQjLlQH%2B0ZD%2Fg%3D%3D"
# NOTE(review): mojibake of a Korean city name (presumably "Seoul") — restore proper UTF-8
sido = "μμΈ"
# same AirKorea endpoint as dust_2.py, but parsed as XML (no _returnType=json)
url = "http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnMesureSidoLIst?sidoName={sido}&searchCondition=DAILY&pageNo=1&numOfRows=10&ServiceKey={key}".format(sido=sido, key=serviceKey)
headers = {'content-type' : 'application/json;charset=utf-8'}
res = requests.get(url, headers = headers)
tree = ElementTree.fromstring(res.text)
#print(res.text)
# each <item> element holds one city's measurements
for child in tree.iter('item') :
    cityName = child.find('cityName').text
    pm10 = child.find('pm10Value').text
    pm25 = child.find('pm25Value').text
print(cityName, pm10, pm25) |
Bill-Park/rpi-iot-gcamp | snowboy/example/Python3/naver_api/stt.py | <gh_stars>0
import sys
import requests
import json
client_id = "Client ID"          # placeholder: your Naver Cloud application client id
client_secret = "Client Secret"  # placeholder: your Naver Cloud application client secret
lang = "Kor" # language code ( Kor, Jpn, Eng, Chn )
url = "https://naveropenapi.apigw.ntruss.com/recog/v1/stt?lang=" + lang
def main(file_name) :
    """Sends an audio file to the Naver Clova STT API and returns the recognized text.
    Args:
        file_name (str): path of the audio file to transcribe
    Returns:
        str: recognized text on success, or an "Error: ..." message string
    """
    headers = {
        "X-NCP-APIGW-API-KEY-ID": client_id,
        "X-NCP-APIGW-API-KEY": client_secret,
        "Content-Type": "application/octet-stream"
    }
    # FIX: the file handle was opened but never closed; use a context manager
    with open(file_name, 'rb') as data:
        response = requests.post(url, data=data, headers=headers)
    rescode = response.status_code
    if(rescode == 200):
        #print (response.text)
        return_text = json.loads(response.text)["text"]
    else:
        #print("Error : " + response.text)
        return_text = "Error: " + response.text
    return return_text
if __name__ == "__main__" :
    # quick manual check: transcribe the last recording
    return_text = main("output.mp3")
    print(return_text)
|
Bill-Park/rpi-iot-gcamp | snowboy/example/Python3/naver_api/tts.py | import os
import sys
import requests
import json
client_id = "Client ID"          # placeholder: your Naver Cloud application client id
client_secret = "Client Secret"  # placeholder: your Naver Cloud application client secret
url = "https://naveropenapi.apigw.ntruss.com/voice/v1/tts"
def main(tts_text, file_name = "input.mp3") :
    """Synthesizes tts_text via the Naver Clova TTS API and writes the mp3 to file_name.
    Args:
        tts_text (str): text to synthesize
        file_name (str): output mp3 path (written only on success)
    Returns:
        str: file_name (unchanged, even if the API call failed)
    """
    datas = {
        "speaker" : "mijin",
        "speed" : "0",
        "text" : tts_text
    }
    headers = {
        "X-NCP-APIGW-API-KEY-ID": client_id,
        "X-NCP-APIGW-API-KEY": client_secret,
        "Content-Type": "application/x-www-form-urlencoded"
    }
    response = requests.post(url, data=datas, headers=headers)
    # FIX/robustness: only write the audio payload on success; previously an API error
    # body (JSON/HTML) was silently written into the .mp3 file
    if response.status_code == 200:
        with open(file_name, 'wb') as f:
            f.write(response.content)
    else:
        print("Error: " + response.text)
    return file_name
# NOTE(review): the Korean greeting literal below was mangled by encoding and split
# across two lines — this does not parse as-is; restore the original UTF-8 string
if __name__ == "__main__" :
    file_name = main("μλ
νμΈμ Gcamp")
    os.system("mpg123 {}".format(file_name))
|
Bill-Park/rpi-iot-gcamp | serial_communication/serial_send.py | import serial
import time
# serial link to the Arduino; command protocol is '[' + command + CRLF
ser = serial.Serial("/dev/ttyACM0", 9600)
sample_str = "[led on\r\n"
# NOTE(review): opening the port resets most Arduinos — presumably this sleep waits
# for the board to reboot before sending; confirm 3 seconds is sufficient
time.sleep(3)
ser.write(sample_str.encode())
ser.close()
|
Bill-Park/rpi-iot-gcamp | api_series/weather_api.py | import requests
import json
# NOTE(review): API key checked into source — move to config/env var
api_key = "dd40631e0b2c53fdc84e1dffd1da95f6"
# OpenWeatherMap current weather for Seoul; temperatures are returned in Kelvin by default
url = "http://api.openweathermap.org/data/2.5/weather?q=Seoul&appid={key}".format(key=api_key)
def main() :
    """Fetches current Seoul weather from OpenWeatherMap.
    Returns:
        tuple: (temperature in Celsius rounded to 1 decimal, humidity in % as int)
    """
    res = requests.get(url)
    res_json = json.loads(res.text)
    # FIX: Kelvin -> Celsius offset is 273.15, not 273
    temperature = round(res_json["main"]["temp"] - 273.15, 1)
    humidity = round(res_json["main"]["humidity"])
    return temperature, humidity
if __name__ == "__main__" :
    # quick manual check: print current temperature and humidity
    temp, humi = main()
    print(temp)
    print(humi)
|
justinzzy-code/FlappyBird_AI | ai_creation.py | <filename>ai_creation.py
from setup import *
# column width and vertical gap size (pixels) for the pipe obstacles
COLUMNWIDTH,OPENING_SIZE = 50, 300
import random
from random import randint as rng
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from tensorflow.keras.optimizers import Adam
import os
import shelve
# set to True by Env.render() when the pygame window is closed
SHUTDOWN = None
class Env():
    """Flappy-Bird style environment wrapper with a gym-like interface
    (reset / per-frame step / render), built on the Bird and Column sprites
    imported from setup."""
    def __init__(self):
        self.bird=Bird()
        # first column starts at the right edge; second half a screen further
        # (plus half a column width) so the two alternate evenly
        self.column1 = Column(SCREENWIDTH,COLUMNWIDTH,OPENING_SIZE)
        self.column2 = Column(SCREENWIDTH*3//2 + COLUMNWIDTH//2, COLUMNWIDTH,OPENING_SIZE)
        self.reward = 0
        self.count=0        # number of columns passed this episode
        self.done=False
    def reset(self):
        """Re-initializes the episode and returns the initial state."""
        self.__init__()
        return self.get_state()
    def get_state(self):
        """Builds the observation: bird (x, y, vel) + (x, y) of both columns -> list of 7 numbers."""
        state = tuple()
        state += self.bird.get_position()[:2]
        state += (self.bird.vel,)
        pos1 = self.column1.get_position()[:2]
        pos2 = self.column2.get_position()[:2]
        state += pos1+pos2
        state = list(state)
        return state
    def bird_movement(self):
        """Applies the chosen action, then scoring, collision/death checks and reward shaping."""
        if self.bird.action == 1: # jump
            self.bird.move(True)
        else: # not jumping
            self.bird.move()
        if self.column1.score(self.bird) or self.column2.score(self.bird):
            self.reward += 1
            self.count += 1
        if self.column1.collide(self.bird) or self.column2.collide(self.bird):
            self.bird.kill()
        # NOTE(review): compares bird bottom against SCREENWIDTH — presumably the
        # window is square, otherwise this should be a screen-height constant; confirm
        if self.bird.y+self.bird.h > SCREENWIDTH:
            self.bird.kill()
        if not self.bird.alive: #bird is dead, give negative reward
            self.reward = -100
        else: #bird is alive
            self.reward += 0.1
    def runframe(self, action1):
        """Advances one frame with action1 (1 = flap); returns (state, reward, done).
        The episode ends on death or after passing 60 columns."""
        self.done = False
        self.bird.action = action1
        self.column1.move()
        self.column2.move()
        self.bird_movement()
        state = self.get_state()
        if not self.bird.alive:
            self.done=True
        if self.count>60:
            self.done=True
        return state, self.reward, self.done
    def render(self):
        """Draws the current frame; sets the module-level SHUTDOWN flag when the window is closed."""
        global SHUTDOWN
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                SHUTDOWN = True
        window.fill((255,255,255))
        for item in [self.bird, self.column1, self.column2]:
            item.draw(window)
        pygame.display.update()
env = Env()

# Network dimensions: 7 state features (see Env.get_state) and 2 actions
# (0 = do nothing, 1 = jump).
state_size = 7  #env.observation_space.shape[0]
action_size = 2  #env.action_space.n
batch_size = 64
n_episodes = 1000
output_dir = 'data/'
# exist_ok avoids the check-then-create race of the original exists()/makedirs pair.
os.makedirs(output_dir, exist_ok=True)
class DQNAgent:
    """Deep Q-Network agent: epsilon-greedy policy over a small MLP Q-function."""
    def __init__(self,state_size,action_size):
        self.state_size = state_size
        self.action_size = action_size
        # Replay buffer of (state, action, reward, next_state, done) tuples.
        self.memory = deque(maxlen=100000)
        self.gamma = 0.95 #discount factor on future rewards
        self.epsilon = 1.0 #exploitation=0 vs exploration=1
        self.epsilon_decay = 0.995 #multiplied in once per replay() call
        self.epsilon_min = 0.001 #0.1% exploration floor
        self.learning_rate = 0.001
        self.model = self._build_model()
    def _build_model(self):
        """Build a 2x24-unit ReLU MLP mapping state -> per-action Q-values."""
        model=Sequential()
        model.add(Dense(24,input_dim = self.state_size, activation = 'relu'))
        model.add(Dense(24, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse',optimizer=Adam(learning_rate=self.learning_rate))
        return model
    def remember(self,state,action,reward,next_state,done):
        """Append one transition to the replay buffer."""
        self.memory.append((state,action,reward,next_state,done))
    def act(self,state):
        """Epsilon-greedy action selection.

        While exploring, the random policy jumps with only ~5.5% probability
        (jumping constantly would kill the bird immediately).
        """
        if np.random.rand()<=self.epsilon:
            return int(np.random.rand() < 0.055)
            #return random.randrange(self.action_size)
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])
    def replay(self,batch_size):
        """Fit the model on a random minibatch; decays epsilon once per call."""
        minibatch=random.sample(self.memory,batch_size)
        for state,action,reward,next_state,done in minibatch:
            target=reward
            if not done:
                # Bellman target: r + gamma * max_a' Q(s', a').
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action]=target
            self.model.fit(state,target_f,epochs=1,verbose=0)
        if self.epsilon>self.epsilon_min:
            self.epsilon*=self.epsilon_decay
    def save(self,name):
        """Persist model weights to *name* (HDF5)."""
        self.model.save_weights(name)
    def load(self,name):
        """Load model weights from *name* (HDF5)."""
        self.model.load_weights(name)
    def load_all(self, episode_num):
        """Restore weights and replay memory saved at episode *episode_num*."""
        self.load_reset('{}agent_{:05d}.hdf5'.format(output_dir,episode_num))
        self.load_memory(episode_num)
    def load_reset(self, filename):
        """Load weights and switch to pure exploitation (epsilon = 0)."""
        self.load(filename)
        self.epsilon = 0.0
    def load_memory(self, episode_num):
        # Restore a saved replay buffer; keep the current one if none was saved.
        with shelve.open(f'{output_dir}memory.pickle') as file:
            self.memory = file.get(f'{episode_num}') or self.memory
# Shared training state for the __main__ loop below.
agent = DQNAgent(state_size,action_size)
episode = 0  # completed-episode counter
count = 0    # convergence streak counter (see the save logic in the loop)
if __name__ == '__main__':
    # Training driver: run episodes until the window is closed or the agent
    # sustains near-perfect runs long enough to be saved as the "best" model.
    while True:
        if SHUTDOWN:
            break
        episode+=1
        # Learning Loop
        state=env.reset()
        state=np.reshape(state,[1, state_size])
        for t in range(5000):
            if window is not None:
                env.render()
            if SHUTDOWN:
                break
            action = agent.act(state)
            next_state, reward, done = env.runframe(action)
            next_state=np.reshape(next_state,[1, state_size])
            agent.remember(state,action,reward,next_state,done)
            state=next_state
            if done:
                # Log every 10th episode, or any episode with a decent score.
                if episode%10 ==1 or env.count>5:
                    print('episode: {}/{},\ttime: {},\tscore: {},\tepsilon: {:.2}'.format( episode,n_episodes,t,env.count,agent.epsilon))
                break
        # Learning Algorithm in Replay, updates agent.model
        if len(agent.memory)>batch_size:
            agent.replay(batch_size)
        #To save file every 100 episodes into hdf5
        if episode%100==0 and episode>100:
            print(episode,'epsilon:',round(agent.epsilon,3))
            agent.save('{}agent_{:05d}.hdf5'.format(output_dir,episode))
            with shelve.open(f'{output_dir}memory.pickle') as file:
                file[f'{episode}'] = agent.memory
        #To save best file
        # Streak bookkeeping: +1 for a near-perfect episode (score >= 59),
        # otherwise decay a long streak by 1 or reset a short one to 0.
        count+= 1 if env.count>=59 else -1 if count>20 else -count
        if env.count >20:
            agent.save('{}agent_{:05d}.hdf5'.format(output_dir,episode))
        if count>60:
            # BUGFIX: the original formatted the undefined name `n` here,
            # which raised NameError exactly when the best model was ready.
            agent.save('best{}.hdf5'.format(episode))
            break
    if RENDERING:
        pygame.quit()
|
justinzzy-code/FlappyBird_AI | setup_load.py | <reponame>justinzzy-code/FlappyBird_AI<filename>setup_load.py
import random
import math
import time
import copy
from random import randint as rng, shuffle
# GLOBALS
BIRD_X = 300        # bird spawn position (pixels)
BIRD_Y = 100
BIRD_WIDTH = 15     # bird hitbox size (pixels)
BIRD_HEIGHT = 15
GRAVITY = 1.2       # downward acceleration added each frame
JUMP_HEIGHT = 12    # upward velocity gained by a jump
BIRD_MAX_VEL = 20   # terminal falling speed
COLUMNWIDTH,OPENING_SIZE = 50,200
VELOCITY_X = 8      # horizontal scroll speed of the world (pixels/frame)
# PYGAME
SCREENWIDTH = 600 # pixels
SCREENHEIGHT = 600 # pixels
CAPTION = "Flappy Bird"
RENDERING = True  # set False for headless training (pygame never imported)
if RENDERING:
    import pygame
    pygame.init()
    window = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
    pygame.display.set_caption(CAPTION)
    CLOCK = pygame.time.Clock()
    FRAMERATE = 10
    DEFAULT_FONT = pygame.font.SysFont("arial",30)
    START_TEXT = DEFAULT_FONT.render("PRESS SPACE TO START", True, (0,0,0))
else:
    # Headless mode: callers check `window is not None` before drawing.
    window = None
class Rect:
    """Minimal axis-aligned rectangle with pygame.Rect-like collision helpers."""
    def __init__(self, x,y,w,h):
        self.x=x
        self.y=y
        self.w=w
        self.h=h
        if RENDERING:
            # Random colour so each object is visually distinct on screen.
            self.color = (random.randint(0,255),
                          random.randint(0,255),
                          random.randint(0,255))
    def move(self, x=0, y=0):
        """Translate the rectangle by (x, y)."""
        self.x+=x
        self.y+=y
    def colliderect(self, rect):
        """Return True if this rectangle and *rect* overlap with positive area."""
        left = max(self.x, rect.x)
        top = max(self.y, rect.y)
        right = min(self.x + self.w, rect.x + rect.w)
        bot = min(self.y + self.h, rect.y + rect.h)
        # The original re-checked the (always-positive) product of two negative
        # differences here; a positive-size intersection already proves overlap.
        return right - left > 0 and bot - top > 0
    def collidepoint(self, first=None, second=None):
        """Return True if a point lies inside the rectangle (edges inclusive).

        Accepts either collidepoint(x, y) or collidepoint((x, y)).
        """
        # BUGFIX: the original tested `if second:`, which mis-classified a
        # legitimate y == 0 (falsy) as the tuple form and then crashed with a
        # NameError on the unbound x/y.
        if second is not None:
            x = first
            y = second
        else:
            try:
                x, y = first
            except (TypeError, ValueError) as e:
                print(e)
                return False  # malformed point: report "not inside" gracefully
        return self.x <= x <= self.x+self.w and self.y <= y <= self.y+self.h
    def get_position(self):
        """Return (x, y, w, h) with coordinates truncated to ints."""
        return (int(self.x), int(self.y), self.w, self.h)
    def draw(self, win):
        """Draw the rectangle on *win*; no-op in headless mode."""
        if not RENDERING:
            return
        pygame.draw.rect(win, self.color, self.get_position())
class Game_Object(Rect):
    # Semantic alias for Rect: common base class for all game entities.
    pass
class Bird(Game_Object):
    """The player bird: jumping, gravity, and clamping to the screen."""
    def __init__(self,x=BIRD_X,y=BIRD_Y,
                 vel=0,
                 max_vel = BIRD_MAX_VEL,
                 jump_height = JUMP_HEIGHT,
                 gravity=GRAVITY,):
        self.x = x
        self.y = y
        self.w = BIRD_WIDTH
        self.h = BIRD_HEIGHT
        self.vel = vel                 # vertical velocity (positive = falling)
        self.max_vel = max_vel         # terminal fall speed
        self.jump_height = jump_height
        self.gravity = gravity
        self.action = 0                # last requested action (1 = jump)
        self.alive=True
        if RENDERING:
            self.color = (random.randint(0,255),
                          random.randint(0,255),
                          random.randint(0,255))
    def move(self, jumping = False):
        """Advance one frame; *jumping* applies an upward impulse if alive."""
        if self.alive:
            if jumping:
                if self.vel < -self.jump_height/3: #double jump: stronger boost
                    self.vel = -self.jump_height * 1.3
                else: #single jump
                    self.vel = -self.jump_height
            self.y += self.vel
            if self.y<=0:
                self.vel=0  # hitting the ceiling cancels upward motion
            self.y = max(self.y, 0)
            # BUGFIX: clamp against the screen *height*, not width (the two
            # are equal in this project, which hid the mistake).
            self.y = min(SCREENHEIGHT-self.h, self.y)
            self.vel += self.gravity
            self.vel = min(self.vel,self.max_vel)
        else:
            # Dead bird drifts left with the scenery while still falling.
            self.x -= VELOCITY_X
            self.y += self.vel
            self.y = max(self.y, 0)
            self.y = min(SCREENHEIGHT-self.h, self.y)  # BUGFIX: was SCREENWIDTH
            self.vel += self.gravity
            self.vel = min(self.vel,self.max_vel)
    def kill(self):
        """Mark the bird dead and give it one last upward 'death hop'."""
        if self.alive:
            self.vel = -self.jump_height
            self.alive=False
class Wall(Game_Object):
    """Solid segment of a column; kills the bird on contact."""
    def __init__(self, x,y,w,h):
        super().__init__(x,y,w,h)
    def collide(self, bird):
        """Return True if *bird* overlaps this wall.

        The original body was a verbatim copy of Rect.colliderect; delegate
        to the shared AABB test instead of duplicating the math.
        """
        return self.colliderect(bird)
    def move(self, vel):
        # Scroll left with the world; positive vel moves the wall leftwards.
        self.x-=vel
class Scorebox(Game_Object):
    """Invisible one-shot trigger zone inside a column's opening.

    The first time the bird overlaps it, collide() reports True and the box
    latches so the same opening can never be scored twice.
    """
    def __init__(self, x,y,w,h):
        super().__init__(x,y,w,h)
        self.score = False  # latched once the bird has passed through
    def move(self, vel):
        # Scroll left with the world; positive vel moves the box leftwards.
        self.x-=vel
    def collide(self, bird):
        """Return True exactly once, on the first overlap with *bird*."""
        if self.score:
            return False
        # Reuse the shared AABB overlap test (the original inlined a copy).
        self.score = self.colliderect(bird)
        return self.score
class Column: #group 2 walls together
    """A full pipe: top wall + bottom wall + the scoring gap between them."""
    def __init__(self, x, w, opening_size):
        self.x = x
        self.w = w
        self.opening_size = opening_size
        self.set_opening()
    def set_opening(self):
        """Place the gap at a random height (50 px margin top and bottom)."""
        self.opening_top = random.randint(50,SCREENHEIGHT-50-self.opening_size)
        self.top_wall = Wall(self.x, 0, self.w, self.opening_top)
        self.bot_wall = Wall(self.x, self.opening_top+self.opening_size, self.w, SCREENHEIGHT-self.opening_top-self.opening_size)
        self.opening = Scorebox(self.x, self.opening_top, self.w, self.opening_size)
    def get_position(self):
        # The column's reported position is that of its scoring opening.
        return self.opening.get_position()
    def move(self, vel=VELOCITY_X):
        """Scroll left; once fully off-screen, recycle to the right with a
        freshly randomized opening (set_opening rebuilds walls at self.x)."""
        self.x -= vel
        self.top_wall.move(vel)
        self.bot_wall.move(vel)
        self.opening.move(vel)
        if self.x < -self.w:
            self.x += SCREENWIDTH+COLUMNWIDTH
            self.set_opening()
    def collide(self,bird):
        """True if the bird hits either wall."""
        top = self.top_wall.collide(bird)
        bot = self.bot_wall.collide(bird)
        return top or bot
    def score(self,bird):
        """True the first time the bird passes through the opening."""
        return self.opening.collide(bird)
    def draw(self,win):
        """Draw both walls (the scorebox stays invisible)."""
        self.top_wall.draw(win)
        self.bot_wall.draw(win)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.