Columns — signature: string (8 to 3.44k chars) · body: string (0 to 1.41M chars) · docstring: string (1 to 122k chars) · id: string (5 to 17 chars)
def custom(self, command):
self.command.append(command)
return self
Run an arbitrary SoX effect command. Example: custom('echo 0.8 0.9 1000 0.3') applies an echo effect. References: - https://linux.die.net/man/1/soxexam - http://sox.sourceforge.net/sox.html - http://tldp.org/LDP/LG/issue73/chung.html - http://dsl.org/cookbook/cookbook_29.html
f7577:c0:m36
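A minimal usage sketch for the row above (not part of the dataset): it assumes `custom` belongs to a builder-style effects chain, here a hypothetical `Transformer` whose `command` list would later be joined into a sox invocation.

# Hypothetical stand-in for the class the method above belongs to.
class Transformer:
    def __init__(self):
        self.command = []

    def custom(self, command):
        # Append a raw SoX effect string; returning self allows chaining.
        self.command.append(command)
        return self

tfm = Transformer()
tfm.custom('echo 0.8 0.9 1000 0.3').custom('reverb 50')
print(tfm.command)  # ['echo 0.8 0.9 1000 0.3', 'reverb 50']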
def buy_product(self, product_pk):
return (self.invoice_sales.filter(line_invoice_sales__line_order__product__pk=product_pk).exists()
        or self.ticket_sales.filter(line_ticket_sales__line_order__product__pk=product_pk).exists())
Determine whether the customer has bought a given product.
f7606:c1:m5
@staticmethod
def create_document_from_another(pk, list_lines,
                                 MODEL_SOURCE, MODEL_FINAL,
                                 url_reverse,
                                 msg_error_relation, msg_error_not_found, unique):
context = {}
obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
if list_lines and obj_src:
    list_lines = [int(x) for x in list_lines]
    if unique:
        create = not SalesLines.objects.filter(**{"<STR_LIT>": list_lines}).exists()
    else:
        create = True
    """<STR_LIT>"""
    if create:
        with transaction.atomic():
            obj_final = MODEL_FINAL()
            obj_final.customer = obj_src.customer
            obj_final.date = datetime.datetime.now()
            obj_final.billing_series = obj_src.billing_series
            # Pick the line field that points back to the source document.
            if isinstance(obj_src, SalesBasket):
                field_src = '<STR_LIT>'
            elif isinstance(obj_src, SalesOrder) or isinstance(obj_src, SalesAlbaran):
                field_src = '<STR_LIT>'
            elif isinstance(obj_src, SalesTicket) or isinstance(obj_src, SalesTicketRectification):
                field_src = '<STR_LIT>'
            elif isinstance(obj_src, SalesInvoice) or isinstance(obj_src, SalesInvoiceRectification):
                field_src = '<STR_LIT>'
            complete = True
            # Pick the line field that will point to the new document.
            if isinstance(obj_final, SalesOrder):
                obj_final.budget = obj_src
                field_final = '<STR_LIT>'
            elif isinstance(obj_final, SalesAlbaran):
                field_final = '<STR_LIT>'
                complete = False
            elif isinstance(obj_final, SalesTicket):
                field_final = '<STR_LIT>'
            elif isinstance(obj_final, SalesTicketRectification):
                field_final = '<STR_LIT>'
                complete = False
            elif isinstance(obj_final, SalesInvoice):
                field_final = '<STR_LIT>'
            elif isinstance(obj_final, SalesInvoiceRectification):
                field_final = '<STR_LIT>'
                complete = False
            obj_final.save()
            for line in SalesLines.objects.filter(**{'<STR_LIT>': list_lines, '<STR_LIT>'.format(field_final): True}):
                setattr(line, field_final, obj_final)
                if complete:
                    # Copy the source line's per-document values onto the final document's fields.
                    setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                    setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                    setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                    setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                    setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                    setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                line.save()
                """<STR_LIT>"""
            obj_src.lock = True
            obj_src.save()
            context['url'] = "<STR_LIT>".format(reverse(url_reverse), obj_final.pk)
            context['<STR_LIT>'] = obj_final
    else:
        context['error'] = msg_error_relation
else:
    context['error'] = msg_error_not_found
return context
pk: pk of the source document list_lines: list of pks of the source lines MODEL_SOURCE: model of the source document MODEL_FINAL: model of the final document url_reverse: destination url msg_error_relation: error message indicating that the lines are already related msg_error_not_found: error message indicating that the source object was not found unique: (True/False) indicates whether more than one line can be associated with other lines
f7606:c20:m0
@staticmethod
def create_albaran_automatic(pk, list_lines):
lines = SalesLines.objects.filter(pk__in=list_lines).exclude(albaran__isnull=False).values_list('<STR_LIT>')
lines_to_albaran = [x[0] for x in lines]
SalesLines.create_albaran_from_order(pk, lines_to_albaran)
"""<STR_LIT>"""
Automatically create the delivery note (albarán).
f7606:c20:m3
@staticmethod
def find(pos, user):
ck = dateparse.parse_time(getattr(settings, "<STR_LIT>", '<STR_LIT>'))
year = timezone.now().year
month = timezone.now().month
day = timezone.now().day
hour = ck.hour
minute = ck.minute
second = ck.second
checkpoint = timezone.datetime(year, month, day, hour, minute, second)
cashdiary = CashDiary.objects.filter(pos=pos, opened_date__gte=checkpoint).order_by("<STR_LIT>").first()
if not cashdiary:
    oldercashdiary = CashDiary.objects.filter(pos=pos, opened_date__lt=checkpoint).order_by("<STR_LIT>").first()
    if oldercashdiary:
        if oldercashdiary.closed_user:
            # The older diary was already closed: no diary is available today.
            cashdiary = None
        else:
            # Close the stale diary and roll its balances into a fresh one.
            amount_cash = oldercashdiary.amount_cash()
            amount_cards = oldercashdiary.amount_cards()
            oldercashdiary.closed_cash = amount_cash
            oldercashdiary.closed_cards = amount_cards
            oldercashdiary.closed_user = user
            oldercashdiary.closed_date = timezone.now()
            oldercashdiary.save()
            cashdiary = CashDiary()
            cashdiary.pos = pos
            cashdiary.opened_cash = amount_cash
            cashdiary.opened_cards = amount_cards
            cashdiary.opened_user = user
            cashdiary.opened_date = timezone.now()
            cashdiary.save()
    else:
        # First diary for this POS: open it with zero balances.
        cashdiary = CashDiary()
        cashdiary.pos = pos
        cashdiary.opened_cash = Decimal('0')
        cashdiary.opened_cards = Decimal('0')
        cashdiary.opened_user = user
        cashdiary.opened_date = timezone.now()
        cashdiary.save()
return cashdiary
Get a valid CashDiary for today for the given POS. It will return: - None: if no CashDiary is available today and the older one was already closed - a new CashDiary: if no CashDiary is available today but an older one was left open - the existing CashDiary: if a CashDiary is available today (open or closed)
f7610:c0:m2
def buy_product(self, product_pk):
return (self.invoice_sales.filter(lines_sales__product_final__pk=product_pk).exists()
        or self.ticket_sales.filter(lines_sales__product_final__pk=product_pk).exists())
Determine whether the customer has bought a given product.
f7612:c1:m5
@staticmethod
def create_document_from_another(pk, list_lines,
                                 MODEL_SOURCE, MODEL_FINAL,
                                 url_reverse,
                                 msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                 unique):
context = {}
obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
if list_lines and obj_src:
    list_lines = [int(x) for x in list_lines]
    obj_final = MODEL_FINAL()
    complete = True
    field_final_tax = None
    # Pick the line fields that will point to the new document.
    if isinstance(obj_final, SalesOrder):
        obj_final.budget = obj_src
        field_final = '<STR_LIT>'
        field_final_tax = '<STR_LIT>'
    elif isinstance(obj_final, SalesAlbaran):
        field_final = '<STR_LIT>'
        field_final_tax = '<STR_LIT>'
        complete = False
    elif isinstance(obj_final, SalesTicket):
        field_final = '<STR_LIT>'
        field_final_tax = '<STR_LIT>'
    elif isinstance(obj_final, SalesTicketRectification):
        field_final = '<STR_LIT>'
        complete = False
    elif isinstance(obj_final, SalesInvoice):
        field_final = '<STR_LIT>'
        field_final_tax = '<STR_LIT>'
    elif isinstance(obj_final, SalesInvoiceRectification):
        field_final = '<STR_LIT>'
        complete = False
    if unique:
        create = not SalesLines.objects.filter(**{
            "<STR_LIT>": list_lines,
            "<STR_LIT>".format(field_final): False
        }).exists()
    else:
        create = True
    """<STR_LIT>"""
    if create:
        with transaction.atomic():
            if hasattr(obj_src, '<STR_LIT>'):
                customer = obj_src.customer
            else:
                customer = obj_src.lines_sales.filter(removed=False).first().order.customer
            obj_final.customer = customer
            obj_final.date = datetime.datetime.now()
            obj_final.billing_series = obj_src.billing_series
            field_src_tax = None
            # Pick the line fields that point back to the source document.
            if isinstance(obj_src, SalesBasket):
                field_src = '<STR_LIT>'
                field_src_tax = '<STR_LIT>'
            elif isinstance(obj_src, SalesOrder) or isinstance(obj_src, SalesAlbaran):
                field_src = '<STR_LIT>'
                field_src_tax = '<STR_LIT>'
            elif isinstance(obj_src, SalesTicket) or isinstance(obj_src, SalesTicketRectification):
                field_src = '<STR_LIT>'
                field_src_tax = '<STR_LIT>'
            elif isinstance(obj_src, SalesInvoice) or isinstance(obj_src, SalesInvoiceRectification):
                field_src = '<STR_LIT>'
                field_src_tax = '<STR_LIT>'
            obj_final.save()
            qs = SalesLines.objects.filter(**{'<STR_LIT>': list_lines, '<STR_LIT>'.format(field_final): True})
            if qs:
                for line in qs:
                    setattr(line, field_final, obj_final)
                    if complete:
                        setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                        setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                        setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                        setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                        setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                        setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                    if field_src_tax and field_final_tax:
                        setattr(line, '{}'.format(field_final_tax), getattr(line, '{}'.format(field_src_tax)))
                    setattr(line, '<STR_LIT>'.format(field_final), getattr(line, '<STR_LIT>'.format(field_src)))
                    line.save()
                    """<STR_LIT>"""
                obj_src.lock = True
                obj_src.save()
                context['url'] = "<STR_LIT>".format(reverse(url_reverse), obj_final.pk)
                context['<STR_LIT>'] = obj_final
            else:
                # No valid source lines were found.
                context['error'] = msg_error_line_not_found
    else:
        context['error'] = msg_error_relation
else:
    context['error'] = msg_error_not_found
return context
pk: pk of the source document list_lines: list of pks of the source lines MODEL_SOURCE: model of the source document MODEL_FINAL: model of the final document url_reverse: destination url msg_error_relation: error message indicating that the lines are already related msg_error_not_found: error message indicating that the source object was not found msg_error_line_not_found: error message indicating that no valid source lines were found unique: (True/False) indicates whether more than one line can be associated with other lines
f7612:c19:m13
@staticmethod
def create_albaran_automatic(pk, list_lines):
lines = SalesLines.objects.filter(pk__in=list_lines, removed=False).exclude(albaran__isnull=False).values_list('<STR_LIT>')
lines_to_albaran = [x[0] for x in lines]
SalesLines.create_albaran_from_order(pk, lines_to_albaran)
Automatically create the delivery note (albarán).
f7612:c19:m16
def get(self, request, *args, **kwargs):
cart = ShoppingCartProxy(request)
return JsonResponse(cart.get_products(onlypublic=request.GET.get('<STR_LIT>', True)))
List all products in the shopping cart
f7617:c132:m0
def post(self, request, *args, **kwargs):
POST = json.loads(request.body.decode('utf-8'))
if '<STR_LIT>' in POST and '<STR_LIT>' in POST:
    cart = ShoppingCartProxy(request)
    cart.add(
        product_pk=int(POST['<STR_LIT>']),
        quantity=int(POST['<STR_LIT>'])
    )
    return JsonResponse(cart.products)
return HttpResponseBadRequest()
Adds a new product to the current shopping cart
f7617:c132:m1
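A sketch of a client-side call for the view above; the JSON key names `product_pk` and `quantity` are assumptions chosen to mirror the `cart.add(...)` keyword arguments (the actual keys are masked in the row), and the URL is illustrative.

import json
import urllib.request

payload = json.dumps({"product_pk": 42, "quantity": 2}).encode("utf-8")
req = urllib.request.Request(
    "https://example.com/shoppingcart/",  # hypothetical endpoint
    data=payload,
    headers={"Content-Type": "application/json"},
    method="POST",
)
# urllib.request.urlopen(req) would return the cart contents as JSON on success.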
def buy_product(self, product_pk):
return (self.invoice_sales.filter(line_invoice_sales__line_order__product__pk=product_pk).exists()
        or self.ticket_sales.filter(line_ticket_sales__line_order__product__pk=product_pk).exists())
Determine whether the customer has bought a given product.
f7625:c1:m5
@staticmethod
def create_document_from_another(pk, list_lines,
                                 MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
                                 url_reverse, related_line, related_object,
                                 msg_error_relation, msg_error_not_found, unique):
context = {}
obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
if list_lines and obj_src:
    list_lines = [int(x) for x in list_lines]
    if unique:
        create = not MODEL_LINE_FINAL.objects.filter(**{"<STR_LIT>".format(related_line): list_lines}).exists()
    else:
        create = True
    """<STR_LIT>"""
    if create:
        with transaction.atomic():
            obj_final = MODEL_FINAL()
            obj_final.customer = obj_src.customer
            obj_final.date = datetime.datetime.now()
            obj_final.billing_series = obj_src.billing_series
            if isinstance(obj_final, SalesOrder):
                obj_final.budget = obj_src
            obj_final.save()
            for lb_pk in list_lines:
                line_src = MODEL_LINE_SOURCE.objects.filter(pk=lb_pk).first()
                if line_src:
                    line_final = MODEL_LINE_FINAL(**{"<STR_LIT>".format(related_object): obj_final.pk, related_line: line_src})
                    src_list_fields = [f.name for f in line_src._meta.get_fields()]
                    dst_list_fields = [f.name for f in line_final._meta.get_fields()]
                    if '<STR_LIT>' in src_list_fields and '<STR_LIT>' in dst_list_fields:
                        line_final.product = line_src.product
                    if 'description' in src_list_fields and 'description' in dst_list_fields:
                        line_final.description = line_src.description
                    if 'code' in src_list_fields and 'code' in dst_list_fields:
                        line_final.code = line_src.code
                    if '<STR_LIT>' in src_list_fields and '<STR_LIT>' in dst_list_fields:
                        line_final.line_order = line_src.line_order
                    line_final.quantity = line_src.quantity
                    line_final.price_base = line_src.price_base
                    if '<STR_LIT>' in src_list_fields and '<STR_LIT>' in dst_list_fields:
                        line_final.price_recommended = line_src.price_recommended
                    line_final.tax = line_src.tax
                    line_final.discount = line_src.discount
                    if '<STR_LIT>' in src_list_fields and '<STR_LIT>' in dst_list_fields:
                        line_final.removed = line_src.removed
                    line_final.save()
                    if hasattr(line_src, '<STR_LIT>') and line_src.line_basket_option_sales.exists():
                        for opt_src in line_src.line_basket_option_sales.all():
                            opt_dst = SalesLineOrderOption()
                            opt_dst.line_order = line_final
                            opt_dst.product_option = opt_src.product_option
                            opt_dst.product_final = opt_src.product_final
                            opt_dst.quantity = opt_src.quantity
                            opt_dst.save()
            obj_src.lock = True
            obj_src.save()
            context['url'] = "<STR_LIT>".format(reverse(url_reverse), obj_final.pk)
            context['<STR_LIT>'] = obj_final
    else:
        context['error'] = msg_error_relation
else:
    context['error'] = msg_error_not_found
return context
pk: pk of the source document list_lines: list of pks of the source lines MODEL_SOURCE: model of the source document MODEL_FINAL: model of the final document MODEL_LINE_SOURCE: model of the source line MODEL_LINE_FINAL: model of the final line url_reverse: destination url related_line: field of the final line model to which the source line will be assigned related_object: field of the final line model to which the final object will be assigned msg_error_relation: error message indicating that the lines are already related msg_error_not_found: error message indicating that the source object was not found unique: (True/False) indicates whether more than one line can be associated with other lines
f7625:c11:m7
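The copy-by-intersection idea above (inspect `_meta.get_fields()` on both line models and copy only the field names they share) can be sketched without the ORM; a plain-Python analog with hypothetical names:

def copy_shared_attrs(src, dst, names):
    # Copy each named attribute that both objects expose.
    for name in names:
        if hasattr(src, name) and hasattr(dst, name):
            setattr(dst, name, getattr(src, name))
    return dst

class Line:
    description = ""
    quantity = 0

src, dst = Line(), Line()
src.description, src.quantity = "widget", 3
copy_shared_attrs(src, dst, ["description", "quantity", "code"])
print(dst.description, dst.quantity)  # widget 3 ('code' is skipped: neither side defines it)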
@staticmethod
def create_albaran_automatic(pk, list_lines):
line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('<STR_LIT>')
if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
    if line_bd.count() != 0:
        for x in line_bd[0]:
            list_lines.pop(list_lines.index(x))
    GenLineProduct.create_albaran_from_order(pk, list_lines)
Automatically create the delivery note (albarán).
f7625:c11:m10
@staticmethod
def create_ticket_from_albaran(pk, list_lines):
context = {}
if list_lines:
    new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('<STR_LIT>').filter(
        pk__in=[int(x) for x in list_lines]
    ).exclude(invoiced=True)]
    if new_list_lines:
        lo = SalesLineOrder.objects.values_list('<STR_LIT>').filter(pk__in=new_list_lines)[:1]
        if lo and lo[0] and lo[0][0]:
            new_pk = lo[0][0]
            context = GenLineProduct.create_ticket_from_order(new_pk, new_list_lines)
            if 'error' not in context or not context['error']:
                SalesLineAlbaran.objects.filter(
                    pk__in=[int(x) for x in list_lines]
                ).exclude(invoiced=True).update(invoiced=True)
            return context
        else:
            error = _('<STR_LIT>')
    else:
        error = _('<STR_LIT>')
else:
    error = _('<STR_LIT>')
context['error'] = error
return context
pk and list_lines refer to delivery notes (albaranes); we need the info from the order lines.
f7625:c11:m15
@staticmethod
def create_invoice_from_albaran(pk, list_lines):
context = {}
if list_lines:
    new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('<STR_LIT>').filter(
        pk__in=[int(x) for x in list_lines]
    ).exclude(invoiced=True)]
    if new_list_lines:
        lo = SalesLineOrder.objects.values_list('<STR_LIT>').filter(pk__in=new_list_lines)[:1]
        if lo and lo[0] and lo[0][0]:
            new_pk = lo[0][0]
            context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
            if 'error' not in context or not context['error']:
                SalesLineAlbaran.objects.filter(
                    pk__in=[int(x) for x in list_lines]
                ).exclude(invoiced=True).update(invoiced=True)
            return context
        else:
            error = _('<STR_LIT>')
    else:
        error = _('<STR_LIT>')
else:
    error = _('<STR_LIT>')
context['error'] = error
return context
pk and list_lines refer to delivery notes (albaranes); we need the info from the order lines.
f7625:c11:m16
@staticmethod
def create_invoice_from_ticket(pk, list_lines):
context = {}
if list_lines:
    new_list_lines = [x[0] for x in SalesLineTicket.objects.values_list('<STR_LIT>').filter(pk__in=[int(x) for x in list_lines])]
    if new_list_lines:
        lo = SalesLineOrder.objects.values_list('<STR_LIT>').filter(pk__in=new_list_lines)[:1]
        if lo and lo[0] and lo[0][0]:
            new_pk = lo[0][0]
            return GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
        else:
            error = _('<STR_LIT>')
    else:
        error = _('<STR_LIT>')
else:
    error = _('<STR_LIT>')
context['error'] = error
return context
pk and list_lines refer to tickets; we need the info from the order lines.
f7625:c11:m17
def set_options(self, options):
with transaction.atomic():
    for option in options:
        # The dict keys follow the structure given in the docstring below.
        opt = self.line_basket_option_sales.filter(
            product_option=option['product_option']
        ).first()
        if opt:
            change = False
            if opt.quantity != option['quantity']:
                opt.quantity = option['quantity']
                change = True
            if opt.product_final != option['product_final']:
                opt.product_final = option['product_final']
                change = True
            if change:
                opt.save()
        else:
            opt = SalesLineBasketOption()
            opt.line_budget = SalesLineBasket.objects.get(pk=self.pk)
            opt.product_option = option['product_option']
            opt.product_final = option['product_final']
            opt.quantity = option['quantity']
            opt.save()
options = [{ 'product_option': instance of ProductFinalOption, 'product_final': instance of ProductFinal, 'quantity': Float }, ]
f7625:c15:m4
def ensure_path_exists(dir_path):
if not os.path.exists(dir_path):
    os.makedirs(dir_path)
    return True
return False
Make sure that a path exists
f7641:m2
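The check-then-create pattern above is racy if two processes call it at once (both can see the directory as missing). A sketch of the modern idiom; the True/False return convention is kept from the row, everything else is standard library:

import os

def ensure_path_exists(dir_path):
    # Returns True if the directory had to be created, False if it already existed.
    existed = os.path.isdir(dir_path)
    os.makedirs(dir_path, exist_ok=True)  # no race on 'already exists'
    return not existed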
def ensure_path_exists(dir_path):
if not os.path.exists(dir_path):
    os.makedirs(dir_path)
    return True
return False
Make sure that a path exists
f7646:m3
def p_jsonpath_binop(self, p):
op = p[2]
if op == '.':
    p[0] = Child(p[1], p[3])
elif op == '..':
    p[0] = Descendants(p[1], p[3])
elif op == 'where':
    p[0] = Where(p[1], p[3])
elif op == '|':
    p[0] = Union(p[1], p[3])
elif op == '&':
    p[0] = Intersect(p[1], p[3])
jsonpath : jsonpath '.' jsonpath | jsonpath DOUBLEDOT jsonpath | jsonpath WHERE jsonpath | jsonpath '|' jsonpath | jsonpath '&' jsonpath
f7655:c0:m4
def p_jsonpath_fields(self, p):
p[0] = Fields(*p[1])
jsonpath : fields_or_any
f7655:c0:m5
def p_jsonpath_named_operator(self, p):
if p[1] == 'this':
    p[0] = This()
elif p[1] == 'parent':
    p[0] = Parent()
else:
    raise Exception('<STR_LIT>' % (p[1], p.lineno(1), p.lexpos(1)))
jsonpath : NAMED_OPERATOR
f7655:c0:m6
def p_jsonpath_root(self, p):
p[0] = Root()
jsonpath : '$'
f7655:c0:m7
def p_jsonpath_idx(self, p):
p[0] = p[2]
jsonpath : '[' idx ']'
f7655:c0:m8
def p_jsonpath_slice(self, p):
p[0] = p[2]
jsonpath : '[' slice ']'
f7655:c0:m9
def p_jsonpath_fieldbrackets(self, p):
p[0] = Fields(*p[2])
jsonpath : '[' fields ']'
f7655:c0:m10
def p_jsonpath_child_fieldbrackets(self, p):
p[0] = Child(p[1], Fields(*p[3]))
jsonpath : jsonpath '[' fields ']'
f7655:c0:m11
def p_jsonpath_child_idxbrackets(self, p):
p[0] = Child(p[1], p[3])
jsonpath : jsonpath '[' idx ']'
f7655:c0:m12
def p_jsonpath_child_slicebrackets(self, p):
p[0] = Child(p[1], p[3])
jsonpath : jsonpath '[' slice ']'
f7655:c0:m13
def p_jsonpath_parens(self, p):
p[0] = p[2]
jsonpath : '(' jsonpath ')'
f7655:c0:m14
def p_fields_or_any(self, p):
if p[1] == '*':
    p[0] = ['*']
else:
    p[0] = p[1]
fields_or_any : fields | '*'
f7655:c0:m15
def p_fields_id(self, p):
p[0] = [p[1]]
fields : ID
f7655:c0:m16
def p_fields_comma(self, p):
p[0] = p[1] + p[3]
fields : fields ',' fields
f7655:c0:m17
def p_idx(self, p):
p[0] = Index(p[1])
idx : NUMBER
f7655:c0:m18
def p_slice_any(self, p):
p[0] = Slice()
slice : '*'
f7655:c0:m19
def p_slice(self, p):
p[0] = Slice(start=p[1], end=p[3])
slice : maybe_int ':' maybe_int
f7655:c0:m20
def p_maybe_int(self, p):
p[0] = p[1]
maybe_int : NUMBER | empty
f7655:c0:m21
def p_empty(self, p):
p[0] = None
empty :
f7655:c0:m22
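For orientation, PLY derives its parse tables from these docstrings: each `p_*` method's docstring is a grammar production, and `p[i]` indexes the matched symbols. A self-contained toy sketch of the same pattern (a tiny addition grammar, unrelated to the jsonpath grammar above):

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER', 'PLUS')
t_PLUS = r'\+'
t_ignore = ' '

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

precedence = (('left', 'PLUS'),)

def p_expr_plus(p):
    "expr : expr PLUS expr"
    p[0] = p[1] + p[3]

def p_expr_number(p):
    "expr : NUMBER"
    p[0] = p[1]

def p_error(p):
    raise SyntaxError(p)

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('1 + 2 + 3', lexer=lexer))  # 6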
def tokenize(self, string):
new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
new_lexer.latest_newline = 0
new_lexer.string_value = None
new_lexer.input(string)
while True:
    t = new_lexer.token()
    if t is None:
        break
    t.col = t.lexpos - new_lexer.latest_newline
    yield t
if new_lexer.string_value is not None:
    raise JsonPathLexerError('<STR_LIT>')
Maps a string to an iterator over tokens. In other words: [char] -> [token]
f7656:c1:m1
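A usage sketch for the generator above, assuming these rows come from a jsonpath-rw style lexer class (the `JsonPathLexer` name and import path are assumptions):

from jsonpath_rw.lexer import JsonPathLexer

for tok in JsonPathLexer().tokenize('foo.bar[0]'):
    print(tok.type, tok.value, tok.col)
# Roughly: ID 'foo', '.', ID 'bar', '[', NUMBER 0, ']'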
def t_ID(self, t):
t.type = self.reserved_words.get(t.value, 'ID')
return t
r'[a-zA-Z_@][a-zA-Z0-9_@\-]*'
f7656:c1:m2
def t_NUMBER(self, t):
t.value = int(t.value)
return t
r'-?\d+'
f7656:c1:m3
def t_singlequote(self, t):
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('singlequote')
r"'"
f7656:c1:m4
def t_singlequote_content(self, t):
t.lexer.string_value += t.value
r"[^'\\]+
f7656:c1:m5
def t_singlequote_escape(self, t):
t.lexer.string_value += t.value[1]
r'\\.'
f7656:c1:m6
def t_singlequote_end(self, t):
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
r"'"
f7656:c1:m7
def t_doublequote(self, t):
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('doublequote')
r'"'
f7656:c1:m9
def t_doublequote_content(self, t):
t.lexer.string_value += t.value
r'[^"\\]+
f7656:c1:m10
def t_doublequote_escape(self, t):
t.lexer.string_value += t.value[1]
r'\\.'
f7656:c1:m11
def t_doublequote_end(self, t):
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
r'"'
f7656:c1:m12
def t_backquote(self, t):
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('backquote')
r'`'
f7656:c1:m14
def t_backquote_escape(self, t):
t.lexer.string_value += t.value[1]
r'\\.'
f7656:c1:m15
def t_backquote_content(self, t):
t.lexer.string_value += t.value
r"[^`\\]+
f7656:c1:m16
def t_backquote_end(self, t):
t.value = t.lexer.string_value
t.type = 'NAMED_OPERATOR'
t.lexer.string_value = None
t.lexer.pop_state()
return t
r'`'
f7656:c1:m17
def t_newline(self, t):
t.lexer.lineno += 1
t.lexer.latest_newline = t.lexpos
r'\n'
f7656:c1:m19
def find(self, data):
raise NotImplementedError()
All `JSONPath` types support `find()`, which returns an iterable of `DatumInContext`s. They keep track of the path followed to the current location, so if the calling code has some opinion about that, it can be passed in here as a starting point.
f7659:c0:m0
def update(self, data, val):
raise NotImplementedError()
Returns `data` with the specified path replaced by `val`. Only updates if the specified path exists.
f7659:c0:m1
def child(self, child):
if isinstance(self, This) or isinstance(self, Root):
    return child
elif isinstance(child, This):
    return self
elif isinstance(child, Root):
    return child
else:
    return Child(self, child)
Equivalent to Child(self, next) but with some canonicalization
f7659:c0:m2
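A quick illustration of what the canonicalization buys, assuming jsonpath-rw style classes (the import path is an assumption):

from jsonpath_rw.jsonpath import Child, Fields, This

print(This().child(Fields('a')))       # a   -- This on the left is absorbed
print(Fields('a').child(This()))       # a   -- This on the right is absorbed
print(Fields('a').child(Fields('b')))  # a.b -- general case wraps in Child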
@property
def id_pseudopath(self):
try:
    pseudopath = Fields(str(self.value[auto_id_field]))
except (TypeError, AttributeError, KeyError):
    pseudopath = self.path
if self.context:
    return self.context.id_pseudopath.child(pseudopath)
else:
    return pseudopath
Looks like a path, but with ids stuck in when available
f7659:c1:m4
def __init__(self, datum, id_field=None):
self.datum = datum
self.id_field = id_field or auto_id_field
Invariant is that datum.path is the path from context to datum. The auto id will either be the id in the datum (if present) or the id of the context followed by the path to the datum. The path to this datum is always the path to the context, the path to the datum, and then the auto id field.
f7659:c2:m0
def find(self, datum):
return [submatch
        for subdata in self.left.find(datum)
        if not isinstance(subdata, AutoIdForDatum)
        for submatch in self.right.find(subdata)]
Extra special case: auto ids do not have children, so cut it off right now rather than auto id the auto id
f7659:c5:m1
def setUp(self):
num_replicates = 100
num_params = 5
self.bootstrap_replicates = (np.arange(1, 1 + num_replicates)[:, None] *
                             np.arange(1, 1 + num_params)[None, :])
self.mle_params = self.bootstrap_replicates[50, :]
array_container = []
for est in self.mle_params:
    array_container.append(gumbel_r.rvs(loc=est, size=10))
self.jackknife_replicates = np.concatenate([x[:, None] for x in array_container], axis=1)
self.conf_percentage = <NUM_LIT>
self.test_data = np.array([<NUM_LIT>, <NUM_LIT>, 20, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, 20, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, 6,
                           0, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, 4, 32, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, 30, <NUM_LIT>])
num_test_obs = self.test_data.size

def calc_theta(array):
    result = ((array - array.mean())**2).sum() / float(array.size)
    return result

self.calc_theta = calc_theta
self.test_theta_hat = np.array([calc_theta(self.test_data)])
raw_series = pd.Series(self.test_data)
jackknife_replicates = np.empty((num_test_obs, 1), dtype=float)
for obs in xrange(num_test_obs):
    current_data = raw_series[raw_series.index != obs].values
    jackknife_replicates[obs] = calc_theta(current_data)
self.test_jackknife_replicates = jackknife_replicates
return None
Note that the spatial test data used in many of these tests comes from Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Chapter 14.
f7662:c0:m0
def setUp(self):
self.asym_model = self.make_asym_model()
self.uneven_model, self.scobit_model = self.make_uneven_and_scobit_models()
self.clog_model, self.mnl_model = self.make_clog_and_mnl_models()
self.mixed_model = self.make_mixed_model()
self.nested_model = self.make_nested_model()
return None
Create the real model objects.
f7664:c0:m5
def setUp(self):
self.fake_draws = mlc.get_normal_draws(2, 2, 1, seed=1)[0]
self.fake_betas = np.array([<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>])
self.fake_std = 1
self.fake_betas_ext = np.concatenate((self.fake_betas,
                                      np.array([self.fake_std])),
                                     axis=0)
self.fake_design = np.array([[1, 0, 1],
                             [0, 1, 2],
                             [0, 0, 3],
                             [1, 0, <NUM_LIT>],
                             [0, 1, <NUM_LIT>],
                             [0, 0, <NUM_LIT>],
                             [1, 0, 0.5],
                             [0, 1, 1.0],
                             [0, 0, <NUM_LIT>]])
self.mixing_pos = [2]
self.situation_ids = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
self.individual_ids = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2])
self.alternative_ids = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
self.choice_array = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0])
self.obs_1_rows = np.ones(self.fake_design.shape[0])
self.obs_1_rows[-3:] = 0
self.obs_2_rows = 1 - self.obs_1_rows
self.fake_rows_to_mixers = csr_matrix(self.obs_1_rows[:, None] ==
                                      np.array([1, 0])[None, :])
self.fake_rows_to_obs = csr_matrix(self.situation_ids[:, None] ==
                                   np.arange(1, 4)[None, :])
self.fake_rows_to_alts = csr_matrix(self.alternative_ids[:, None] ==
                                    np.arange(1, 4)[None, :])
arrays_to_join = (self.fake_design.copy(),
                  self.fake_design.copy()[:, -1][:, None])
self.fake_design_draw_1 = np.concatenate(arrays_to_join, axis=1)
self.fake_design_draw_2 = self.fake_design_draw_1.copy()
self.fake_design_draw_1[:, -1] *= (self.obs_1_rows *
                                   self.fake_draws[0, 0] +
                                   self.obs_2_rows *
                                   self.fake_draws[1, 0])
self.fake_design_draw_2[:, -1] *= (self.obs_1_rows *
                                   self.fake_draws[0, 1] +
                                   self.obs_2_rows *
                                   self.fake_draws[1, 1])
extended_design_draw_1 = self.fake_design_draw_1[:, None, :]
extended_design_draw_2 = self.fake_design_draw_2[:, None, :]
self.fake_design_3d = np.concatenate((extended_design_draw_1,
                                      extended_design_draw_2),
                                     axis=1)
self.sys_utilities_draw_1 = (self.fake_design_draw_1
                             .dot(self.fake_betas_ext))
self.sys_utilities_draw_2 = (self.fake_design_draw_2
                             .dot(self.fake_betas_ext))
long_exp_draw_1 = np.exp(self.sys_utilities_draw_1)
long_exp_draw_2 = np.exp(self.sys_utilities_draw_2)
ind_exp_sums_draw_1 = self.fake_rows_to_obs.T.dot(long_exp_draw_1)
ind_exp_sums_draw_2 = self.fake_rows_to_obs.T.dot(long_exp_draw_2)
long_exp_sum_draw_1 = self.fake_rows_to_obs.dot(ind_exp_sums_draw_1)
long_exp_sum_draw_2 = self.fake_rows_to_obs.dot(ind_exp_sums_draw_2)
long_probs_draw_1 = long_exp_draw_1 / long_exp_sum_draw_1
long_probs_draw_2 = long_exp_draw_2 / long_exp_sum_draw_2
self.prob_array = np.concatenate((long_probs_draw_1[:, None],
                                  long_probs_draw_2[:, None]),
                                 axis=1)
self.alt_id_column = "<STR_LIT>"
self.situation_id_column = "<STR_LIT>"
self.obs_id_column = "<STR_LIT>"
self.choice_column = "<STR_LIT>"
data = {"x": self.fake_design[:, 2],
        self.alt_id_column: self.alternative_ids,
        self.situation_id_column: self.situation_ids,
        self.obs_id_column: self.individual_ids,
        self.choice_column: self.choice_array}
self.fake_old_df = pd.DataFrame(data)
self.fake_old_df["<STR_LIT>"] = 1
self.fake_spec = OrderedDict()
self.fake_names = OrderedDict()
self.fake_spec["<STR_LIT>"] = [1, 2]
self.fake_names["<STR_LIT>"] = ["<STR_LIT>", "<STR_LIT>"]
self.fake_spec["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["<STR_LIT>"]
self.fake_mixing_vars = ["<STR_LIT>"]
args = [self.fake_old_df,
        self.alt_id_column,
        self.situation_id_column,
        self.choice_column,
        self.fake_spec]
kwargs = {"<STR_LIT>": self.fake_names,
          "<STR_LIT>": self.obs_id_column,
          "<STR_LIT>": self.fake_mixing_vars}
self.mixl_obj = mixed_logit.MixedLogit(*args, **kwargs)
self.mixl_obj.design_3d = self.fake_design_3d
self.mixl_obj.ind_var_names += ["<STR_LIT>"]
self.mixl_obj.coefs = pd.Series(self.fake_betas_ext)
self.mixl_obj.intercepts = None
self.mixl_obj.shapes = None
self.mixl_obj.nests = None
return None
Set up a mixed logit model
f7668:c0:m0
def setUp(self):
self.fake_betas = np.array([-<NUM_LIT>])
self.fake_intercepts = np.array([1, 0.5])
self.fake_intercept_names = ["<STR_LIT>", "<STR_LIT>"]
self.fake_intercept_ref_pos = 2
self.fake_shapes = np.array([-1, 1])
self.fake_shape_names = ["<STR_LIT>", "<STR_LIT>"]
self.fake_shape_ref_pos = 2
self.fake_all_params = np.concatenate((self.fake_shapes,
                                       self.fake_intercepts,
                                       self.fake_betas))
self.fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
                                              [0, 1, 0],
                                              [0, 0, 1],
                                              [1, 0, 0],
                                              [0, 0, 1]]))
self.fake_design = np.array([[1],
                             [2],
                             [3],
                             [<NUM_LIT>],
                             [<NUM_LIT>]])
self.fake_index = self.fake_design.dot(self.fake_betas)
self.fake_df = pd.DataFrame({"<STR_LIT>": [1, 1, 1, 2, 2],
                             "<STR_LIT>": [1, 2, 3, 1, 3],
                             "<STR_LIT>": [0, 1, 0, 0, 1],
                             "x": self.fake_design[:, 0],
                             "<STR_LIT>": [1 for i in range(5)]})
self.alt_id_col = "<STR_LIT>"
self.obs_id_col = "<STR_LIT>"
self.choice_col = "<STR_LIT>"
self.fake_specification = OrderedDict()
self.fake_names = OrderedDict()
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["<STR_LIT>"]
self.fake_nest_spec = OrderedDict()
self.fake_nest_spec["<STR_LIT>"] = [1, 3]
self.fake_nest_spec["<STR_LIT>"] = [2]
self.constructor_args = [self.fake_df,
                         self.alt_id_col,
                         self.obs_id_col,
                         self.choice_col,
                         self.fake_specification]
self.constructor_kwargs = {"<STR_LIT>": self.fake_intercept_ref_pos,
                           "<STR_LIT>": self.fake_shape_ref_pos,
                           "<STR_LIT>": self.fake_names,
                           "<STR_LIT>": self.fake_intercept_names,
                           "<STR_LIT>": self.fake_shape_names,
                           "<STR_LIT>": self.fake_nest_spec}
self.model_obj = base_cm.MNDC_Model(*self.constructor_args,
                                    **self.constructor_kwargs)
Create a fake dataset and specification from which we can initialize a choice model.
f7669:c0:m0
def setUp(self):
self.fake_betas = np.array([-<NUM_LIT>])
self.fake_intercepts = np.array([1, 0.5])
self.fake_intercept_names = ["<STR_LIT>", "<STR_LIT>"]
self.fake_intercept_ref_pos = 2
self.fake_shapes = np.array([-1, 1])
self.fake_shape_names = ["<STR_LIT>", "<STR_LIT>"]
self.fake_shape_ref_pos = 2
self.fake_all_params = np.concatenate((self.fake_shapes,
                                       self.fake_intercepts,
                                       self.fake_betas))
self.fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
                                              [0, 1, 0],
                                              [0, 0, 1],
                                              [1, 0, 0],
                                              [0, 0, 1]]))
self.fake_design = np.array([[1],
                             [2],
                             [3],
                             [<NUM_LIT>],
                             [<NUM_LIT>]])
self.fake_index = self.fake_design.dot(self.fake_betas)
self.fake_df = pd.DataFrame({"<STR_LIT>": [1, 1, 1, 2, 2],
                             "<STR_LIT>": [1, 2, 3, 1, 3],
                             "<STR_LIT>": [0, 1, 0, 0, 1],
                             "x": self.fake_design[:, 0],
                             "<STR_LIT>": [1 for i in range(5)]})
self.alt_id_col = "<STR_LIT>"
self.obs_id_col = "<STR_LIT>"
self.choice_col = "<STR_LIT>"
self.fake_specification = OrderedDict()
self.fake_names = OrderedDict()
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["<STR_LIT>"]
self.fake_nest_spec = OrderedDict()
self.fake_nest_spec["<STR_LIT>"] = [1, 3]
self.fake_nest_spec["<STR_LIT>"] = [2]
self.constructor_args = [self.fake_df,
                         self.alt_id_col,
                         self.obs_id_col,
                         self.choice_col,
                         self.fake_specification]
self.constructor_kwargs = {"<STR_LIT>": self.fake_intercept_ref_pos,
                           "<STR_LIT>": self.fake_shape_ref_pos,
                           "<STR_LIT>": self.fake_names,
                           "<STR_LIT>": self.fake_intercept_names,
                           "<STR_LIT>": self.fake_shape_names,
                           "<STR_LIT>": self.fake_nest_spec}
self.model_obj = base_cm.MNDC_Model(*self.constructor_args,
                                    **self.constructor_kwargs)
self.log_likelihood = -10
self.fitted_probs = np.array([<NUM_LIT>, <NUM_LIT>])
self.long_fitted_probs = np.array([0.1, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>])
self.long_residuals = np.array([-0.1, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>])
self.ind_chi_squareds = (np.square(self.long_residuals) /
                         self.long_fitted_probs)
self.chi_square = self.ind_chi_squareds.sum()
self.estimation_success = True
self.estimation_message = "<STR_LIT>"
self.null_log_likelihood = -<NUM_LIT>
self.rho_squared = 1 - (self.log_likelihood / self.null_log_likelihood)
self.rho_bar_squared = (self.rho_squared +
                        self.fake_all_params.shape[0] /
                        self.null_log_likelihood)
self.estimation_message = "<STR_LIT>"
self.estimation_success = True
return None
Perform additional setup materials needed to test the store estimation results functions.
f7669:c4:m0
def temp_utility_transform(sys_utility_array, *args, **kwargs):
if len(sys_utility_array.shape) == 1:
    systematic_utilities = sys_utility_array[:, np.newaxis]
else:
    systematic_utilities = sys_utility_array
return systematic_utilities
Parameters
----------
sys_utility_array : numpy array.
    Should be 1D or 2D. Should have been created by the dot product of a design matrix and an array of index coefficients.

Returns
-------
2D numpy array.
    The returned array will contain a representation of the `sys_utility_array`. If `sys_utility_array` is 2D, it will be returned unaltered. Else, the function will return `sys_utility_array[:, None]`.
f7672:m0
def setUp(self):
self.fake_betas = np.array([-<NUM_LIT>])
self.fake_intercepts = np.array([1, 0.5])
self.fake_intercept_names = ["<STR_LIT>", "<STR_LIT>"]
self.fake_intercept_ref_pos = 2
self.fake_shapes = np.array([-1, 1])
self.fake_shape_names = ["<STR_LIT>", "<STR_LIT>"]
self.fake_shape_ref_pos = 2
self.fake_all_params = np.concatenate((self.fake_shapes,
                                       self.fake_intercepts,
                                       self.fake_betas))
self.fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
                                              [0, 1, 0],
                                              [0, 0, 1],
                                              [1, 0, 0],
                                              [0, 0, 1]]))
self.fake_design = np.array([[1],
                             [2],
                             [3],
                             [<NUM_LIT>],
                             [<NUM_LIT>]])
self.fake_index = self.fake_design.dot(self.fake_betas)
self.fake_df = pd.DataFrame({"<STR_LIT>": [1, 1, 1, 2, 2],
                             "<STR_LIT>": [1, 2, 3, 1, 3],
                             "<STR_LIT>": [0, 1, 0, 0, 1],
                             "x": self.fake_design[:, 0],
                             "<STR_LIT>": [1 for i in range(5)]})
self.alt_id_col = "<STR_LIT>"
self.obs_id_col = "<STR_LIT>"
self.choice_col = "<STR_LIT>"
self.fake_specification = OrderedDict()
self.fake_names = OrderedDict()
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["<STR_LIT>"]
self.fake_nest_spec = OrderedDict()
self.fake_nest_spec["<STR_LIT>"] = [1, 3]
self.fake_nest_spec["<STR_LIT>"] = [2]
self.constructor_args = [self.fake_df,
                         self.alt_id_col,
                         self.obs_id_col,
                         self.choice_col,
                         self.fake_specification]
self.constructor_kwargs = {"<STR_LIT>": self.fake_intercept_ref_pos,
                           "<STR_LIT>": self.fake_shape_ref_pos,
                           "<STR_LIT>": self.fake_names,
                           "<STR_LIT>": self.fake_intercept_names,
                           "<STR_LIT>": self.fake_shape_names,
                           "<STR_LIT>": self.fake_nest_spec}
self.model_obj = base_cm.MNDC_Model(*self.constructor_args,
                                    **self.constructor_kwargs)
Create a fake dataset and specification from which we can initialize a choice model.
f7677:c0:m0
def setUp(self):
self.fake_betas = np.array([-<NUM_LIT>])
self.fake_intercepts = np.array([1, 0.5])
self.fake_intercept_names = ["<STR_LIT>", "<STR_LIT>"]
self.fake_intercept_ref_pos = 2
self.fake_shapes = np.array([-1, 0, 1])
self.fake_shape_names = ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]
self.fake_shape_ref_pos = 2
self.fake_design = np.array([[1],
                             [2],
                             [3],
                             [<NUM_LIT>],
                             [<NUM_LIT>]])
self.fake_index = self.fake_design.dot(self.fake_betas)
self.fake_df = pd.DataFrame({"<STR_LIT>": [1, 1, 1, 2, 2],
                             "<STR_LIT>": [1, 2, 3, 1, 3],
                             "<STR_LIT>": [0, 1, 0, 0, 1],
                             "x": self.fake_design[:, 0],
                             "<STR_LIT>": [1 for i in range(5)]})
self.alt_id_col = "<STR_LIT>"
self.obs_id_col = "<STR_LIT>"
self.choice_col = "<STR_LIT>"
self.fake_specification = OrderedDict()
self.fake_names = OrderedDict()
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["<STR_LIT>"]
self.fake_nest_spec = OrderedDict()
self.fake_nest_spec["<STR_LIT>"] = [1, 2]
self.fake_nest_spec["<STR_LIT>"] = [3]
self.constructor_args = [self.fake_df,
                         self.alt_id_col,
                         self.obs_id_col,
                         self.choice_col,
                         self.fake_specification,
                         "<STR_LIT>"]
self.constructor_kwargs = {"<STR_LIT>": self.fake_intercept_ref_pos,
                           "<STR_LIT>": self.fake_names,
                           "<STR_LIT>": self.fake_intercept_names,
                           "<STR_LIT>": self.fake_shape_names}
Create the input data needed to test the choice model constructor.
f7678:c0:m0
def setUp(self):
self.mnl_model = self.make_mnl_model()
self.asym_model = self.make_asym_model()
self.mixed_model = self.make_mixed_model()
self.nested_model = self.make_nested_model()
return None
Create the real model objects.
f7680:c2:m4
def calc_theta(self, weight):
a_mean = weight.dot(self.test_data)
differences = (self.test_data - a_mean)
squared_diffs = differences**2
return weight.dot(squared_diffs)
See Equation 14.22 of Efron and Tibshirani (1994).
f7681:c1:m0
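For orientation, the method above computes the weighted plug-in variance theta(w) = sum_i w_i * (x_i - sum_j w_j * x_j)**2, which reduces to the ordinary plug-in variance at uniform weights w_i = 1/n (per the Efron and Tibshirani reference). A standalone numeric check with made-up data:

import numpy as np

def calc_theta(weight, data):
    # Weighted plug-in variance: sum_i w_i * (x_i - weighted mean)^2
    a_mean = weight.dot(data)
    return weight.dot((data - a_mean) ** 2)

data = np.array([1.0, 2.0, 3.0, 4.0])
uniform = np.full(data.size, 1.0 / data.size)
print(calc_theta(uniform, data))            # 1.25
print(((data - data.mean()) ** 2).mean())   # 1.25, matches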
def split_param_vec(param_vec, rows_to_alts, design, return_all_types=False):
num_index_coefs = design.shape[1]
betas = param_vec[-1 * num_index_coefs:]
remaining_idx = param_vec.shape[0] - num_index_coefs
if remaining_idx > 0:
    intercepts = param_vec[:remaining_idx]
else:
    intercepts = None
if return_all_types:
    return None, None, intercepts, betas
else:
    return None, intercepts, betas
Parameters
----------
param_vec : 1D ndarray.
    Elements should all be ints, floats, or longs. Should have as many elements as there are parameters being estimated.
rows_to_alts : 2D scipy sparse matrix.
    There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones.
design : 2D ndarray.
    There should be one row per observation per available alternative. There should be one column per utility coefficient being estimated. All elements should be ints, floats, or longs.
return_all_types : bool, optional.
    Determines whether or not a tuple of 4 elements will be returned (with one element for the nest, shape, intercept, and index parameters for this model). If False, a tuple of 3 elements will be returned, as described below.

Returns
-------
`(None, intercepts, betas)` : tuple.
    The first element will be None since the clog-log model has no shape parameters. The second element will either be a 1D array of "outside" intercept parameters for this model or None, depending on whether outside intercepts are being estimated or not. The third element will be a 1D array of the index coefficients.

Note
----
If `return_all_types == True` then the function will return a tuple of four objects. In order, these objects will either be None or the arrays corresponding to the nest, shape, intercept, and index parameters.
f7683:m0
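A quick sketch of the split performed above, using made-up sizes (a 2-column design, so the last two entries of `param_vec` are the index coefficients; `rows_to_alts` is not used by the split itself and can be None here):

import numpy as np

param_vec = np.array([0.5, -0.2, 1.0, -1.5])  # 2 outside intercepts + 2 betas
design = np.zeros((6, 2))                     # only design.shape[1] matters

shapes, intercepts, betas = split_param_vec(param_vec, None, design)
print(shapes)      # None -- the clog-log model has no shape parameters
print(intercepts)  # [ 0.5 -0.2]
print(betas)       # [ 1.  -1.5]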
def _cloglog_utility_transform(systematic_utilities,
                               alt_IDs,
                               rows_to_alts,
                               shape_params,
                               intercept_params,
                               intercept_ref_pos=None,
                               *args, **kwargs):
exp_v = np.exp(systematic_utilities)
exp_exp_v = np.exp(exp_v)
transformations = np.log(exp_exp_v - 1)
transformations[np.isneginf(transformations)] = -1 * max_comp_value
too_big_idx = np.where(systematic_utilities >= <NUM_LIT>)
transformations[too_big_idx] = np.exp(systematic_utilities[too_big_idx])
inf_idx = np.isposinf(transformations)
transformations[inf_idx] = max_comp_value
if intercept_params is not None and intercept_ref_pos is not None:
    needed_idxs = list(range(rows_to_alts.shape[1]))
    needed_idxs.remove(intercept_ref_pos)
    if len(intercept_params.shape) > 1 and intercept_params.shape[1] > 1:
        all_intercepts = np.zeros((rows_to_alts.shape[1],
                                   intercept_params.shape[1]))
        all_intercepts[needed_idxs, :] = intercept_params
    else:
        all_intercepts = np.zeros(rows_to_alts.shape[1])
        all_intercepts[needed_idxs] = intercept_params
    transformations += rows_to_alts.dot(all_intercepts)
if len(transformations.shape) == 1:
    transformations = transformations[:, None]
return transformations
Parameters
----------
systematic_utilities : 1D ndarray.
    All elements should be ints, floats, or longs. Should contain the systematic utilities of each observation per available alternative. Note that this vector is formed by the dot product of the design matrix with the vector of utility coefficients.
alt_IDs : 1D ndarray.
    All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D scipy sparse matrix.
    There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones.
shape_params : None or 1D ndarray.
    Should be None since the clog-log model has no shape parameters.
intercept_params : None or 1D ndarray.
    If an array, each element should be an int, float, or long. If J is the total number of possible alternatives for the dataset being modeled, there should be J-1 elements in the array. Use None if no outside intercepts are being estimated.
intercept_ref_pos : int, or None, optional.
    Specifies the index of the alternative, in the ordered array of unique alternatives, that is not having its intercept parameter estimated (in order to ensure identifiability). Should only be None if `intercept_params` is None.

Returns
-------
transformations : 2D ndarray.
    Should have shape `(systematic_utilities.shape[0], 1)`. The returned array contains the transformed utility values for this model. All elements will be ints, longs, or floats.
f7683:m1
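The core transform above is h(V) = log(exp(exp(V)) - 1). A tiny standalone check of the formula, and of why the overflow guard substitutes exp(V) for large V (`max_comp_value` in the row is the module's overflow cap):

import numpy as np

v = np.array([-2.0, 0.0, 2.0])
h = np.log(np.exp(np.exp(v)) - 1.0)
print(h)          # approx [-1.93, 0.54, 7.39]
print(np.exp(v))  # approx [ 0.14, 1.00, 7.39]
# h(V) -> exp(V) as V grows, which is why the row substitutes np.exp(V)
# beyond a cutoff instead of evaluating the overflowing exp(exp(V)) directly.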
def _cloglog_transform_deriv_v(systematic_utilities,
                               alt_IDs,
                               rows_to_alts,
                               shape_params,
                               output_array=None,
                               *args, **kwargs):
exp_neg_v = np.exp(-1 * systematic_utilities)
exp_v = np.exp(systematic_utilities)
denom_part_1 = 1 - np.exp(-1 * exp_v)
exp_neg_v[np.isposinf(exp_neg_v)] = max_comp_value
exp_neg_v[np.where(exp_neg_v == 0)] = min_comp_value
derivs = 1.0 / (denom_part_1 * exp_neg_v)
derivs[np.where(denom_part_1 == 0)] = 1
derivs[np.isposinf(derivs)] = max_comp_value
output_array.data = derivs
return output_array
Parameters
----------
systematic_utilities : 1D ndarray.
    All elements should be ints, floats, or longs. Should contain the systematic utilities of each observation per available alternative. Note that this vector is formed by the dot product of the design matrix with the vector of utility coefficients.
alt_IDs : 1D ndarray.
    All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D scipy sparse matrix.
    There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones.
shape_params : None or 1D ndarray.
    If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used.
output_array : 2D scipy sparse array.
    The array should be square and should have `systematic_utilities.shape[0]` rows. Its data is to be replaced with the correct derivatives of the transformation vector with respect to the vector of systematic utilities. This argument is NOT optional.

Returns
-------
output_array : 2D scipy sparse array.
    The shape of the returned array is `(systematic_utilities.shape[0], systematic_utilities.shape[0])`. The returned array specifies the derivative of the transformed utilities with respect to the systematic utilities. All elements are ints, floats, or longs.
f7683:m2
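Differentiating h(V) = log(exp(exp(V)) - 1) gives dh/dV = exp(exp(V)) * exp(V) / (exp(exp(V)) - 1); the row above evaluates the algebraically equivalent form 1 / ((1 - exp(-exp(V))) * exp(-V)), which avoids forming the overflow-prone exp(exp(V)) directly. A numeric check of the equivalence:

import numpy as np

v = np.array([-1.0, 0.0, 1.0])
form_used = 1.0 / ((1.0 - np.exp(-np.exp(v))) * np.exp(-v))
direct = np.exp(np.exp(v)) * np.exp(v) / (np.exp(np.exp(v)) - 1.0)
print(np.allclose(form_used, direct))  # True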
def _cloglog_transform_deriv_c(*args, **kwargs):
return None
Returns
-------
None.
    This is a placeholder function since the clog-log model has no shape parameters.
f7683:m3
def _cloglog_transform_deriv_alpha(systematic_utilities,
                                   alt_IDs,
                                   rows_to_alts,
                                   intercept_params,
                                   output_array=None,
                                   *args, **kwargs):
return output_array
Parameters
----------
systematic_utilities : 1D ndarray.
    All elements should be ints, floats, or longs. Should contain the systematic utilities of each observation per available alternative. Note that this vector is formed by the dot product of the design matrix with the vector of utility coefficients.
alt_IDs : 1D ndarray.
    All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D scipy sparse matrix.
    There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones.
intercept_params : 1D ndarray or None.
    If an array, each element should be an int, float, or long. For identifiability, there should be J-1 elements where J is the total number of observed alternatives for this dataset.
output_array : None or 2D scipy sparse array.
    If a sparse array is passed, it should contain the derivative of the vector of transformed utilities with respect to the intercept parameters outside of the index. This keyword argument will be returned. If there are no intercept parameters outside of the index, then `output_array` should equal None. If there are intercept parameters outside of the index, then `output_array` should be `rows_to_alts` with all of its columns except the column corresponding to the alternative whose intercept is not being estimated (in order to ensure identifiability).

Returns
-------
output_array.
f7683:m4
def create_calc_dh_dv(estimator):
dh_dv = diags(np.ones(estimator.design.shape[<NUM_LIT:0>]), <NUM_LIT:0>, format='<STR_LIT>')<EOL>calc_dh_dv = partial(_cloglog_transform_deriv_v, output_array=dh_dv)<EOL>return calc_dh_dv<EOL>
Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the index. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `design` attribute that is a 2D ndarray representing the design matrix for this model and dataset. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utility vector with respect to the vector of systematic utilities. The dimensions of the returned array should be `(design.shape[0], design.shape[0])`.
f7683:m5
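The factory above pre-allocates the identity-shaped sparse array once and binds it with `functools.partial`, so repeated gradient and hessian evaluations reuse the same buffer. A minimal sketch of that pattern with a toy derivative function (the names here are illustrative, not part of the library):

import numpy as np
from functools import partial
from scipy.sparse import diags

def _toy_deriv_v(sys_utils, alt_ids, rows_to_alts, shape_params,
                 output_array=None):
    # Fill the pre-allocated diagonal in place and hand it back.
    output_array.data = np.ones(sys_utils.shape[0])
    return output_array

num_rows = 5  # stands in for design.shape[0]
dh_dv = diags(np.ones(num_rows), 0, format='csr')
calc_dh_dv = partial(_toy_deriv_v, output_array=dh_dv)
result = calc_dh_dv(np.zeros(num_rows), None, None, None)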
def create_calc_dh_d_alpha(estimator):
if estimator.intercept_ref_pos is not None:<EOL><INDENT>needed_idxs = list(range(estimator.rows_to_alts.shape[<NUM_LIT:1>]))<EOL>needed_idxs.remove(estimator.intercept_ref_pos)<EOL>dh_d_alpha = (estimator.rows_to_alts<EOL>.copy()<EOL>.transpose()[needed_idxs, :]<EOL>.transpose())<EOL><DEDENT>else:<EOL><INDENT>dh_d_alpha = None<EOL><DEDENT>calc_dh_d_alpha = partial(_cloglog_transform_deriv_alpha,<EOL>output_array=dh_d_alpha)<EOL>return calc_dh_d_alpha<EOL>
Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the outside intercept parameters. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `rows_to_alts` attribute that is a 2D scipy sparse matrix that maps the rows of the `design` matrix to the alternatives available in this dataset. Should also contain an `intercept_ref_pos` attribute that is either None or an int. This attribute should denote which intercept is not being estimated (in the case of outside intercept parameters) for identification purposes. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utility vector with respect to the vector of outside intercepts. The dimensions of the returned array should be `(design.shape[0], num_alternatives - 1)`.
f7683:m6
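The column-dropping logic above can be checked on a tiny example. A sketch with three alternatives and `intercept_ref_pos == 0`, so the derivative with respect to the outside intercepts is `rows_to_alts` without the reference alternative's column:

import numpy as np
from scipy.sparse import csr_matrix

rows_to_alts = csr_matrix(np.array([[1, 0, 0],
                                    [0, 1, 0],
                                    [0, 0, 1]]))
needed_idxs = [1, 2]  # every alternative except the reference (index 0)
dh_d_alpha = rows_to_alts.transpose()[needed_idxs, :].transpose()
print(dh_d_alpha.toarray())  # shape (3, 2)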
def check_length_of_initial_values(self, init_values):
<EOL>num_alts = self.rows_to_alts.shape[<NUM_LIT:1>]<EOL>num_index_coefs = self.design.shape[<NUM_LIT:1>]<EOL>if self.intercept_ref_pos is not None:<EOL><INDENT>assumed_param_dimensions = num_index_coefs + num_alts - <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>assumed_param_dimensions = num_index_coefs<EOL><DEDENT>if init_values.shape[<NUM_LIT:0>] != assumed_param_dimensions:<EOL><INDENT>msg_1 = "<STR_LIT>"<EOL>msg_2 = "<STR_LIT>"<EOL>msg_3 = "<STR_LIT>"<EOL>raise ValueError(msg_1 +<EOL>msg_2.format(assumed_param_dimensions) +<EOL>msg_3.format(init_values.shape[<NUM_LIT:0>]))<EOL><DEDENT>return None<EOL>
Ensures that `init_values` is of the correct length. Raises a helpful ValueError if otherwise. Parameters ---------- init_values : 1D ndarray. The initial values to start the optimization process with. There should be one value for each index coefficient and outside intercept parameter being estimated. Returns ------- None.
f7683:c0:m1
def fit_mle(self,<EOL>init_vals,<EOL>init_intercepts=None,<EOL>init_coefs=None,<EOL>print_res=True,<EOL>method="<STR_LIT>",<EOL>loss_tol=<NUM_LIT>,<EOL>gradient_tol=<NUM_LIT>,<EOL>maxiter=<NUM_LIT:1000>,<EOL>ridge=None,<EOL>constrained_pos=None,<EOL>just_point=False,<EOL>**kwargs):
<EOL>if "<STR_LIT>" in kwargs:<EOL><INDENT>msg = "<STR_LIT>"<EOL>msg_2 = "<STR_LIT>"<EOL>msg_3 = "<STR_LIT>"<EOL>raise ValueError(msg + msg_2 + msg_3)<EOL><DEDENT>if ridge is not None:<EOL><INDENT>warnings.warn(_ridge_warning_msg)<EOL><DEDENT>self.optimization_method = method<EOL>self.ridge_param = ridge<EOL>mapping_res = self.get_mappings_for_fit()<EOL>rows_to_alts = mapping_res["<STR_LIT>"]<EOL>if init_vals is None and init_coefs is not None:<EOL><INDENT>num_alternatives = rows_to_alts.shape[<NUM_LIT:1>]<EOL>try:<EOL><INDENT>assert init_coefs.shape[<NUM_LIT:0>] == self.design.shape[<NUM_LIT:1>]<EOL><DEDENT>except AssertionError:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ValueError(msg.format(init_coefs.shape,<EOL>self.design.shape[<NUM_LIT:1>]))<EOL><DEDENT>if init_intercepts is not None:<EOL><INDENT>if init_intercepts.shape[<NUM_LIT:0>] != (num_alternatives - <NUM_LIT:1>):<EOL><INDENT>msg = "<STR_LIT>"<EOL>msg_2 = "<STR_LIT>"<EOL>raise ValueError(msg.format(init_intercepts.shape) +<EOL>msg_2.format(num_alternatives - <NUM_LIT:1>))<EOL><DEDENT>init_vals = np.concatenate((init_intercepts,<EOL>init_coefs), axis=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>init_vals = init_coefs<EOL><DEDENT><DEDENT>elif init_vals is None and init_coefs is None:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ValueError(msg)<EOL><DEDENT>zero_vector = np.zeros(init_vals.shape)<EOL>clog_estimator = ClogEstimator(self,<EOL>mapping_res,<EOL>ridge,<EOL>zero_vector,<EOL>split_param_vec,<EOL>constrained_pos=constrained_pos)<EOL>clog_estimator.set_derivatives()<EOL>clog_estimator.check_length_of_initial_values(init_vals)<EOL>estimation_res = estimate(init_vals,<EOL>clog_estimator,<EOL>method,<EOL>loss_tol,<EOL>gradient_tol,<EOL>maxiter,<EOL>print_res,<EOL>just_point=just_point)<EOL>if not just_point:<EOL><INDENT>self.store_fit_results(estimation_res)<EOL>return None<EOL><DEDENT>else:<EOL><INDENT>return estimation_res<EOL><DEDENT>
Parameters ---------- init_vals : 1D ndarray. The initial values to start the optimization process with. There should be one value for each index coefficient and outside intercept parameter being estimated. Intercept parameters should come before index coefficients. One can also pass None, and instead pass `init_intercepts` (if `"intercept"` is not in the utility specification) and `init_coefs`. init_intercepts : 1D ndarray or None, optional. The initial values of the intercept parameters. There should be one parameter per possible alternative id in the dataset, minus one. The passed values for this argument will be ignored if `init_vals` is not None. This keyword argument should only be used if `"intercept"` is not in the utility specification. Default == None. init_coefs : 1D ndarray or None, optional. The initial values of the index coefficients. There should be one coefficient per index variable. The passed values for this argument will be ignored if `init_vals` is not None. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they are determined. Default `== True`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. just_point : bool, optional. Determines whether (True) or not (False) calculations that are non-critical for obtaining the maximum likelihood point estimate will be performed. If True, this function will return the results dictionary from scipy.optimize. Default == False. Returns ------- None. Estimation results are saved to the model instance.
f7683:c1:m1
def calc_probabilities(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>utility_transform,<EOL>intercept_params=None,<EOL>shape_params=None,<EOL>chosen_row_to_obs=None,<EOL>return_long_probs=False):
<EOL>if (len(beta.shape) >= <NUM_LIT:2>) and (len(design.shape) >= <NUM_LIT:3>):<EOL><INDENT>msg_1 = "<STR_LIT>"<EOL>msg_2 = "<STR_LIT>"<EOL>raise ValueError(msg_1 + msg_2)<EOL><DEDENT>if chosen_row_to_obs is None and return_long_probs is False:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ValueError(msg)<EOL><DEDENT>sys_utilities = design.dot(beta)<EOL>transformed_utilities = utility_transform(sys_utilities,<EOL>alt_IDs,<EOL>rows_to_alts,<EOL>shape_params,<EOL>intercept_params)<EOL>too_small_idx = transformed_utilities < min_exponent_val<EOL>too_large_idx = transformed_utilities > max_exponent_val<EOL>transformed_utilities[too_small_idx] = min_exponent_val<EOL>transformed_utilities[too_large_idx] = max_exponent_val<EOL>long_exponentials = np.exp(transformed_utilities)<EOL>individual_denominators = np.asarray(rows_to_obs.transpose().dot(<EOL>long_exponentials))<EOL>long_denominators = np.asarray(rows_to_obs.dot(individual_denominators))<EOL>if len(long_exponentials.shape) > <NUM_LIT:1> and long_exponentials.shape[<NUM_LIT:1>] > <NUM_LIT:1>:<EOL><INDENT>long_probs = (long_exponentials / long_denominators)<EOL><DEDENT>else:<EOL><INDENT>long_probs = (long_exponentials / long_denominators).ravel()<EOL><DEDENT>long_probs[long_probs == <NUM_LIT:0>] = min_comp_value<EOL>if chosen_row_to_obs is None:<EOL><INDENT>chosen_probs = None<EOL><DEDENT>else:<EOL><INDENT>chosen_exponentials = np.asarray(<EOL>chosen_row_to_obs.transpose().dot(long_exponentials))<EOL>if len(long_exponentials.shape) > <NUM_LIT:1> and long_exponentials.shape[<NUM_LIT:1>] > <NUM_LIT:1>:<EOL><INDENT>chosen_probs = chosen_exponentials / individual_denominators<EOL><DEDENT>else:<EOL><INDENT>chosen_probs = (chosen_exponentials /<EOL>individual_denominators).ravel()<EOL><DEDENT><DEDENT>if return_long_probs and chosen_probs is not None:<EOL><INDENT>return chosen_probs, long_probs<EOL><DEDENT>elif return_long_probs and chosen_probs is None:<EOL><INDENT>return long_probs<EOL><DEDENT>elif chosen_probs is not None:<EOL><INDENT>return chosen_probs<EOL><DEDENT>
Parameters ---------- beta : 1D or 2D ndarray. All elements should be ints, floats, or longs. If 1D, should have 1 element for each utility coefficient being estimated (i.e. num_features). If 2D, should have 1 column for each set of coefficients being used to predict the probabilities of each alternative being chosen. There should be one row per index coefficient. design : 2D or 3D ndarray. There should be one row per observation per available alternative. There should be one column per utility coefficient being estimated. All elements should be ints, floats, or longs. If `len(design.shape) == 3`, then beta MUST be 1D. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D ndarray. There should be one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D ndarray. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. utility_transform : callable. Should accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 1D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated. intercept_params : 1D ndarray, or None, optional. If an array, each element should be an int, float, or long. For identifiability, there should be J - 1 elements where J is the total number of observed alternatives for this dataset. Default == None. shape_params : 1D ndarray, or None, optional. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. Default == None. chosen_row_to_obs : 2D scipy sparse array, or None, optional. There should be one row per observation per available alternative and one column per observation. This matrix indicates, for each observation (on the columns), which rows of the design matrix were the realized outcome. If an array is passed then an array of shape (num_observations,) will be returned and each element will be the probability of the realized outcome of the given observation. Default == None. return_long_probs : bool, optional. Indicates whether or not the long format probabilities (a 1D numpy array with one element per observation per available alternative) should be returned. Default == False. Returns ------- numpy array or tuple of two numpy arrays. If `chosen_row_to_obs` is passed AND `return_long_probs is True`, then the tuple `(chosen_probs, long_probs)` is returned. If `return_long_probs is True` and `chosen_row_to_obs is None`, then `long_probs` is returned. If `chosen_row_to_obs` is passed and `return_long_probs is False` then `chosen_probs` is returned. `chosen_probs` is a 1D numpy array of shape (num_observations,). Each element is the probability of the corresponding observation being associated with its realized outcome. `long_probs` is a 1D numpy array with one element per observation per available alternative for that observation. Each element is the probability of the corresponding observation being associated with that row's corresponding alternative. If `beta` is a 2D array, `chosen_probs` and `long_probs` will also be 2D arrays, with as many columns as there are sets of parameters being used to calculate probabilities with. It is NOT valid to have `chosen_row_to_obs == None` and `return_long_probs == False`.
f7684:m0
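A small worked example of the probability computation described above, assuming an MNL-style identity transform, one observation, three alternatives, and two index variables:

import numpy as np
from scipy.sparse import csr_matrix

design = np.array([[1.0, 0.5],
                   [0.0, 1.0],
                   [2.0, 0.0]])
beta = np.array([0.4, -0.2])
rows_to_obs = csr_matrix(np.ones((3, 1)))  # every row maps to observation 0
exp_utils = np.exp(design.dot(beta))
# Sum exponentiated utilities within the observation, then broadcast back.
denom = rows_to_obs.dot(rows_to_obs.transpose().dot(exp_utils))
long_probs = exp_utils / denom
print(long_probs, long_probs.sum())  # probabilities sum to 1.0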
def calc_log_likelihood(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>choice_vector,<EOL>utility_transform,<EOL>intercept_params=None,<EOL>shape_params=None,<EOL>ridge=None,<EOL>weights=None):
<EOL>long_probs = calc_probabilities(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>utility_transform,<EOL>intercept_params=intercept_params,<EOL>shape_params=shape_params,<EOL>return_long_probs=True)<EOL>if weights is None:<EOL><INDENT>weights = <NUM_LIT:1><EOL><DEDENT>log_likelihood = choice_vector.dot(weights * np.log(long_probs))<EOL>if ridge is None:<EOL><INDENT>return log_likelihood<EOL><DEDENT>else:<EOL><INDENT>param_list = [x for x in [shape_params, intercept_params, beta]<EOL>if x is not None]<EOL>if len(param_list) > <NUM_LIT:1>:<EOL><INDENT>params = np.concatenate(param_list, axis=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>params = param_list[<NUM_LIT:0>]<EOL><DEDENT>return log_likelihood - ridge * np.square(params).sum()<EOL><DEDENT>
Parameters ---------- beta : 1D ndarray. All elements should be ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features). design : 2D ndarray. There should be one row per observation per available alternative. There should be one column per utility coefficient being estimated. All elements should be ints, floats, or longs. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D ndarray. There should be one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D ndarray. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. choice_vector : 1D ndarray. All elements should be either ones or zeros. There should be one row per observation per available alternative for the given observation. Elements denote the alternative which is chosen by the given observation with a 1 and a zero otherwise. utility_transform : callable. Should accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 1D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated. intercept_params : 1D ndarray, or None, optional. If an array, each element should be an int, float, or long. For identifiability, there should be J - 1 elements where J is the total number of observed alternatives for this dataset. Default == None. shape_params : 1D ndarray, or None, optional. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. Default == None. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If an int, float or long is passed, then that scalar determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- log_likelihood : float. The log likelihood of the multinomial choice model.
f7684:m1
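The ridge branch above implements a simple L2 penalty: the penalized objective is the log-likelihood minus `ridge` times the sum of squared parameters, with every estimated parameter block concatenated before squaring. A minimal numeric sketch with hypothetical values:

import numpy as np

log_likelihood = -104.7
intercept_params = np.array([1.1])
beta = np.array([0.4, -0.2])
ridge = 0.5
params = np.concatenate([intercept_params, beta])
penalized_ll = log_likelihood - ridge * np.square(params).sum()
print(penalized_ll)  # -104.7 - 0.5 * (1.21 + 0.16 + 0.04) = -105.405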
def calc_gradient(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>choice_vector,<EOL>utility_transform,<EOL>transform_first_deriv_c,<EOL>transform_first_deriv_v,<EOL>transform_deriv_alpha,<EOL>intercept_params,<EOL>shape_params,<EOL>ridge,<EOL>weights):
<EOL>sys_utilities = design.dot(beta)<EOL>long_probs = calc_probabilities(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>utility_transform,<EOL>intercept_params=intercept_params,<EOL>shape_params=shape_params,<EOL>return_long_probs=True)<EOL>if weights is None:<EOL><INDENT>weights = <NUM_LIT:1><EOL><DEDENT>dh_dc = transform_first_deriv_c(sys_utilities, alt_IDs,<EOL>rows_to_alts, shape_params)<EOL>dh_d_alpha = transform_deriv_alpha(sys_utilities, alt_IDs,<EOL>rows_to_alts, intercept_params)<EOL>dh_dv = transform_first_deriv_v(sys_utilities, alt_IDs,<EOL>rows_to_alts, shape_params)<EOL>dh_db = dh_dv.dot(design)<EOL>d_ll_dh = np.multiply(weights, choice_vector - long_probs)[np.newaxis, :]<EOL>d_ll_d_beta = d_ll_dh.dot(dh_db)<EOL>if shape_params is not None and intercept_params is not None:<EOL><INDENT>gradient = np.concatenate((np.asarray(d_ll_dh * hstack((dh_dc,<EOL>dh_d_alpha),<EOL>format='<STR_LIT>')),<EOL>d_ll_d_beta), axis=<NUM_LIT:1>).ravel()<EOL>params = np.concatenate((shape_params, intercept_params, beta),<EOL>axis=<NUM_LIT:0>)<EOL><DEDENT>elif shape_params is not None and intercept_params is None:<EOL><INDENT>gradient = np.concatenate((np.asarray(d_ll_dh * dh_dc), d_ll_d_beta),<EOL>axis=<NUM_LIT:1>).ravel()<EOL>params = np.concatenate((shape_params, beta), axis=<NUM_LIT:0>)<EOL><DEDENT>elif shape_params is None and intercept_params is not None:<EOL><INDENT>gradient = np.concatenate((np.asarray(d_ll_dh * dh_d_alpha),<EOL>d_ll_d_beta), axis=<NUM_LIT:1>).ravel()<EOL>params = np.concatenate((intercept_params, beta), axis=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>gradient = d_ll_d_beta.ravel()<EOL>params = beta<EOL><DEDENT>if ridge is not None:<EOL><INDENT>gradient -= <NUM_LIT:2> * ridge * params<EOL><DEDENT>return gradient<EOL>
Parameters ---------- beta : 1D ndarray. All elements should be ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features). design : 2D ndarray. There should be one row per observation per available alternative. There should be one column per utility coefficient being estimated. All elements should be ints, floats, or longs. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D scipy sparse array. There should be one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D scipy sparse array. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. choice_vector : 1D ndarray. All elements should be either ones or zeros. There should be one row per observation per available alternative for the given observation. Elements denote the alternative which is chosen by the given observation with a 1 and a zero otherwise. utility_transform : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 1D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated. transform_first_deriv_c : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, the `rows_to_alts` array, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D matrix or sparse array whose elements contain the derivative of the transformed utility vector with respect to the vector of shape parameters. The dimensions of the returned array should be `(design.shape[0], num_alternatives)`. If there are no shape parameters then the callable should return None. transform_first_deriv_v : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utility vector with respect to the vector of systematic utilities. The dimensions of the returned array should be `(design.shape[0], design.shape[0])`. transform_deriv_alpha : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, the `rows_to_alts` array, (intercept parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utility vector with respect to the vector of outside intercept parameters. The dimensions of the returned array should be `(design.shape[0], num_alternatives - 1)`. If there are no intercept parameters, the callable should return None. intercept_params : 1D numpy array or None. If an array, each element should be an int, float, or long. For identifiability, there should be J - 1 elements where J is the total number of observed alternatives for this dataset. shape_params : 1D ndarray or None. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. ridge : int, float, long, or None. Determines whether or not ridge regression is performed. If an int, float or long is passed, then that scalar determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- gradient : 1D ndarray. There is one element per estimated shape, intercept, and index parameter. It is the first derivative (i.e. the gradient) of the log-likelihood with respect to those parameters.
f7684:m2
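The assembly above is a chain-rule computation: the score with respect to any parameter block is `(y - p)'` times that block's derivative of the transformed utilities. A minimal sketch for an index-only model, where `dh_dv` is the identity so `dh_db` collapses to the design matrix (toy numbers):

import numpy as np

choice_vector = np.array([1.0, 0.0, 0.0])
long_probs = np.array([0.5, 0.3, 0.2])
design = np.array([[1.0, 0.5],
                   [0.0, 1.0],
                   [2.0, 0.0]])
d_ll_dh = (choice_vector - long_probs)[np.newaxis, :]
gradient = d_ll_dh.dot(design).ravel()  # d log-likelihood / d beta
print(gradient)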
def create_matrix_block_indices(row_to_obs):
<EOL>output_indices = []<EOL>num_obs = row_to_obs.shape[<NUM_LIT:1>]<EOL>row_indices, col_indices, values = scipy.sparse.find(row_to_obs)<EOL>for col in xrange(num_obs):<EOL><INDENT>output_indices.append(row_indices[np.where(col_indices == col)])<EOL><DEDENT>return output_indices<EOL>
Parameters ---------- row_to_obs : 2D scipy sparse matrix. There should be one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). Returns ------- output_indices : list of arrays. There will be one array per column in `row_to_obs`. The array will note which rows correspond to which observations.
f7684:m3
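A small worked example of the row-grouping above, with two observations, where the first has two available alternatives and the second has three:

import numpy as np
import scipy.sparse

row_to_obs = scipy.sparse.csr_matrix(np.array([[1, 0],
                                               [1, 0],
                                               [0, 1],
                                               [0, 1],
                                               [0, 1]]))
row_idx, col_idx, _ = scipy.sparse.find(row_to_obs)
blocks = [row_idx[np.where(col_idx == col)]
          for col in range(row_to_obs.shape[1])]
# blocks == [array([0, 1]), array([2, 3, 4])]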
def robust_outer_product(vec_1, vec_2):
mantissa_1, exponents_1 = np.frexp(vec_1)<EOL>mantissa_2, exponents_2 = np.frexp(vec_2)<EOL>new_mantissas = mantissa_1[None, :] * mantissa_2[:, None]<EOL>new_exponents = exponents_1[None, :] + exponents_2[:, None]<EOL>return new_mantissas * np.exp2(new_exponents)<EOL>
Calculates a 'robust' outer product of two vectors that may or may not contain very small values. Parameters ---------- vec_1 : 1D ndarray vec_2 : 1D ndarray Returns ------- outer_prod : 2D ndarray. The outer product of `vec_2` and `vec_1`, i.e. `outer_prod[i, j] = vec_2[i] * vec_1[j]`, computed via a mantissa/exponent decomposition that avoids underflow in the intermediate products.
f7684:m4
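A quick sanity check of the frexp/exp2 decomposition above: splitting each value into mantissa and exponent, multiplying the mantissas, and summing the exponents reproduces the ordinary outer product (note the row index corresponds to `vec_2`):

import numpy as np

v1 = np.array([3.0, 0.25])
v2 = np.array([8.0, 0.5])
m1, e1 = np.frexp(v1)
m2, e2 = np.frexp(v2)
robust = (m1[None, :] * m2[:, None]) * np.exp2(e1[None, :] + e2[:, None])
assert np.allclose(robust, np.outer(v2, v1))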
def create_matrix_blocks(long_probs, matrix_block_indices):
<EOL>output_matrices = []<EOL>for indices in matrix_block_indices:<EOL><INDENT>current_probs = long_probs[indices]<EOL>probability_outer_product = robust_outer_product(current_probs,<EOL>current_probs)<EOL>dP_i_dh_i = np.diag(current_probs) - probability_outer_product<EOL>diag_idx = np.diag_indices_from(dP_i_dh_i)<EOL>diag_values = dP_i_dh_i[diag_idx].copy()<EOL>diag_values[diag_values == <NUM_LIT:0>] = min_comp_value<EOL>dP_i_dh_i[diag_idx] = diag_values<EOL>underflow_idxs = np.where(dP_i_dh_i == <NUM_LIT:0>)<EOL>for i in xrange(underflow_idxs[<NUM_LIT:0>].size):<EOL><INDENT>row_idx, col_idx = underflow_idxs[<NUM_LIT:0>][i], underflow_idxs[<NUM_LIT:1>][i]<EOL>if row_idx != col_idx:<EOL><INDENT>dP_i_dh_i[row_idx,<EOL>col_idx] = -<NUM_LIT:1> * min_comp_value<EOL><DEDENT><DEDENT>output_matrices.append(dP_i_dh_i)<EOL><DEDENT>return output_matrices<EOL>
Parameters ---------- long_probs : 1D ndarray. There should be one row per observation per available alternative. The elements of the array will indicate the probability of the alternative being the outcome associated with the corresponding observation. matrix_block_indices : list of arrays. There will be one array per observation. The arrays will note which rows correspond to which observations. Returns ------- output_matrices : list of matrices. Each matrix will contain the derivative of P_i with respect to H_i, and there will be one matrix for each observation i. P_i is the array of probabilities of each observation being associated with its available alternatives. H_i is the array of transformed index values for each alternative that is available to observation i.
f7684:m5
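One block of the output above can be written down by hand: for a single observation with probabilities `p` over its available alternatives, `dP_i/dh_i` is `diag(p) - p p'`, the usual softmax Jacobian. A minimal check:

import numpy as np

p = np.array([0.5, 0.3, 0.2])
dP_dh = np.diag(p) - np.outer(p, p)
# Each row sums to zero: probability mass is reallocated, never created.
assert np.allclose(dP_dh.sum(axis=1), 0.0)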
def calc_hessian(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>utility_transform,<EOL>transform_first_deriv_c,<EOL>transform_first_deriv_v,<EOL>transform_deriv_alpha,<EOL>block_matrix_idxs,<EOL>intercept_params,<EOL>shape_params,<EOL>ridge,<EOL>weights):
<EOL>sys_utilities = design.dot(beta)<EOL>long_probs = calc_probabilities(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>utility_transform,<EOL>intercept_params=intercept_params,<EOL>shape_params=shape_params,<EOL>return_long_probs=True)<EOL>if weights is None:<EOL><INDENT>weights = np.ones(design.shape[<NUM_LIT:0>])<EOL><DEDENT>dh_dc = transform_first_deriv_c(sys_utilities, alt_IDs,<EOL>rows_to_alts, shape_params)<EOL>dh_dv = transform_first_deriv_v(sys_utilities, alt_IDs,<EOL>rows_to_alts, shape_params)<EOL>dh_d_alpha = transform_deriv_alpha(sys_utilities, alt_IDs,<EOL>rows_to_alts, intercept_params)<EOL>dh_db = dh_dv.dot(design)<EOL>block_matrices = create_matrix_blocks(long_probs, block_matrix_idxs)<EOL>dp_dh = block_diag(*block_matrices) * weights[None, :]<EOL>d2_ll_db2 = -<NUM_LIT:1> * dh_db.T.dot(dp_dh.dot(dh_db))<EOL>if shape_params is not None and intercept_params is not None:<EOL><INDENT>d2_ll_dc2 = -<NUM_LIT:1> * dh_dc.T.dot(dp_dh * dh_dc)<EOL>d2_ll_d_alpha2 = -<NUM_LIT:1> * dh_d_alpha.T.dot(dp_dh * dh_d_alpha)<EOL>d2_ll_dc_d_alpha = -<NUM_LIT:1> * dh_d_alpha.T.dot(dp_dh * dh_dc)<EOL>d2_ll_dc_db = -<NUM_LIT:1> * dh_db.T.dot(dp_dh * dh_dc)<EOL>d2_ll_d_alpha_db = -<NUM_LIT:1> * dh_db.T.dot(dp_dh * dh_d_alpha)<EOL>top_row = np.concatenate((d2_ll_dc2,<EOL>d2_ll_dc_d_alpha.T,<EOL>d2_ll_dc_db.T), axis=<NUM_LIT:1>)<EOL>middle_row = np.concatenate((d2_ll_dc_d_alpha,<EOL>d2_ll_d_alpha2,<EOL>d2_ll_d_alpha_db.T), axis=<NUM_LIT:1>)<EOL>last_row = np.concatenate((d2_ll_dc_db,<EOL>d2_ll_d_alpha_db,<EOL>d2_ll_db2), axis=<NUM_LIT:1>)<EOL>hess = np.concatenate((top_row,<EOL>middle_row,<EOL>last_row), axis=<NUM_LIT:0>)<EOL><DEDENT>elif shape_params is not None and intercept_params is None:<EOL><INDENT>d2_ll_dc2 = -<NUM_LIT:1> * dh_dc.T.dot(dp_dh * dh_dc)<EOL>d2_ll_dc_db = -<NUM_LIT:1> * dh_db.T.dot(dp_dh * dh_dc)<EOL>hess = np.concatenate((np.concatenate((d2_ll_dc2,<EOL>d2_ll_dc_db.T), axis=<NUM_LIT:1>),<EOL>np.concatenate((d2_ll_dc_db,<EOL>d2_ll_db2), axis=<NUM_LIT:1>)), axis=<NUM_LIT:0>)<EOL><DEDENT>elif shape_params is None and intercept_params is not None:<EOL><INDENT>d2_ll_d_alpha2 = -<NUM_LIT:1> * dh_d_alpha.T.dot(dp_dh * dh_d_alpha)<EOL>d2_ll_d_alpha_db = -<NUM_LIT:1> * dh_db.T.dot(dp_dh * dh_d_alpha)<EOL>hess = np.concatenate((np.concatenate((d2_ll_d_alpha2,<EOL>d2_ll_d_alpha_db.T), axis=<NUM_LIT:1>),<EOL>np.concatenate((d2_ll_d_alpha_db,<EOL>d2_ll_db2), axis=<NUM_LIT:1>)), axis=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>hess = d2_ll_db2<EOL><DEDENT>if ridge is not None:<EOL><INDENT>hess -= <NUM_LIT:2> * ridge<EOL><DEDENT>if isinstance(hess, np.matrixlib.defmatrix.matrix):<EOL><INDENT>hess = np.asarray(hess)<EOL><DEDENT>return hess<EOL>
Parameters ---------- beta : 1D ndarray. All elements should be ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features). design : 2D ndarray. There should be one row per observation per available alternative. There should be one column per utility coefficient being estimated. All elements should be ints, floats, or longs. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D ndarray. There should be one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D ndarray. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. utility_transform : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 1D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated. transform_first_deriv_c : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, the `rows_to_alts` array, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utilities with respect to the vector of shape parameters. The dimensions of the returned array should be `(design.shape[0], num_alternatives)`. transform_first_deriv_v : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utility vector with respect to the vector of systematic utilities. The dimensions of the returned array should be `(design.shape[0], design.shape[0])`. transform_deriv_alpha : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, the rows_to_alts array, (intercept parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utilities with respect to the vector of outside intercept parameters. The dimensions of the returned array should be `(design.shape[0], num_alternatives - 1)`. If `intercept_params == None`, the callable should return None. block_matrix_idxs : list of arrays. There will be one array per observation. The arrays will note which rows correspond to which observations. intercept_params : 1D ndarray. Each element should be an int, float, or long. For identifiability, there should be J - 1 elements where J is the total number of observed alternatives in the dataset. shape_params : None or 1D ndarray. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. Default == None. ridge : int, float, long, or None. Determines whether or not ridge regression is performed. If an int, float or long is passed, then that scalar determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- hess : 2D ndarray. It is a square matrix with one row and one column per estimated shape, intercept, and index parameter. It is the second derivative of the log likelihood with respect to those parameters.
f7684:m6
def calc_fisher_info_matrix(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>choice_vector,<EOL>utility_transform,<EOL>transform_first_deriv_c,<EOL>transform_first_deriv_v,<EOL>transform_deriv_alpha,<EOL>intercept_params,<EOL>shape_params,<EOL>ridge,<EOL>weights):
<EOL>sys_utilities = design.dot(beta)<EOL>long_probs = calc_probabilities(beta,<EOL>design,<EOL>alt_IDs,<EOL>rows_to_obs,<EOL>rows_to_alts,<EOL>utility_transform,<EOL>intercept_params=intercept_params,<EOL>shape_params=shape_params,<EOL>return_long_probs=True)<EOL>if weights is None:<EOL><INDENT>weights = np.ones(design.shape[<NUM_LIT:0>])<EOL><DEDENT>weights_per_obs = np.max(rows_to_obs.toarray() * weights[:, None], axis=<NUM_LIT:0>)<EOL>dh_dc = transform_first_deriv_c(sys_utilities, alt_IDs,<EOL>rows_to_alts, shape_params)<EOL>dh_dv = transform_first_deriv_v(sys_utilities, alt_IDs,<EOL>rows_to_alts, shape_params)<EOL>dh_d_alpha = transform_deriv_alpha(sys_utilities, alt_IDs,<EOL>rows_to_alts, intercept_params)<EOL>dh_db = np.asarray(dh_dv.dot(design))<EOL>d_ll_dh = (choice_vector - long_probs)[np.newaxis, :]<EOL>if shape_params is not None and intercept_params is not None:<EOL><INDENT>if isinstance(dh_dc, np.matrixlib.defmatrix.matrix):<EOL><INDENT>gradient_vec = d_ll_dh.T * np.concatenate((dh_dc.A,<EOL>dh_d_alpha.toarray(),<EOL>dh_db), axis=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>gradient_vec = d_ll_dh.T * np.concatenate((dh_dc.toarray(),<EOL>dh_d_alpha.toarray(),<EOL>dh_db), axis=<NUM_LIT:1>)<EOL><DEDENT><DEDENT>elif shape_params is not None and intercept_params is None:<EOL><INDENT>if isinstance(dh_dc, np.matrixlib.defmatrix.matrix):<EOL><INDENT>gradient_vec = d_ll_dh.T * np.concatenate((dh_dc.A, dh_db), axis=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>gradient_vec = d_ll_dh.T * np.concatenate((dh_dc.toarray(),<EOL>dh_db), axis=<NUM_LIT:1>)<EOL><DEDENT><DEDENT>elif shape_params is None and intercept_params is not None:<EOL><INDENT>gradient_vec = d_ll_dh.T * np.concatenate((dh_d_alpha.toarray(),<EOL>dh_db), axis=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>gradient_vec = d_ll_dh.T * dh_db<EOL><DEDENT>gradient_vec = rows_to_obs.T.dot(gradient_vec)<EOL>fisher_matrix = gradient_vec.T.dot(np.multiply(weights_per_obs[:, None], gradient_vec))<EOL>if ridge is not None:<EOL><INDENT>fisher_matrix -= <NUM_LIT:2> * ridge<EOL><DEDENT>return fisher_matrix<EOL>
Parameters ---------- beta : 1D ndarray. All elements should be ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features). design : 2D ndarray. There should be one row per observation per available alternative. There should be one column per utility coefficient being estimated. All elements should be ints, floats, or longs. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per observation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D ndarray. There should be one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D ndarray. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. choice_vector : 1D ndarray. All elements should be either ones or zeros. There should be one row per observation per available alternative for the given observation. Elements denote the alternative which is chosen by the given observation with a 1 and a zero otherwise. utility_transform : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 1D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated. transform_first_deriv_c : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, the `rows_to_alts` array, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utilities with respect to the vector of shape parameters. The dimensions of the returned array should be `(design.shape[0], num_alternatives)`. transform_first_deriv_v : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the utility transformation vector with respect to the vector of systematic utilities. The dimensions of the returned array should be `(design.shape[0], design.shape[0])`. transform_deriv_alpha : callable. Must accept a 1D array of systematic utility values, a 1D array of alternative IDs, the `rows_to_alts` array, (intercept parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utility vector with respect to the vector of outside intercept parameters. The dimensions of the returned array should be `(design.shape[0], num_alternatives - 1)`. If there are no intercept parameters, the callable should return None. intercept_params : 1D ndarray or None. If an array, each element should be an int, float, or long. For identifiability, there should be J - 1 elements where J is the total number of observed alternatives for this dataset. shape_params : None or 1D ndarray. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. Default == None. ridge : int, float, long, or None. Determines whether or not ridge regression is performed. If an int, float or long is passed, then that scalar determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- fisher_matrix : 2D numpy array. It will be a square matrix, with one row and one column for each shape, intercept, and index coefficient. Contains the BHHH approximation to the Fisher Information matrix of the log likelihood.
f7684:m7
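The BHHH step above sums outer products of per-observation score vectors, which is equivalent to `G' diag(w) G` when `G` stacks one gradient row per observation. A minimal sketch with hypothetical numbers:

import numpy as np

gradient_vec = np.array([[0.2, -0.1],
                         [-0.3, 0.4]])  # one score row per observation
weights_per_obs = np.array([1.0, 1.0])
fisher_matrix = gradient_vec.T.dot(weights_per_obs[:, None] * gradient_vec)
print(fisher_matrix)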
def calc_asymptotic_covariance(hessian, fisher_info_matrix):
<EOL>hess_inv = scipy.linalg.inv(hessian)<EOL>return np.dot(hess_inv, np.dot(fisher_info_matrix, hess_inv))<EOL>
Parameters ---------- hessian : 2D ndarray. It should have shape `(num_vars, num_vars)`. It is the matrix of second derivatives of the total loss across the dataset, with respect to each pair of coefficients being estimated. fisher_info_matrix : 2D ndarray. It should have a shape of `(num_vars, num_vars)`. It is the approximation of the negative of the expected hessian formed by taking the outer product of (each observation's gradient of the loss function) with itself, and then summing across all observations. Returns ------- huber_white_matrix : 2D ndarray. Will have shape `(num_vars, num_vars)`. The entries in the returned matrix are calculated by the following formula: `hess_inverse * fisher_info_matrix * hess_inverse`.
f7684:m8
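A tiny numeric illustration of the sandwich formula above, with hypothetical hessian and BHHH matrices:

import numpy as np
import scipy.linalg

hessian = np.array([[-2.0, 0.1],
                    [0.1, -1.5]])
fisher_info_matrix = np.array([[2.1, -0.2],
                               [-0.2, 1.4]])
hess_inv = scipy.linalg.inv(hessian)
huber_white = hess_inv.dot(fisher_info_matrix).dot(hess_inv)
print(huber_white)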
def split_param_vec(beta,<EOL>rows_to_alts=None,<EOL>design=None,<EOL>return_all_types=False,<EOL>*args, **kwargs):
if return_all_types:<EOL><INDENT>return None, None, None, beta<EOL><DEDENT>else:<EOL><INDENT>return None, None, beta<EOL><DEDENT>
Parameters ---------- beta : 1D ndarray. All elements should be ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features). rows_to_alts : None. Not actually used. Included merely for consistency with other models. design : None. Not actually used. Included merely for consistency with other models. return_all_types : bool, optional. Determines whether or not a tuple of 4 elements will be returned (with one element for the nest, shape, intercept, and index parameters for this model). If False, a tuple of 3 elements will be returned, as described below. Returns ------- tuple. `(None, None, beta)`. This function is merely for compatibility with the other choice model files. Note ---- If `return_all_types == True` then the function will return a tuple of four objects. In order, these objects will either be None or the arrays corresponding to the nest, shape, intercept, and index parameters.
f7685:m0
def _mnl_utility_transform(systematic_utilities, *args, **kwargs):
<EOL>if len(systematic_utilities.shape) == <NUM_LIT:1>:<EOL><INDENT>systematic_utilities = systematic_utilities[:, np.newaxis]<EOL><DEDENT>return systematic_utilities<EOL>
Parameters ---------- systematic_utilities : 1D ndarray. Should contain the systematic utilities for each available alternative for each observation. Returns ------- `systematic_utilities[:, None]`
f7685:m1
def _mnl_transform_deriv_c(*args, **kwargs):
<EOL>return None<EOL>
Returns None. This is a placeholder function since the MNL model has no shape parameters.
f7685:m2
def _mnl_transform_deriv_alpha(*args, **kwargs):
<EOL>return None<EOL>
Returns None. This is a placeholder function since the MNL model has no intercept parameters outside of the index.
f7685:m3
def check_length_of_initial_values(self, init_values):
<EOL>num_index_coefs = self.design.shape[<NUM_LIT:1>]<EOL>if init_values.shape[<NUM_LIT:0>] != num_index_coefs:<EOL><INDENT>msg_1 = "<STR_LIT>"<EOL>msg_2 = "<STR_LIT>"<EOL>msg_3 = "<STR_LIT>"<EOL>raise ValueError(msg_1 +<EOL>msg_2.format(num_index_coefs) +<EOL>msg_3.format(init_values.shape[<NUM_LIT:0>]))<EOL><DEDENT>return None<EOL>
Ensures that `init_values` is of the correct length. Raises a helpful ValueError if otherwise. Parameters ---------- init_values : 1D ndarray. The initial values to start the optimization process with. There should be one value for each index coefficient being estimated. Returns ------- None.
f7685:c0:m1
def fit_mle(self,<EOL>init_vals,<EOL>print_res=True,<EOL>method="<STR_LIT>",<EOL>loss_tol=<NUM_LIT>,<EOL>gradient_tol=<NUM_LIT>,<EOL>maxiter=<NUM_LIT:1000>,<EOL>ridge=None,<EOL>constrained_pos=None,<EOL>just_point=False,<EOL>**kwargs):
<EOL>kwargs_to_be_ignored = ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]<EOL>if any([x in kwargs for x in kwargs_to_be_ignored]):<EOL><INDENT>msg = "<STR_LIT>"<EOL>msg_2 = "<STR_LIT>"<EOL>raise ValueError(msg.format(kwargs_to_be_ignored) + msg_2)<EOL><DEDENT>if ridge is not None:<EOL><INDENT>warnings.warn(_ridge_warning_msg)<EOL><DEDENT>self.optimization_method = method<EOL>self.ridge_param = ridge<EOL>mapping_res = self.get_mappings_for_fit()<EOL>zero_vector = np.zeros(init_vals.shape)<EOL>mnl_estimator = MNLEstimator(self,<EOL>mapping_res,<EOL>ridge,<EOL>zero_vector,<EOL>split_param_vec,<EOL>constrained_pos=constrained_pos)<EOL>mnl_estimator.set_derivatives()<EOL>mnl_estimator.check_length_of_initial_values(init_vals)<EOL>estimation_res = estimate(init_vals,<EOL>mnl_estimator,<EOL>method,<EOL>loss_tol,<EOL>gradient_tol,<EOL>maxiter,<EOL>print_res,<EOL>just_point=just_point)<EOL>if not just_point:<EOL><INDENT>self.store_fit_results(estimation_res)<EOL>return None<EOL><DEDENT>else:<EOL><INDENT>return estimation_res<EOL><DEDENT>
Parameters ---------- init_vals : 1D ndarray. The initial values to start the optimization process with. There should be one value for each utility coefficient being estimated. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they are determined. method : str, optional. Should be a valid string that can be passed to scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. If 'em' is passed, a custom coded EM algorithm will be used. Default `== 'newton-cg'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. just_point : bool, optional. Determines whether (True) or not (False) calculations that are non-critical for obtaining the maximum likelihood point estimate will be performed. If True, this function will return the results dictionary from scipy.optimize. Default == False. Returns ------- None or dict. If `just_point` is False, None is returned and the estimation results are saved to the model instance. If `just_point` is True, then the results dictionary from scipy.optimize() is returned.
f7685:c1:m1
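A hedged end-to-end usage sketch: `fit_mle` above is normally reached through pylogit's model-creation interface. The column names and specification here are hypothetical, and `df` is assumed to be a long-format DataFrame with one row per observation per available alternative:

import numpy as np
from collections import OrderedDict
import pylogit

spec = OrderedDict()
spec["travel_time"] = [[1, 2, 3]]  # one generic coefficient across alternatives
names = OrderedDict()
names["travel_time"] = ["travel_time coefficient"]
model = pylogit.create_choice_model(data=df,  # df assumed defined beforehand
                                    alt_id_col="alt_id",
                                    obs_id_col="obs_id",
                                    choice_col="choice",
                                    specification=spec,
                                    model_type="MNL",
                                    names=names)
model.fit_mle(np.zeros(1))  # one starting value per index coefficient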
def is_kernel():
if any([x in sys.modules for x in ['<STR_LIT>', '<STR_LIT>']]):<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
Determines whether or not one's code is executed inside of an ipython notebook environment.
f7686:m0
def get_param_names(model_obj):
<EOL>all_names = deepcopy(model_obj.ind_var_names)<EOL>if model_obj.intercept_names is not None:<EOL><INDENT>all_names = model_obj.intercept_names + all_names<EOL><DEDENT>if model_obj.shape_names is not None:<EOL><INDENT>all_names = model_obj.shape_names + all_names<EOL><DEDENT>if model_obj.nest_names is not None:<EOL><INDENT>all_names = model_obj.nest_names + all_names<EOL><DEDENT>return all_names<EOL>
Extracts all the names to be displayed for the estimated parameters. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. Returns ------- all_names : list of strings. There will be one element for each estimated parameter. The order of the parameter names will be `['nest_parameters', 'shape_parameters', 'outside_intercepts', 'index_coefficients']`.
f7686:m1
def get_param_list_for_prediction(model_obj, replicates):
<EOL>ensure_samples_is_ndim_ndarray(replicates, ndim=<NUM_LIT:2>, name='<STR_LIT>')<EOL>num_idx_coefs = len(model_obj.ind_var_names)<EOL>intercept_names = model_obj.intercept_names<EOL>num_outside_intercepts = <NUM_LIT:0> if intercept_names is None else len(intercept_names)<EOL>shape_names = model_obj.shape_names<EOL>num_shapes = <NUM_LIT:0> if shape_names is None else len(shape_names)<EOL>nest_names = model_obj.nest_names<EOL>num_nests = <NUM_LIT:0> if nest_names is None else len(nest_names)<EOL>parameter_numbers = [num_nests, num_shapes, num_outside_intercepts, num_idx_coefs]<EOL>current_idx = <NUM_LIT:0><EOL>param_list = []<EOL>for param_num in parameter_numbers:<EOL><INDENT>if param_num == <NUM_LIT:0>:<EOL><INDENT>param_list.insert(<NUM_LIT:0>, None)<EOL>continue<EOL><DEDENT>upper_idx = current_idx + param_num<EOL>param_list.insert(<NUM_LIT:0>, replicates[:, current_idx:upper_idx].T)<EOL>current_idx += param_num<EOL><DEDENT>return param_list<EOL>
Create the `param_list` argument for use with `model_obj.predict`. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. This model should have already undergone a complete estimation process. I.e. its `fit_mle` method should have been called without `just_point=True`. replicates : 2D ndarray. Should represent the set of parameter values that we now wish to partition for use with the `model_obj.predict` method. Returns ------- param_list : list. Contains four elements, each being a numpy array. Either all of the arrays should be 1D or all of the arrays should be 2D. If 2D, the arrays should have the same number of columns. Each column is a particular set of parameter values that one wants to predict with. The first element in the list should be the index coefficients. The second element should contain the 'outside' intercept parameters if there are any, or None otherwise. The third element should contain the shape parameters if there are any or None otherwise. The fourth element should contain the nest coefficients if there are any or None otherwise.
f7686:m2
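The partitioning loop above can be traced on concrete numbers. A worked sketch with 10 replicate rows and hypothetical block sizes (0 nests, 0 shapes, 2 outside intercepts, 3 index coefficients):

import numpy as np

replicates = np.arange(50, dtype=float).reshape((10, 5))
parameter_numbers = [0, 0, 2, 3]  # nests, shapes, intercepts, index coefs
current_idx = 0
param_list = []
for param_num in parameter_numbers:
    if param_num == 0:
        param_list.insert(0, None)
        continue
    param_list.insert(0, replicates[:, current_idx:current_idx + param_num].T)
    current_idx += param_num
# param_list == [index coefs (3 x 10), intercepts (2 x 10), None, None]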
def ensure_replicates_kwarg_validity(replicate_kwarg):
if replicate_kwarg not in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ValueError(msg)<EOL><DEDENT>return None<EOL>
Ensures `replicate_kwarg` is either 'bootstrap' or 'jackknife'. Raises a helpful ValueError otherwise.
f7686:m3
def generate_bootstrap_replicates(self,<EOL>num_samples,<EOL>mnl_obj=None,<EOL>mnl_init_vals=None,<EOL>mnl_fit_kwargs=None,<EOL>extract_init_vals=None,<EOL>print_res=False,<EOL>method="<STR_LIT>",<EOL>loss_tol=<NUM_LIT>,<EOL>gradient_tol=<NUM_LIT>,<EOL>maxiter=<NUM_LIT:1000>,<EOL>ridge=None,<EOL>constrained_pos=None,<EOL>boot_seed=None,<EOL>weights=None):
print("<STR_LIT>")<EOL>print(time.strftime("<STR_LIT>"))<EOL>sys.stdout.flush()<EOL>obs_id_array = self.model_obj.data[self.model_obj.obs_id_col].values<EOL>alt_id_array = self.model_obj.alt_IDs<EOL>choice_array = self.model_obj.choices<EOL>num_params = self.mle_params.shape[<NUM_LIT:0>]<EOL>obs_id_per_sample =bs.create_cross_sectional_bootstrap_samples(obs_id_array,<EOL>alt_id_array,<EOL>choice_array,<EOL>num_samples,<EOL>seed=boot_seed)<EOL>dfs_by_obs_id =bs.create_deepcopied_groupby_dict(self.model_obj.data,<EOL>self.model_obj.obs_id_col)<EOL>boot_id_col = "<STR_LIT>"<EOL>point_estimates = np.empty((num_samples, num_params), dtype=float)<EOL>fit_kwargs = {"<STR_LIT>": print_res,<EOL>"<STR_LIT>": method,<EOL>"<STR_LIT>": loss_tol,<EOL>"<STR_LIT>": gradient_tol,<EOL>"<STR_LIT>": maxiter,<EOL>"<STR_LIT>": ridge,<EOL>"<STR_LIT>": constrained_pos,<EOL>"<STR_LIT>": True}<EOL>mnl_spec = None if mnl_obj is None else mnl_obj.specification<EOL>mnl_names = None if mnl_obj is None else mnl_obj.name_spec<EOL>iterable_for_iteration = PROGRESS(xrange(num_samples),<EOL>desc="<STR_LIT>",<EOL>total=num_samples)<EOL>for row in iterable_for_iteration:<EOL><INDENT>bootstrap_df =bs.create_bootstrap_dataframe(self.model_obj.data,<EOL>self.model_obj.obs_id_col,<EOL>obs_id_per_sample[row, :],<EOL>dfs_by_obs_id,<EOL>boot_id_col=boot_id_col)<EOL>current_results =retrieve_point_est(self.model_obj,<EOL>bootstrap_df,<EOL>boot_id_col,<EOL>num_params,<EOL>mnl_spec,<EOL>mnl_names,<EOL>mnl_init_vals,<EOL>mnl_fit_kwargs,<EOL>extract_init_vals=extract_init_vals,<EOL>**fit_kwargs)<EOL>point_estimates[row] = current_results["<STR_LIT:x>"]<EOL><DEDENT>self.bootstrap_replicates =pd.DataFrame(point_estimates, columns=self.mle_params.index)<EOL>print("<STR_LIT>")<EOL>print(time.strftime("<STR_LIT>"))<EOL>return None<EOL>
Generates the bootstrap replicates for one's given model and dataset. Parameters ---------- num_samples : positive int. Specifies the number of bootstrap samples that are to be drawn. mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_vals` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they are determined. Default `== False`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. boot_seed : non-negative int or None, optional. Denotes the random seed to be used when generating the bootstrap samples. If None, the sample generation process will generally be non-reproducible. Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- None. Will store the bootstrap replicates on the `self.bootstrap_replicates` attribute.
f7686:c0:m1
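To make the record above concrete, here is a minimal usage sketch. It assumes the class defined in this file is pylogit's `Boot` wrapper (imported from `pylogit.bootstrap`) and takes the model object and its MLE parameter Series in its constructor; `long_df`, the column names, the specification, and the sample count are illustrative placeholders, not part of the source.

# Hypothetical usage sketch; `long_df` is a placeholder for one's own
# long-format choice data with "obs_id", "alt_id", "choice", and
# "travel_cost" columns.
from collections import OrderedDict

import numpy as np
import pylogit as pl
from pylogit.bootstrap import Boot  # assumed import path for this class

# One generic cost coefficient shared by alternatives 1 and 2, plus an
# alternative-specific constant for alternative 1.
spec = OrderedDict([("intercept", [1]), ("travel_cost", [[1, 2]])])
names = OrderedDict([("intercept", ["ASC alt 1"]),
                     ("travel_cost", ["travel cost"])])

model = pl.create_choice_model(data=long_df,
                               alt_id_col="alt_id",
                               obs_id_col="obs_id",
                               choice_col="choice",
                               specification=spec,
                               model_type="MNL",
                               names=names)
model.fit_mle(np.zeros(2))

# Wrap the estimated model and draw 200 bootstrap samples; boot_seed
# makes the resampling reproducible.
boot = Boot(model, model.params)
boot.generate_bootstrap_replicates(200, boot_seed=601)

# One row per bootstrap sample, one column per estimated parameter.
print(boot.bootstrap_replicates.describe())

Because `mnl_obj` is left as None, the final model itself must be an MNL here; for a nested or mixed logit one would instead pass an auxiliary MNL via `mnl_obj` so it can supply starting values.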
def generate_jackknife_replicates(self,
                                  mnl_obj=None,
                                  mnl_init_vals=None,
                                  mnl_fit_kwargs=None,
                                  extract_init_vals=None,
                                  print_res=False,
                                  method="bfgs",
                                  loss_tol=1e-06,
                                  gradient_tol=1e-06,
                                  maxiter=1000,
                                  ridge=None,
                                  constrained_pos=None):
print("<STR_LIT>")<EOL>print(time.strftime("<STR_LIT>"))<EOL>sys.stdout.flush()<EOL>obs_id_col = self.model_obj.obs_id_col<EOL>orig_obs_id_array =self.model_obj.data[obs_id_col].values<EOL>unique_obs_ids = np.sort(np.unique(orig_obs_id_array))<EOL>num_obs = unique_obs_ids.size<EOL>num_params = self.mle_params.size<EOL>fit_kwargs = {"<STR_LIT>": print_res,<EOL>"<STR_LIT>": method,<EOL>"<STR_LIT>": loss_tol,<EOL>"<STR_LIT>": gradient_tol,<EOL>"<STR_LIT>": maxiter,<EOL>"<STR_LIT>": ridge,<EOL>"<STR_LIT>": constrained_pos,<EOL>"<STR_LIT>": True}<EOL>mnl_spec = None if mnl_obj is None else mnl_obj.specification<EOL>mnl_names = None if mnl_obj is None else mnl_obj.name_spec<EOL>point_replicates = np.empty((num_obs, num_params), dtype=float)<EOL>iterable_for_iteration = PROGRESS(enumerate(unique_obs_ids),<EOL>desc="<STR_LIT>",<EOL>total=unique_obs_ids.size)<EOL>for pos, obs_id in iterable_for_iteration:<EOL><INDENT>new_df = self.model_obj.data.loc[orig_obs_id_array != obs_id]<EOL>current_results =retrieve_point_est(self.model_obj,<EOL>new_df,<EOL>obs_id_col,<EOL>num_params,<EOL>mnl_spec,<EOL>mnl_names,<EOL>mnl_init_vals,<EOL>mnl_fit_kwargs,<EOL>extract_init_vals=extract_init_vals,<EOL>**fit_kwargs)<EOL>point_replicates[pos] = current_results['<STR_LIT:x>']<EOL><DEDENT>self.jackknife_replicates =pd.DataFrame(point_replicates, columns=self.mle_params.index)<EOL>print("<STR_LIT>")<EOL>print(time.strftime("<STR_LIT>"))<EOL>return None<EOL>
Generates the jackknife replicates for one's given model and dataset.

Parameters
----------
mnl_obj : an instance of pylogit.MNL or None, optional.
    Should be the MNL model object that is used to provide starting
    values for the final model being estimated. If None, then one's
    final model should be an MNL model. Default == None.
mnl_init_vals : 1D ndarray or None, optional.
    If the model that is being estimated is not an MNL, then
    `mnl_init_vals` should be passed. Should contain the values used
    to begin the estimation process for the MNL model that is used to
    provide starting values for our desired model. Default == None.
mnl_fit_kwargs : dict or None.
    If the model that is being estimated is not an MNL, then
    `mnl_fit_kwargs` should be passed.
extract_init_vals : callable or None, optional.
    Should accept 3 arguments, in the following order. First, it
    should accept `orig_model_obj`. Second, it should accept a pandas
    Series of estimated parameters from the MNL model. The Series'
    index will be the names of the coefficients from `mnl_names`.
    Thirdly, it should accept an int denoting the number of parameters
    in the final choice model. The callable should return a 1D ndarray
    of starting values for the final choice model. Default == None.
print_res : bool, optional.
    Determines whether the timing and initial and final log likelihood
    results will be printed as they are determined.
    Default `== False`.
method : str, optional.
    Should be a valid string for scipy.optimize.minimize. Determines
    the optimization algorithm that is used for this problem.
    Default `== 'bfgs'`.
loss_tol : float, optional.
    Determines the tolerance on the difference in objective function
    values from one iteration to the next that is needed to determine
    convergence. Default `== 1e-06`.
gradient_tol : float, optional.
    Determines the tolerance on the difference in gradient values from
    one iteration to the next which is needed to determine
    convergence. Default `== 1e-06`.
maxiter : int, optional.
    Determines the maximum number of iterations used by the optimizer.
    Default `== 1000`.
ridge : int, float, long, or None, optional.
    Determines whether or not ridge regression is performed. If a
    scalar is passed, then that scalar determines the ridge penalty
    for the optimization. The scalar should be greater than or equal
    to zero. Default `== None`.
constrained_pos : list or None, optional.
    Denotes the positions of the array of estimated parameters that
    are not to change from their initial values. If a list is passed,
    the elements are to be integers where no such integer is greater
    than `init_vals.size`. Default == None.

Returns
-------
None. Will store the jackknife replicates on the
`self.jackknife_replicates` attribute.
f7686:c0:m2
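Continuing the hypothetical sketch from the bootstrap record, the stored replicates feed directly into a delete-one-observation variance estimate. The (n - 1)/n scaling below is the standard jackknife variance formula; `boot` is the same assumed wrapper instance as before, and the method defaults suffice because the sketch's final model is an MNL.

# Hypothetical continuation of the earlier sketch.
import numpy as np

boot.generate_jackknife_replicates()
reps = boot.jackknife_replicates  # shape: (num_obs, num_params)

# Delete-one jackknife variance per parameter:
# ((n - 1) / n) * sum of squared deviations from the replicate mean.
n = reps.shape[0]
jackknife_var = ((n - 1.0) / n) * ((reps - reps.mean(axis=0)) ** 2).sum(axis=0)
jackknife_std_errs = np.sqrt(jackknife_var)
print(jackknife_std_errs)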